diff --git a/go.mod b/go.mod
index bd14688948..a5a92af411 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module github.com/Microsoft/hcsshim
-go 1.23.0
+go 1.24.6
require (
github.com/Microsoft/cosesign1go v1.4.0
@@ -11,7 +11,7 @@ require (
github.com/containerd/cgroups/v3 v3.0.5
github.com/containerd/console v1.0.4
github.com/containerd/containerd/api v1.9.0
- github.com/containerd/containerd/v2 v2.1.2
+ github.com/containerd/containerd/v2 v2.1.4
github.com/containerd/errdefs v1.0.0
github.com/containerd/errdefs/pkg v0.3.0
github.com/containerd/go-runc v1.1.0
@@ -25,13 +25,13 @@ require (
github.com/linuxkit/virtsock v0.0.0-20241009230534-cb6a20cc0422
github.com/mattn/go-shellwords v1.0.12
github.com/moby/sys/user v0.4.0
- github.com/open-policy-agent/opa v0.70.0
+ github.com/open-policy-agent/opa v1.10.1
github.com/opencontainers/cgroups v0.0.4
github.com/opencontainers/runc v1.3.0
github.com/opencontainers/runtime-spec v1.2.1
github.com/pelletier/go-toml v1.9.5
github.com/pkg/errors v0.9.1
- github.com/sirupsen/logrus v1.9.3
+ github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af
github.com/urfave/cli v1.22.16
github.com/urfave/cli/v2 v2.27.6
github.com/vishvananda/netlink v1.3.1
@@ -39,16 +39,15 @@ require (
go.etcd.io/bbolt v1.4.0
go.opencensus.io v0.24.0
go.uber.org/mock v0.6.0
- golang.org/x/sync v0.16.0
- golang.org/x/sys v0.35.0
- google.golang.org/grpc v1.75.0
+ golang.org/x/sync v0.17.0
+ golang.org/x/sys v0.36.0
+ google.golang.org/grpc v1.75.1
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1
- google.golang.org/protobuf v1.36.7
+ google.golang.org/protobuf v1.36.9
)
require (
- github.com/OneOfOne/xxhash v1.2.8 // indirect
- github.com/agnivade/levenshtein v1.2.0 // indirect
+ github.com/agnivade/levenshtein v1.2.1 // indirect
github.com/akavel/rsrc v0.10.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
@@ -58,8 +57,8 @@ require (
github.com/containerd/plugin v1.0.0 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
- github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
- github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
+ github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
+ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
github.com/docker/cli v24.0.0+incompatible // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker v28.3.0+incompatible // indirect
@@ -70,20 +69,24 @@ require (
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/gobwas/glob v0.2.3 // indirect
- github.com/goccy/go-json v0.10.2 // indirect
+ github.com/goccy/go-json v0.10.5 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/gorilla/mux v1.8.1 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/lestrrat-go/backoff/v2 v2.0.8 // indirect
- github.com/lestrrat-go/blackmagic v1.0.2 // indirect
+ github.com/lestrrat-go/blackmagic v1.0.4 // indirect
+ github.com/lestrrat-go/dsig v1.0.0 // indirect
+ github.com/lestrrat-go/dsig-secp256k1 v1.0.0 // indirect
github.com/lestrrat-go/httpcc v1.0.1 // indirect
+ github.com/lestrrat-go/httprc/v3 v3.0.1 // indirect
github.com/lestrrat-go/iter v1.0.2 // indirect
github.com/lestrrat-go/jwx v1.2.29 // indirect
+ github.com/lestrrat-go/jwx/v3 v3.0.11 // indirect
github.com/lestrrat-go/option v1.0.1 // indirect
+ github.com/lestrrat-go/option/v2 v2.0.0 // indirect
github.com/mdlayher/socket v0.5.1 // indirect
github.com/mdlayher/vsock v1.2.1 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
@@ -92,14 +95,17 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
- github.com/prometheus/client_golang v1.22.0 // indirect
- github.com/prometheus/client_model v0.6.1 // indirect
- github.com/prometheus/common v0.62.0 // indirect
- github.com/prometheus/procfs v0.15.1 // indirect
- github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect
+ github.com/prometheus/client_golang v1.23.2 // indirect
+ github.com/prometheus/client_model v0.6.2 // indirect
+ github.com/prometheus/common v0.66.1 // indirect
+ github.com/prometheus/procfs v0.17.0 // indirect
+ github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
- github.com/tchap/go-patricia/v2 v2.3.2 // indirect
+ github.com/segmentio/asm v1.2.0 // indirect
+ github.com/tchap/go-patricia/v2 v2.3.3 // indirect
+ github.com/valyala/fastjson v1.6.4 // indirect
github.com/vbatts/tar-split v0.11.5 // indirect
+ github.com/vektah/gqlparser/v2 v2.5.30 // indirect
github.com/veraison/go-cose v1.1.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
@@ -107,18 +113,19 @@ require (
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
github.com/yashtewari/glob-intersection v0.2.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
- go.opentelemetry.io/otel v1.37.0 // indirect
- go.opentelemetry.io/otel/metric v1.37.0 // indirect
- go.opentelemetry.io/otel/sdk v1.37.0 // indirect
- go.opentelemetry.io/otel/trace v1.37.0 // indirect
- golang.org/x/crypto v0.41.0 // indirect
+ go.opentelemetry.io/otel v1.38.0 // indirect
+ go.opentelemetry.io/otel/metric v1.38.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.38.0 // indirect
+ go.opentelemetry.io/otel/trace v1.38.0 // indirect
+ go.yaml.in/yaml/v2 v2.4.2 // indirect
+ golang.org/x/crypto v0.42.0 // indirect
golang.org/x/mod v0.27.0 // indirect
- golang.org/x/net v0.43.0 // indirect
- golang.org/x/text v0.28.0 // indirect
+ golang.org/x/net v0.44.0 // indirect
+ golang.org/x/text v0.29.0 // indirect
golang.org/x/tools v0.36.0 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- sigs.k8s.io/yaml v1.4.0 // indirect
+ sigs.k8s.io/yaml v1.6.0 // indirect
)
replace google.golang.org/genproto => google.golang.org/genproto v0.0.0-20250428153025-10db94c68c34
diff --git a/go.sum b/go.sum
index 53ceeffb17..010bea995a 100644
--- a/go.sum
+++ b/go.sum
@@ -367,10 +367,8 @@ github.com/Microsoft/didx509go v0.0.3/go.mod h1:wWt+iQsLzn3011+VfESzznLIp/Owhuj7
github.com/Microsoft/go-winio v0.6.3-0.20251027160822-ad3df93bed29 h1:0kQAzHq8vLs7Pptv+7TxjdETLf/nIqJpIB4oC6Ba4vY=
github.com/Microsoft/go-winio v0.6.3-0.20251027160822-ad3df93bed29/go.mod h1:ZWa7ssZJT30CCDGJ7fk/2SBTq9BIQrrVjrcss0UW2s0=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
-github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
-github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY=
-github.com/agnivade/levenshtein v1.2.0/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU=
+github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM=
+github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU=
github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
@@ -382,6 +380,8 @@ github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhk
github.com/alecthomas/participle/v2 v2.0.0/go.mod h1:rAKZdJldHu8084ojcWevWAL8KmEU+AT+Olodb+WoN2Y=
github.com/alecthomas/participle/v2 v2.1.0/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c=
github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/apache/arrow/go/v15 v15.0.2/go.mod h1:DGXsR3ajT524njufqf95822i+KTh+yea1jass9YXgjA=
@@ -395,14 +395,15 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
-github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA=
-github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q=
+github.com/bytecodealliance/wasmtime-go/v37 v37.0.0 h1:DPjdn2V3JhXHMoZ2ymRqGK+y1bDyr9wgpyYCvhjMky8=
+github.com/bytecodealliance/wasmtime-go/v37 v37.0.0/go.mod h1:Pf1l2JCTUFMnOqDIwkjzx1qfVJ09xbaXETKgRVE4jZ0=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
+github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -445,8 +446,8 @@ github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn
github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
github.com/containerd/containerd/api v1.9.0 h1:HZ/licowTRazus+wt9fM6r/9BQO7S0vD5lMcWspGIg0=
github.com/containerd/containerd/api v1.9.0/go.mod h1:GhghKFmTR3hNtyznBoQ0EMWr9ju5AqHjcZPsSpTKutI=
-github.com/containerd/containerd/v2 v2.1.2 h1:4ZQxB+FVYmwXZgpBcKfar6ieppm3KC5C6FRKvtJ6DRU=
-github.com/containerd/containerd/v2 v2.1.2/go.mod h1:8C5QV9djwsYDNhxfTCFjWtTBZrqjditQ4/ghHSYjnHM=
+github.com/containerd/containerd/v2 v2.1.4 h1:/hXWjiSFd6ftrBOBGfAZ6T30LJcx1dBjdKEeI8xucKQ=
+github.com/containerd/containerd/v2 v2.1.4/go.mod h1:8C5QV9djwsYDNhxfTCFjWtTBZrqjditQ4/ghHSYjnHM=
github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4=
github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
@@ -473,19 +474,22 @@ github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++
github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc=
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
+github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
-github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg=
-github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw=
-github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8=
-github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
+github.com/dgraph-io/badger/v4 v4.8.0 h1:JYph1ChBijCw8SLeybvPINizbDKWZ5n/GYbz2yhN/bs=
+github.com/dgraph-io/badger/v4 v4.8.0/go.mod h1:U6on6e8k/RTbUWxqKR0MvugJuVmkxSNc79ap4917h4w=
+github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM=
+github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI=
github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7cNTs5R6Hk4V2lcmLz2NsG2VnInyNo=
github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
github.com/docker/cli v24.0.0+incompatible h1:0+1VshNwBQzQAx9lOl+OYCTCEAD8fKs/qeXMx3O0wqM=
@@ -577,8 +581,9 @@ github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+
github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
-github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
+github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/goccy/go-yaml v1.9.8/go.mod h1:JubOolP3gh0HpiBc4BLRD4YmjEjHAmIIB2aaXKkTfoE=
github.com/goccy/go-yaml v1.11.0/go.mod h1:H+mJrWtjPTJAHvRbV09MCK9xYwODM+wRTVFFTWckfng=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -596,8 +601,6 @@ github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwm
github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/glog v1.2.3/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
-github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I=
-github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -634,14 +637,14 @@ github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
-github.com/google/flatbuffers v23.5.26+incompatible h1:M9dgRyhJemaM4Sw8+66GHBu8ioaQmyPLg1b8VwK5WJg=
github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q=
+github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -742,15 +745,13 @@ github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07
github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
-github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
-github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
github.com/hamba/avro/v2 v2.17.2/go.mod h1:Q9YK+qxAhtVrNqOhwlZTATLgLA8qxG2vtvkhK8fJ7Jo=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -789,17 +790,28 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/lestrrat-go/backoff/v2 v2.0.8 h1:oNb5E5isby2kiro9AgdHLv5N5tint1AnDVVf2E2un5A=
github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y=
-github.com/lestrrat-go/blackmagic v1.0.2 h1:Cg2gVSc9h7sz9NOByczrbUvLopQmXrfFx//N+AkAr5k=
github.com/lestrrat-go/blackmagic v1.0.2/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU=
+github.com/lestrrat-go/blackmagic v1.0.4 h1:IwQibdnf8l2KoO+qC3uT4OaTWsW7tuRQXy9TRN9QanA=
+github.com/lestrrat-go/blackmagic v1.0.4/go.mod h1:6AWFyKNNj0zEXQYfTMPfZrAXUWUfTIZ5ECEUEJaijtw=
+github.com/lestrrat-go/dsig v1.0.0 h1:OE09s2r9Z81kxzJYRn07TFM9XA4akrUdoMwr0L8xj38=
+github.com/lestrrat-go/dsig v1.0.0/go.mod h1:dEgoOYYEJvW6XGbLasr8TFcAxoWrKlbQvmJgCR0qkDo=
+github.com/lestrrat-go/dsig-secp256k1 v1.0.0 h1:JpDe4Aybfl0soBvoVwjqDbp+9S1Y2OM7gcrVVMFPOzY=
+github.com/lestrrat-go/dsig-secp256k1 v1.0.0/go.mod h1:CxUgAhssb8FToqbL8NjSPoGQlnO4w3LG1P0qPWQm/NU=
github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE=
github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
+github.com/lestrrat-go/httprc/v3 v3.0.1 h1:3n7Es68YYGZb2Jf+k//llA4FTZMl3yCwIjFIk4ubevI=
+github.com/lestrrat-go/httprc/v3 v3.0.1/go.mod h1:2uAvmbXE4Xq8kAUjVrZOq1tZVYYYs5iP62Cmtru00xk=
github.com/lestrrat-go/iter v1.0.2 h1:gMXo1q4c2pHmC3dn8LzRhJfP1ceCbgSiT9lUydIzltI=
github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4=
github.com/lestrrat-go/jwx v1.2.29 h1:QT0utmUJ4/12rmsVQrJ3u55bycPkKqGYuGT4tyRhxSQ=
github.com/lestrrat-go/jwx v1.2.29/go.mod h1:hU8k2l6WF0ncx20uQdOmik/Gjg6E3/wIRtXSNFeZuB8=
+github.com/lestrrat-go/jwx/v3 v3.0.11 h1:yEeUGNUuNjcez/Voxvr7XPTYNraSQTENJgtVTfwvG/w=
+github.com/lestrrat-go/jwx/v3 v3.0.11/go.mod h1:XSOAh2SiXm0QgRe3DulLZLyt+wUuEdFo81zuKTLcvgQ=
github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU=
github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
+github.com/lestrrat-go/option/v2 v2.0.0 h1:XxrcaJESE1fokHy3FpaQ/cXW8ZsIdWcdFzzLOcID3Ss=
+github.com/lestrrat-go/option/v2 v2.0.0/go.mod h1:oSySsmzMoR0iRzCDCaUfsCzxQHUEuhOViQObyy7S6Vg=
github.com/linuxkit/virtsock v0.0.0-20241009230534-cb6a20cc0422 h1:XvRuyDDRvi+UDxHN/M4MW4HxjmNVMmUKQj/+AbgsYgk=
github.com/linuxkit/virtsock v0.0.0-20241009230534-cb6a20cc0422/go.mod h1:JLgfq4XMVbvfNlAXla/41lZnp21O72a/wWHGJefAvgQ=
github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
@@ -840,8 +852,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/open-policy-agent/opa v0.70.0 h1:B3cqCN2iQAyKxK6+GI+N40uqkin+wzIrM7YA60t9x1U=
-github.com/open-policy-agent/opa v0.70.0/go.mod h1:Y/nm5NY0BX0BqjBriKUiV81sCl8XOjjvqQG7dXrggtI=
+github.com/open-policy-agent/opa v1.10.1 h1:haIvxZSPky8HLjRrvQwWAjCPLg8JDFSZMbbG4yyUHgY=
+github.com/open-policy-agent/opa v1.10.1/go.mod h1:7uPI3iRpOalJ0BhK6s1JALWPU9HvaV1XeBSSMZnr/PM=
github.com/opencontainers/cgroups v0.0.4 h1:XVj8P/IHVms/j+7eh8ggdkTLAxjz84ZzuFyGoE28DR4=
github.com/opencontainers/cgroups v0.0.4/go.mod h1:s8lktyhlGUqM7OSRL5P7eAW6Wb+kWPNvt4qvVfzA5vs=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
@@ -866,24 +878,26 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
-github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
+github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
-github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=
-github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
+github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
+github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
+github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
+github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg=
+github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
@@ -897,8 +911,12 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk=
-github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
-github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
+github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
+github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
+github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
+github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0=
+github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
@@ -922,11 +940,12 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/substrait-io/substrait-go v0.4.2/go.mod h1:qhpnLmrcvAnlZsUyPXZRqldiHapPTXC3t7xFgDi3aQg=
-github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM=
-github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
+github.com/tchap/go-patricia/v2 v2.3.3 h1:xfNEsODumaEcCcY3gI0hYPZ/PcpVv5ju6RMAhgwZDDc=
+github.com/tchap/go-patricia/v2 v2.3.3/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
@@ -935,8 +954,12 @@ github.com/urfave/cli v1.22.16 h1:MH0k6uJxdwdeWQTwhSO42Pwr4YLrNLwBtg1MRgTqPdQ=
github.com/urfave/cli v1.22.16/go.mod h1:EeJR6BKodywf4zciqrdw6hpCPk68JO9z5LazXZMn5Po=
github.com/urfave/cli/v2 v2.27.6 h1:VdRdS98FNhKZ8/Az8B7MTyGQmpIr36O1EHybx/LaZ4g=
github.com/urfave/cli/v2 v2.27.6/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ=
+github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ=
+github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
+github.com/vektah/gqlparser/v2 v2.5.30 h1:EqLwGAFLIzt1wpx1IPpY67DwUujF1OfzgEyDsLrN6kE=
+github.com/vektah/gqlparser/v2 v2.5.30/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo=
github.com/veraison/go-cose v1.1.0 h1:AalPS4VGiKavpAzIlBjrn7bhqXiXi4jbMYY/2+UC+4o=
github.com/veraison/go-cose v1.1.0/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi/an96Ct4=
github.com/vishvananda/netlink v1.3.1 h1:3AEMt62VKqz90r0tmNhog0r/PpWKmrEShJU0wJW6bV0=
@@ -1001,8 +1024,9 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI=
go.opentelemetry.io/otel v1.23.0/go.mod h1:YCycw9ZeKhcJFrb34iVSkyT0iczq/zYDtZYFufObyB0=
@@ -1016,12 +1040,14 @@ go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGi
go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
-go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
-go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo=
+go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
+go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I=
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY=
@@ -1036,8 +1062,8 @@ go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzau
go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
-go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
-go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
+go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
+go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc=
go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
@@ -1049,8 +1075,8 @@ go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJC
go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg=
-go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
-go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
+go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
+go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg=
go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ=
go.opentelemetry.io/otel/sdk/metric v1.30.0/go.mod h1:waS6P3YqFNzeP01kuo/MBBYqaoBJl7efRQHOaydhy1Y=
@@ -1058,8 +1084,8 @@ go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC
go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ=
go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
-go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
-go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
+go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
+go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo=
go.opentelemetry.io/otel/trace v1.23.0/go.mod h1:GSGTbIClEsuZrGIzoEHqsVfxgn5UkggkflQwDScNUsk=
@@ -1073,18 +1099,22 @@ go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L
go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
-go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
-go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
+go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
+go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
-go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
-go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
+go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
+go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
+go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
+go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -1125,8 +1155,8 @@ golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5
golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
-golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
-golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
+golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
+golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1283,8 +1313,8 @@ golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
-golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
-golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
+golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
+golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1361,8 +1391,8 @@ golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
-golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
-golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
+golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1468,8 +1498,8 @@ golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
-golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
-golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
+golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1536,8 +1566,8 @@ golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
-golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
-golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
+golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
+golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1806,8 +1836,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.
google.golang.org/genproto/googleapis/api v0.0.0-20250313205543-e70fdf4c4cb4/go.mod h1:c8q6Z6OCqnfVIqUFJkCzKcrj8eCvUrz+K4KRzSTuANg=
google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e/go.mod h1:085qFyf2+XaZlRdCgKNCIZ3afY2p4HHZdoIRpId8F4A=
google.golang.org/genproto/googleapis/api v0.0.0-20250425173222-7b384671a197/go.mod h1:Cd8IzgPo5Akum2c9R6FsXNaZbH3Jpa2gpHlW89FqlyQ=
-google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 h1:FiusG7LWj+4byqhbvmB+Q93B/mOxJLN2DTozDuZm4EU=
-google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:kXqgZtrWaf6qS3jZOCnCH7WYfrvFjkC51bM8fz3RsCA=
+google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY=
+google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE=
google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA=
google.golang.org/genproto/googleapis/bytestream v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:+34luvCflYKiKylNwGJfn9cFBbcL/WrkciMmDmsTQ/A=
google.golang.org/genproto/googleapis/bytestream v0.0.0-20231030173426-d783a09b4405/go.mod h1:GRUCuLdzVqZte8+Dl/D4N25yLzcGqqWaYkeVOwulFqw=
@@ -1921,8 +1951,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4/go.
google.golang.org/genproto/googleapis/rpc v0.0.0-20250409194420-de1ac958c67a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250425173222-7b384671a197/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -1997,8 +2027,8 @@ google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7
google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw=
google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
-google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4=
-google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
+google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI=
+google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 h1:F29+wU6Ee6qgu9TddPgooOdaqsxTMunOoj8KA5yuS5A=
@@ -2033,8 +2063,8 @@ google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojt
google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
-google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A=
-google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
+google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
+google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
@@ -2091,5 +2121,5 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
-sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
+sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
diff --git a/vendor/github.com/OneOfOne/xxhash/.gitignore b/vendor/github.com/OneOfOne/xxhash/.gitignore
deleted file mode 100644
index f4faa7f8f1..0000000000
--- a/vendor/github.com/OneOfOne/xxhash/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-*.txt
-*.pprof
-cmap2/
-cache/
diff --git a/vendor/github.com/OneOfOne/xxhash/.travis.yml b/vendor/github.com/OneOfOne/xxhash/.travis.yml
deleted file mode 100644
index 1c6dc55bc7..0000000000
--- a/vendor/github.com/OneOfOne/xxhash/.travis.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-language: go
-sudo: false
-
-go:
- - "1.10"
- - "1.11"
- - "1.12"
- - master
-
-script:
- - go test -tags safe ./...
- - go test ./...
- -
diff --git a/vendor/github.com/OneOfOne/xxhash/LICENSE b/vendor/github.com/OneOfOne/xxhash/LICENSE
deleted file mode 100644
index 9e30b4f342..0000000000
--- a/vendor/github.com/OneOfOne/xxhash/LICENSE
+++ /dev/null
@@ -1,187 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
diff --git a/vendor/github.com/OneOfOne/xxhash/README.md b/vendor/github.com/OneOfOne/xxhash/README.md
deleted file mode 100644
index 8eea28c394..0000000000
--- a/vendor/github.com/OneOfOne/xxhash/README.md
+++ /dev/null
@@ -1,74 +0,0 @@
-# xxhash [](https://godoc.org/github.com/OneOfOne/xxhash) [](https://travis-ci.org/OneOfOne/xxhash) [](https://gocover.io/github.com/OneOfOne/xxhash)
-
-This is a native Go implementation of the excellent [xxhash](https://github.com/Cyan4973/xxHash)* algorithm, an extremely fast non-cryptographic Hash algorithm, working at speeds close to RAM limits.
-
-* The C implementation is ([Copyright](https://github.com/Cyan4973/xxHash/blob/master/LICENSE) (c) 2012-2014, Yann Collet)
-
-## Install
-
- go get github.com/OneOfOne/xxhash
-
-## Features
-
-* On Go 1.7+ the pure go version is faster than CGO for all inputs.
-* Supports ChecksumString{32,64} xxhash{32,64}.WriteString, which uses no copies when it can, falls back to copy on appengine.
-* The native version falls back to a less optimized version on appengine due to the lack of unsafe.
-* Almost as fast as the mostly pure assembly version written by the brilliant [cespare](https://github.com/cespare/xxhash), while also supporting seeds.
-* To manually toggle the appengine version build with `-tags safe`.
-
-## Benchmark
-
-### Core i7-4790 @ 3.60GHz, Linux 4.12.6-1-ARCH (64bit), Go tip (+ff90f4af66 2017-08-19)
-
-```bash
-➤ go test -bench '64' -count 5 -tags cespare | benchstat /dev/stdin
-name time/op
-
-# https://github.com/cespare/xxhash
-XXSum64Cespare/Func-8 160ns ± 2%
-XXSum64Cespare/Struct-8 173ns ± 1%
-XXSum64ShortCespare/Func-8 6.78ns ± 1%
-XXSum64ShortCespare/Struct-8 19.6ns ± 2%
-
-# this package (default mode, using unsafe)
-XXSum64/Func-8 170ns ± 1%
-XXSum64/Struct-8 182ns ± 1%
-XXSum64Short/Func-8 13.5ns ± 3%
-XXSum64Short/Struct-8 20.4ns ± 0%
-
-# this package (appengine, *not* using unsafe)
-XXSum64/Func-8 241ns ± 5%
-XXSum64/Struct-8 243ns ± 6%
-XXSum64Short/Func-8 15.2ns ± 2%
-XXSum64Short/Struct-8 23.7ns ± 5%
-
-CRC64ISO-8 1.23µs ± 1%
-CRC64ISOString-8 2.71µs ± 4%
-CRC64ISOShort-8 22.2ns ± 3%
-
-Fnv64-8 2.34µs ± 1%
-Fnv64Short-8 74.7ns ± 8%
-```
-
-## Usage
-
-```go
- h := xxhash.New64()
- // r, err := os.Open("......")
- // defer f.Close()
- r := strings.NewReader(F)
- io.Copy(h, r)
- fmt.Println("xxhash.Backend:", xxhash.Backend)
- fmt.Println("File checksum:", h.Sum64())
-```
-
-[playground](https://play.golang.org/p/wHKBwfu6CPV)
-
-## TODO
-
-* Rewrite the 32bit version to be more optimized.
-* General cleanup as the Go inliner gets smarter.
-
-## License
-
-This project is released under the Apache v2. license. See [LICENSE](LICENSE) for more details.
diff --git a/vendor/github.com/OneOfOne/xxhash/xxhash.go b/vendor/github.com/OneOfOne/xxhash/xxhash.go
deleted file mode 100644
index af2496b77f..0000000000
--- a/vendor/github.com/OneOfOne/xxhash/xxhash.go
+++ /dev/null
@@ -1,294 +0,0 @@
-package xxhash
-
-import (
- "encoding/binary"
- "errors"
- "hash"
-)
-
-const (
- prime32x1 uint32 = 2654435761
- prime32x2 uint32 = 2246822519
- prime32x3 uint32 = 3266489917
- prime32x4 uint32 = 668265263
- prime32x5 uint32 = 374761393
-
- prime64x1 uint64 = 11400714785074694791
- prime64x2 uint64 = 14029467366897019727
- prime64x3 uint64 = 1609587929392839161
- prime64x4 uint64 = 9650029242287828579
- prime64x5 uint64 = 2870177450012600261
-
- maxInt32 int32 = (1<<31 - 1)
-
- // precomputed zero Vs for seed 0
- zero64x1 = 0x60ea27eeadc0b5d6
- zero64x2 = 0xc2b2ae3d27d4eb4f
- zero64x3 = 0x0
- zero64x4 = 0x61c8864e7a143579
-)
-
-const (
- magic32 = "xxh\x07"
- magic64 = "xxh\x08"
- marshaled32Size = len(magic32) + 4*7 + 16
- marshaled64Size = len(magic64) + 8*6 + 32 + 1
-)
-
-func NewHash32() hash.Hash { return New32() }
-func NewHash64() hash.Hash { return New64() }
-
-// Checksum32 returns the checksum of the input data with the seed set to 0.
-func Checksum32(in []byte) uint32 {
- return Checksum32S(in, 0)
-}
-
-// ChecksumString32 returns the checksum of the input data, without creating a copy, with the seed set to 0.
-func ChecksumString32(s string) uint32 {
- return ChecksumString32S(s, 0)
-}
-
-type XXHash32 struct {
- mem [16]byte
- ln, memIdx int32
- v1, v2, v3, v4 uint32
- seed uint32
-}
-
-// Size returns the number of bytes Sum will return.
-func (xx *XXHash32) Size() int {
- return 4
-}
-
-// BlockSize returns the hash's underlying block size.
-// The Write method must be able to accept any amount
-// of data, but it may operate more efficiently if all writes
-// are a multiple of the block size.
-func (xx *XXHash32) BlockSize() int {
- return 16
-}
-
-// NewS32 creates a new hash.Hash32 computing the 32bit xxHash checksum starting with the specific seed.
-func NewS32(seed uint32) (xx *XXHash32) {
- xx = &XXHash32{
- seed: seed,
- }
- xx.Reset()
- return
-}
-
-// New32 creates a new hash.Hash32 computing the 32bit xxHash checksum starting with the seed set to 0.
-func New32() *XXHash32 {
- return NewS32(0)
-}
-
-func (xx *XXHash32) Reset() {
- xx.v1 = xx.seed + prime32x1 + prime32x2
- xx.v2 = xx.seed + prime32x2
- xx.v3 = xx.seed
- xx.v4 = xx.seed - prime32x1
- xx.ln, xx.memIdx = 0, 0
-}
-
-// Sum appends the current hash to b and returns the resulting slice.
-// It does not change the underlying hash state.
-func (xx *XXHash32) Sum(in []byte) []byte {
- s := xx.Sum32()
- return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
-}
-
-// MarshalBinary implements the encoding.BinaryMarshaler interface.
-func (xx *XXHash32) MarshalBinary() ([]byte, error) {
- b := make([]byte, 0, marshaled32Size)
- b = append(b, magic32...)
- b = appendUint32(b, xx.v1)
- b = appendUint32(b, xx.v2)
- b = appendUint32(b, xx.v3)
- b = appendUint32(b, xx.v4)
- b = appendUint32(b, xx.seed)
- b = appendInt32(b, xx.ln)
- b = appendInt32(b, xx.memIdx)
- b = append(b, xx.mem[:]...)
- return b, nil
-}
-
-// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
-func (xx *XXHash32) UnmarshalBinary(b []byte) error {
- if len(b) < len(magic32) || string(b[:len(magic32)]) != magic32 {
- return errors.New("xxhash: invalid hash state identifier")
- }
- if len(b) != marshaled32Size {
- return errors.New("xxhash: invalid hash state size")
- }
- b = b[len(magic32):]
- b, xx.v1 = consumeUint32(b)
- b, xx.v2 = consumeUint32(b)
- b, xx.v3 = consumeUint32(b)
- b, xx.v4 = consumeUint32(b)
- b, xx.seed = consumeUint32(b)
- b, xx.ln = consumeInt32(b)
- b, xx.memIdx = consumeInt32(b)
- copy(xx.mem[:], b)
- return nil
-}
-
-// Checksum64 an alias for Checksum64S(in, 0)
-func Checksum64(in []byte) uint64 {
- return Checksum64S(in, 0)
-}
-
-// ChecksumString64 returns the checksum of the input data, without creating a copy, with the seed set to 0.
-func ChecksumString64(s string) uint64 {
- return ChecksumString64S(s, 0)
-}
-
-type XXHash64 struct {
- v1, v2, v3, v4 uint64
- seed uint64
- ln uint64
- mem [32]byte
- memIdx int8
-}
-
-// Size returns the number of bytes Sum will return.
-func (xx *XXHash64) Size() int {
- return 8
-}
-
-// BlockSize returns the hash's underlying block size.
-// The Write method must be able to accept any amount
-// of data, but it may operate more efficiently if all writes
-// are a multiple of the block size.
-func (xx *XXHash64) BlockSize() int {
- return 32
-}
-
-// NewS64 creates a new hash.Hash64 computing the 64bit xxHash checksum starting with the specific seed.
-func NewS64(seed uint64) (xx *XXHash64) {
- xx = &XXHash64{
- seed: seed,
- }
- xx.Reset()
- return
-}
-
-// New64 creates a new hash.Hash64 computing the 64bit xxHash checksum starting with the seed set to 0x0.
-func New64() *XXHash64 {
- return NewS64(0)
-}
-
-func (xx *XXHash64) Reset() {
- xx.ln, xx.memIdx = 0, 0
- xx.v1, xx.v2, xx.v3, xx.v4 = resetVs64(xx.seed)
-}
-
-// Sum appends the current hash to b and returns the resulting slice.
-// It does not change the underlying hash state.
-func (xx *XXHash64) Sum(in []byte) []byte {
- s := xx.Sum64()
- return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
-}
-
-// MarshalBinary implements the encoding.BinaryMarshaler interface.
-func (xx *XXHash64) MarshalBinary() ([]byte, error) {
- b := make([]byte, 0, marshaled64Size)
- b = append(b, magic64...)
- b = appendUint64(b, xx.v1)
- b = appendUint64(b, xx.v2)
- b = appendUint64(b, xx.v3)
- b = appendUint64(b, xx.v4)
- b = appendUint64(b, xx.seed)
- b = appendUint64(b, xx.ln)
- b = append(b, byte(xx.memIdx))
- b = append(b, xx.mem[:]...)
- return b, nil
-}
-
-// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
-func (xx *XXHash64) UnmarshalBinary(b []byte) error {
- if len(b) < len(magic64) || string(b[:len(magic64)]) != magic64 {
- return errors.New("xxhash: invalid hash state identifier")
- }
- if len(b) != marshaled64Size {
- return errors.New("xxhash: invalid hash state size")
- }
- b = b[len(magic64):]
- b, xx.v1 = consumeUint64(b)
- b, xx.v2 = consumeUint64(b)
- b, xx.v3 = consumeUint64(b)
- b, xx.v4 = consumeUint64(b)
- b, xx.seed = consumeUint64(b)
- b, xx.ln = consumeUint64(b)
- xx.memIdx = int8(b[0])
- b = b[1:]
- copy(xx.mem[:], b)
- return nil
-}
-
-func appendInt32(b []byte, x int32) []byte { return appendUint32(b, uint32(x)) }
-
-func appendUint32(b []byte, x uint32) []byte {
- var a [4]byte
- binary.LittleEndian.PutUint32(a[:], x)
- return append(b, a[:]...)
-}
-
-func appendUint64(b []byte, x uint64) []byte {
- var a [8]byte
- binary.LittleEndian.PutUint64(a[:], x)
- return append(b, a[:]...)
-}
-
-func consumeInt32(b []byte) ([]byte, int32) { bn, x := consumeUint32(b); return bn, int32(x) }
-func consumeUint32(b []byte) ([]byte, uint32) { x := u32(b); return b[4:], x }
-func consumeUint64(b []byte) ([]byte, uint64) { x := u64(b); return b[8:], x }
-
-// force the compiler to use ROTL instructions
-
-func rotl32_1(x uint32) uint32 { return (x << 1) | (x >> (32 - 1)) }
-func rotl32_7(x uint32) uint32 { return (x << 7) | (x >> (32 - 7)) }
-func rotl32_11(x uint32) uint32 { return (x << 11) | (x >> (32 - 11)) }
-func rotl32_12(x uint32) uint32 { return (x << 12) | (x >> (32 - 12)) }
-func rotl32_13(x uint32) uint32 { return (x << 13) | (x >> (32 - 13)) }
-func rotl32_17(x uint32) uint32 { return (x << 17) | (x >> (32 - 17)) }
-func rotl32_18(x uint32) uint32 { return (x << 18) | (x >> (32 - 18)) }
-
-func rotl64_1(x uint64) uint64 { return (x << 1) | (x >> (64 - 1)) }
-func rotl64_7(x uint64) uint64 { return (x << 7) | (x >> (64 - 7)) }
-func rotl64_11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) }
-func rotl64_12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) }
-func rotl64_18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) }
-func rotl64_23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) }
-func rotl64_27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) }
-func rotl64_31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) }
-
-func mix64(h uint64) uint64 {
- h ^= h >> 33
- h *= prime64x2
- h ^= h >> 29
- h *= prime64x3
- h ^= h >> 32
- return h
-}
-
-func resetVs64(seed uint64) (v1, v2, v3, v4 uint64) {
- if seed == 0 {
- return zero64x1, zero64x2, zero64x3, zero64x4
- }
- return (seed + prime64x1 + prime64x2), (seed + prime64x2), (seed), (seed - prime64x1)
-}
-
-// borrowed from cespare
-func round64(h, v uint64) uint64 {
- h += v * prime64x2
- h = rotl64_31(h)
- h *= prime64x1
- return h
-}
-
-func mergeRound64(h, v uint64) uint64 {
- v = round64(0, v)
- h ^= v
- h = h*prime64x1 + prime64x4
- return h
-}
diff --git a/vendor/github.com/OneOfOne/xxhash/xxhash_go17.go b/vendor/github.com/OneOfOne/xxhash/xxhash_go17.go
deleted file mode 100644
index ae48e0c5ca..0000000000
--- a/vendor/github.com/OneOfOne/xxhash/xxhash_go17.go
+++ /dev/null
@@ -1,161 +0,0 @@
-package xxhash
-
-func u32(in []byte) uint32 {
- return uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
-}
-
-func u64(in []byte) uint64 {
- return uint64(in[0]) | uint64(in[1])<<8 | uint64(in[2])<<16 | uint64(in[3])<<24 | uint64(in[4])<<32 | uint64(in[5])<<40 | uint64(in[6])<<48 | uint64(in[7])<<56
-}
-
-// Checksum32S returns the checksum of the input bytes with the specific seed.
-func Checksum32S(in []byte, seed uint32) (h uint32) {
- var i int
-
- if len(in) > 15 {
- var (
- v1 = seed + prime32x1 + prime32x2
- v2 = seed + prime32x2
- v3 = seed + 0
- v4 = seed - prime32x1
- )
- for ; i < len(in)-15; i += 16 {
- in := in[i : i+16 : len(in)]
- v1 += u32(in[0:4:len(in)]) * prime32x2
- v1 = rotl32_13(v1) * prime32x1
-
- v2 += u32(in[4:8:len(in)]) * prime32x2
- v2 = rotl32_13(v2) * prime32x1
-
- v3 += u32(in[8:12:len(in)]) * prime32x2
- v3 = rotl32_13(v3) * prime32x1
-
- v4 += u32(in[12:16:len(in)]) * prime32x2
- v4 = rotl32_13(v4) * prime32x1
- }
-
- h = rotl32_1(v1) + rotl32_7(v2) + rotl32_12(v3) + rotl32_18(v4)
-
- } else {
- h = seed + prime32x5
- }
-
- h += uint32(len(in))
- for ; i <= len(in)-4; i += 4 {
- in := in[i : i+4 : len(in)]
- h += u32(in[0:4:len(in)]) * prime32x3
- h = rotl32_17(h) * prime32x4
- }
-
- for ; i < len(in); i++ {
- h += uint32(in[i]) * prime32x5
- h = rotl32_11(h) * prime32x1
- }
-
- h ^= h >> 15
- h *= prime32x2
- h ^= h >> 13
- h *= prime32x3
- h ^= h >> 16
-
- return
-}
-
-func (xx *XXHash32) Write(in []byte) (n int, err error) {
- i, ml := 0, int(xx.memIdx)
- n = len(in)
- xx.ln += int32(n)
-
- if d := 16 - ml; ml > 0 && ml+len(in) > 16 {
- xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in[:d]))
- ml, in = 16, in[d:len(in):len(in)]
- } else if ml+len(in) < 16 {
- xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in))
- return
- }
-
- if ml > 0 {
- i += 16 - ml
- xx.memIdx += int32(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in))
- in := xx.mem[:16:len(xx.mem)]
-
- xx.v1 += u32(in[0:4:len(in)]) * prime32x2
- xx.v1 = rotl32_13(xx.v1) * prime32x1
-
- xx.v2 += u32(in[4:8:len(in)]) * prime32x2
- xx.v2 = rotl32_13(xx.v2) * prime32x1
-
- xx.v3 += u32(in[8:12:len(in)]) * prime32x2
- xx.v3 = rotl32_13(xx.v3) * prime32x1
-
- xx.v4 += u32(in[12:16:len(in)]) * prime32x2
- xx.v4 = rotl32_13(xx.v4) * prime32x1
-
- xx.memIdx = 0
- }
-
- for ; i <= len(in)-16; i += 16 {
- in := in[i : i+16 : len(in)]
- xx.v1 += u32(in[0:4:len(in)]) * prime32x2
- xx.v1 = rotl32_13(xx.v1) * prime32x1
-
- xx.v2 += u32(in[4:8:len(in)]) * prime32x2
- xx.v2 = rotl32_13(xx.v2) * prime32x1
-
- xx.v3 += u32(in[8:12:len(in)]) * prime32x2
- xx.v3 = rotl32_13(xx.v3) * prime32x1
-
- xx.v4 += u32(in[12:16:len(in)]) * prime32x2
- xx.v4 = rotl32_13(xx.v4) * prime32x1
- }
-
- if len(in)-i != 0 {
- xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in[i:len(in):len(in)]))
- }
-
- return
-}
-
-func (xx *XXHash32) Sum32() (h uint32) {
- var i int32
- if xx.ln > 15 {
- h = rotl32_1(xx.v1) + rotl32_7(xx.v2) + rotl32_12(xx.v3) + rotl32_18(xx.v4)
- } else {
- h = xx.seed + prime32x5
- }
-
- h += uint32(xx.ln)
-
- if xx.memIdx > 0 {
- for ; i < xx.memIdx-3; i += 4 {
- in := xx.mem[i : i+4 : len(xx.mem)]
- h += u32(in[0:4:len(in)]) * prime32x3
- h = rotl32_17(h) * prime32x4
- }
-
- for ; i < xx.memIdx; i++ {
- h += uint32(xx.mem[i]) * prime32x5
- h = rotl32_11(h) * prime32x1
- }
- }
- h ^= h >> 15
- h *= prime32x2
- h ^= h >> 13
- h *= prime32x3
- h ^= h >> 16
-
- return
-}
-
-// Checksum64S returns the 64bit xxhash checksum for a single input
-func Checksum64S(in []byte, seed uint64) uint64 {
- if len(in) == 0 && seed == 0 {
- return 0xef46db3751d8e999
- }
-
- if len(in) > 31 {
- return checksum64(in, seed)
- }
-
- return checksum64Short(in, seed)
-}
diff --git a/vendor/github.com/OneOfOne/xxhash/xxhash_safe.go b/vendor/github.com/OneOfOne/xxhash/xxhash_safe.go
deleted file mode 100644
index e92ec29e02..0000000000
--- a/vendor/github.com/OneOfOne/xxhash/xxhash_safe.go
+++ /dev/null
@@ -1,183 +0,0 @@
-// +build appengine safe ppc64le ppc64be mipsle mips s390x
-
-package xxhash
-
-// Backend returns the current version of xxhash being used.
-const Backend = "GoSafe"
-
-func ChecksumString32S(s string, seed uint32) uint32 {
- return Checksum32S([]byte(s), seed)
-}
-
-func (xx *XXHash32) WriteString(s string) (int, error) {
- if len(s) == 0 {
- return 0, nil
- }
- return xx.Write([]byte(s))
-}
-
-func ChecksumString64S(s string, seed uint64) uint64 {
- return Checksum64S([]byte(s), seed)
-}
-
-func (xx *XXHash64) WriteString(s string) (int, error) {
- if len(s) == 0 {
- return 0, nil
- }
- return xx.Write([]byte(s))
-}
-
-func checksum64(in []byte, seed uint64) (h uint64) {
- var (
- v1, v2, v3, v4 = resetVs64(seed)
-
- i int
- )
-
- for ; i < len(in)-31; i += 32 {
- in := in[i : i+32 : len(in)]
- v1 = round64(v1, u64(in[0:8:len(in)]))
- v2 = round64(v2, u64(in[8:16:len(in)]))
- v3 = round64(v3, u64(in[16:24:len(in)]))
- v4 = round64(v4, u64(in[24:32:len(in)]))
- }
-
- h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4)
-
- h = mergeRound64(h, v1)
- h = mergeRound64(h, v2)
- h = mergeRound64(h, v3)
- h = mergeRound64(h, v4)
-
- h += uint64(len(in))
-
- for ; i < len(in)-7; i += 8 {
- h ^= round64(0, u64(in[i:len(in):len(in)]))
- h = rotl64_27(h)*prime64x1 + prime64x4
- }
-
- for ; i < len(in)-3; i += 4 {
- h ^= uint64(u32(in[i:len(in):len(in)])) * prime64x1
- h = rotl64_23(h)*prime64x2 + prime64x3
- }
-
- for ; i < len(in); i++ {
- h ^= uint64(in[i]) * prime64x5
- h = rotl64_11(h) * prime64x1
- }
-
- return mix64(h)
-}
-
-func checksum64Short(in []byte, seed uint64) uint64 {
- var (
- h = seed + prime64x5 + uint64(len(in))
- i int
- )
-
- for ; i < len(in)-7; i += 8 {
- k := u64(in[i : i+8 : len(in)])
- h ^= round64(0, k)
- h = rotl64_27(h)*prime64x1 + prime64x4
- }
-
- for ; i < len(in)-3; i += 4 {
- h ^= uint64(u32(in[i:i+4:len(in)])) * prime64x1
- h = rotl64_23(h)*prime64x2 + prime64x3
- }
-
- for ; i < len(in); i++ {
- h ^= uint64(in[i]) * prime64x5
- h = rotl64_11(h) * prime64x1
- }
-
- return mix64(h)
-}
-
-func (xx *XXHash64) Write(in []byte) (n int, err error) {
- var (
- ml = int(xx.memIdx)
- d = 32 - ml
- )
-
- n = len(in)
- xx.ln += uint64(n)
-
- if ml+len(in) < 32 {
- xx.memIdx += int8(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in))
- return
- }
-
- i, v1, v2, v3, v4 := 0, xx.v1, xx.v2, xx.v3, xx.v4
- if ml > 0 && ml+len(in) > 32 {
- xx.memIdx += int8(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in[:d:len(in)]))
- in = in[d:len(in):len(in)]
-
- in := xx.mem[0:32:len(xx.mem)]
-
- v1 = round64(v1, u64(in[0:8:len(in)]))
- v2 = round64(v2, u64(in[8:16:len(in)]))
- v3 = round64(v3, u64(in[16:24:len(in)]))
- v4 = round64(v4, u64(in[24:32:len(in)]))
-
- xx.memIdx = 0
- }
-
- for ; i < len(in)-31; i += 32 {
- in := in[i : i+32 : len(in)]
- v1 = round64(v1, u64(in[0:8:len(in)]))
- v2 = round64(v2, u64(in[8:16:len(in)]))
- v3 = round64(v3, u64(in[16:24:len(in)]))
- v4 = round64(v4, u64(in[24:32:len(in)]))
- }
-
- if len(in)-i != 0 {
- xx.memIdx += int8(copy(xx.mem[xx.memIdx:], in[i:len(in):len(in)]))
- }
-
- xx.v1, xx.v2, xx.v3, xx.v4 = v1, v2, v3, v4
-
- return
-}
-
-func (xx *XXHash64) Sum64() (h uint64) {
- var i int
- if xx.ln > 31 {
- v1, v2, v3, v4 := xx.v1, xx.v2, xx.v3, xx.v4
- h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4)
-
- h = mergeRound64(h, v1)
- h = mergeRound64(h, v2)
- h = mergeRound64(h, v3)
- h = mergeRound64(h, v4)
- } else {
- h = xx.seed + prime64x5
- }
-
- h += uint64(xx.ln)
- if xx.memIdx > 0 {
- in := xx.mem[:xx.memIdx]
- for ; i < int(xx.memIdx)-7; i += 8 {
- in := in[i : i+8 : len(in)]
- k := u64(in[0:8:len(in)])
- k *= prime64x2
- k = rotl64_31(k)
- k *= prime64x1
- h ^= k
- h = rotl64_27(h)*prime64x1 + prime64x4
- }
-
- for ; i < int(xx.memIdx)-3; i += 4 {
- in := in[i : i+4 : len(in)]
- h ^= uint64(u32(in[0:4:len(in)])) * prime64x1
- h = rotl64_23(h)*prime64x2 + prime64x3
- }
-
- for ; i < int(xx.memIdx); i++ {
- h ^= uint64(in[i]) * prime64x5
- h = rotl64_11(h) * prime64x1
- }
- }
-
- return mix64(h)
-}
diff --git a/vendor/github.com/OneOfOne/xxhash/xxhash_unsafe.go b/vendor/github.com/OneOfOne/xxhash/xxhash_unsafe.go
deleted file mode 100644
index 1e2b5e8f1f..0000000000
--- a/vendor/github.com/OneOfOne/xxhash/xxhash_unsafe.go
+++ /dev/null
@@ -1,240 +0,0 @@
-// +build !safe
-// +build !appengine
-// +build !ppc64le
-// +build !mipsle
-// +build !ppc64be
-// +build !mips
-// +build !s390x
-
-package xxhash
-
-import (
- "reflect"
- "unsafe"
-)
-
-// Backend returns the current version of xxhash being used.
-const Backend = "GoUnsafe"
-
-// ChecksumString32S returns the checksum of the input data, without creating a copy, with the specific seed.
-func ChecksumString32S(s string, seed uint32) uint32 {
- if len(s) == 0 {
- return Checksum32S(nil, seed)
- }
- ss := (*reflect.StringHeader)(unsafe.Pointer(&s))
- return Checksum32S((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)], seed)
-}
-
-func (xx *XXHash32) WriteString(s string) (int, error) {
- if len(s) == 0 {
- return 0, nil
- }
-
- ss := (*reflect.StringHeader)(unsafe.Pointer(&s))
- return xx.Write((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)])
-}
-
-// ChecksumString64S returns the checksum of the input data, without creating a copy, with the specific seed.
-func ChecksumString64S(s string, seed uint64) uint64 {
- if len(s) == 0 {
- return Checksum64S(nil, seed)
- }
-
- ss := (*reflect.StringHeader)(unsafe.Pointer(&s))
- return Checksum64S((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)], seed)
-}
-
-func (xx *XXHash64) WriteString(s string) (int, error) {
- if len(s) == 0 {
- return 0, nil
- }
- ss := (*reflect.StringHeader)(unsafe.Pointer(&s))
- return xx.Write((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)])
-}
-
-//go:nocheckptr
-func checksum64(in []byte, seed uint64) uint64 {
- var (
- wordsLen = len(in) >> 3
- words = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen]
-
- v1, v2, v3, v4 = resetVs64(seed)
-
- h uint64
- i int
- )
-
- for ; i < len(words)-3; i += 4 {
- words := (*[4]uint64)(unsafe.Pointer(&words[i]))
-
- v1 = round64(v1, words[0])
- v2 = round64(v2, words[1])
- v3 = round64(v3, words[2])
- v4 = round64(v4, words[3])
- }
-
- h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4)
-
- h = mergeRound64(h, v1)
- h = mergeRound64(h, v2)
- h = mergeRound64(h, v3)
- h = mergeRound64(h, v4)
-
- h += uint64(len(in))
-
- for _, k := range words[i:] {
- h ^= round64(0, k)
- h = rotl64_27(h)*prime64x1 + prime64x4
- }
-
- if in = in[wordsLen<<3 : len(in) : len(in)]; len(in) > 3 {
- words := (*[1]uint32)(unsafe.Pointer(&in[0]))
- h ^= uint64(words[0]) * prime64x1
- h = rotl64_23(h)*prime64x2 + prime64x3
-
- in = in[4:len(in):len(in)]
- }
-
- for _, b := range in {
- h ^= uint64(b) * prime64x5
- h = rotl64_11(h) * prime64x1
- }
-
- return mix64(h)
-}
-
-//go:nocheckptr
-func checksum64Short(in []byte, seed uint64) uint64 {
- var (
- h = seed + prime64x5 + uint64(len(in))
- i int
- )
-
- if len(in) > 7 {
- var (
- wordsLen = len(in) >> 3
- words = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen]
- )
-
- for i := range words {
- h ^= round64(0, words[i])
- h = rotl64_27(h)*prime64x1 + prime64x4
- }
-
- i = wordsLen << 3
- }
-
- if in = in[i:len(in):len(in)]; len(in) > 3 {
- words := (*[1]uint32)(unsafe.Pointer(&in[0]))
- h ^= uint64(words[0]) * prime64x1
- h = rotl64_23(h)*prime64x2 + prime64x3
-
- in = in[4:len(in):len(in)]
- }
-
- for _, b := range in {
- h ^= uint64(b) * prime64x5
- h = rotl64_11(h) * prime64x1
- }
-
- return mix64(h)
-}
-
-func (xx *XXHash64) Write(in []byte) (n int, err error) {
- mem, idx := xx.mem[:], int(xx.memIdx)
-
- xx.ln, n = xx.ln+uint64(len(in)), len(in)
-
- if idx+len(in) < 32 {
- xx.memIdx += int8(copy(mem[idx:len(mem):len(mem)], in))
- return
- }
-
- var (
- v1, v2, v3, v4 = xx.v1, xx.v2, xx.v3, xx.v4
-
- i int
- )
-
- if d := 32 - int(idx); d > 0 && int(idx)+len(in) > 31 {
- copy(mem[idx:len(mem):len(mem)], in[:len(in):len(in)])
-
- words := (*[4]uint64)(unsafe.Pointer(&mem[0]))
-
- v1 = round64(v1, words[0])
- v2 = round64(v2, words[1])
- v3 = round64(v3, words[2])
- v4 = round64(v4, words[3])
-
- if in, xx.memIdx = in[d:len(in):len(in)], 0; len(in) == 0 {
- goto RET
- }
- }
-
- for ; i < len(in)-31; i += 32 {
- words := (*[4]uint64)(unsafe.Pointer(&in[i]))
-
- v1 = round64(v1, words[0])
- v2 = round64(v2, words[1])
- v3 = round64(v3, words[2])
- v4 = round64(v4, words[3])
- }
-
- if len(in)-i != 0 {
- xx.memIdx += int8(copy(mem[xx.memIdx:len(mem):len(mem)], in[i:len(in):len(in)]))
- }
-
-RET:
- xx.v1, xx.v2, xx.v3, xx.v4 = v1, v2, v3, v4
-
- return
-}
-
-func (xx *XXHash64) Sum64() (h uint64) {
- if seed := xx.seed; xx.ln > 31 {
- v1, v2, v3, v4 := xx.v1, xx.v2, xx.v3, xx.v4
- h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4)
-
- h = mergeRound64(h, v1)
- h = mergeRound64(h, v2)
- h = mergeRound64(h, v3)
- h = mergeRound64(h, v4)
- } else if seed == 0 {
- h = prime64x5
- } else {
- h = seed + prime64x5
- }
-
- h += uint64(xx.ln)
-
- if xx.memIdx == 0 {
- return mix64(h)
- }
-
- var (
- in = xx.mem[:xx.memIdx:xx.memIdx]
- wordsLen = len(in) >> 3
- words = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen]
- )
-
- for _, k := range words {
- h ^= round64(0, k)
- h = rotl64_27(h)*prime64x1 + prime64x4
- }
-
- if in = in[wordsLen<<3 : len(in) : len(in)]; len(in) > 3 {
- words := (*[1]uint32)(unsafe.Pointer(&in[0]))
-
- h ^= uint64(words[0]) * prime64x1
- h = rotl64_23(h)*prime64x2 + prime64x3
-
- in = in[4:len(in):len(in)]
- }
-
- for _, b := range in {
- h ^= uint64(b) * prime64x5
- h = rotl64_11(h) * prime64x1
- }
-
- return mix64(h)
-}
diff --git a/vendor/github.com/containerd/containerd/v2/version/version.go b/vendor/github.com/containerd/containerd/v2/version/version.go
index 758bd631fe..d6d0eb04ed 100644
--- a/vendor/github.com/containerd/containerd/v2/version/version.go
+++ b/vendor/github.com/containerd/containerd/v2/version/version.go
@@ -24,7 +24,7 @@ var (
Package = "github.com/containerd/containerd/v2"
// Version holds the complete version number. Filled in at linking time.
- Version = "2.1.2+unknown"
+ Version = "2.1.4+unknown"
// Revision is filled with the VCS (e.g. git) revision being used to build
// the program at linking time.
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
index 62d91b77d5..5673f5c0bc 100644
--- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
@@ -1,3 +1,4 @@
+// Package md2man aims in converting markdown into roff (man pages).
package md2man
import (
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
index 9d6c473fdc..4f1070fc5b 100644
--- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
@@ -47,13 +47,13 @@ const (
tableStart = "\n.TS\nallbox;\n"
tableEnd = ".TE\n"
tableCellStart = "T{\n"
- tableCellEnd = "\nT}\n"
+ tableCellEnd = "\nT}"
tablePreprocessor = `'\" t`
)
// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
// from markdown
-func NewRoffRenderer() *roffRenderer { // nolint: golint
+func NewRoffRenderer() *roffRenderer {
return &roffRenderer{}
}
@@ -104,7 +104,7 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
node.Parent.Prev.Type == blackfriday.Heading &&
node.Parent.Prev.FirstChild != nil &&
bytes.EqualFold(node.Parent.Prev.FirstChild.Literal, []byte("NAME")) {
- before, after, found := bytes.Cut(node.Literal, []byte(" - "))
+ before, after, found := bytesCut(node.Literal, []byte(" - "))
escapeSpecialChars(w, before)
if found {
out(w, ` \- `)
@@ -316,9 +316,8 @@ func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, ente
} else if nodeLiteralSize(node) > 30 {
end = tableCellEnd
}
- if node.Next == nil && end != tableCellEnd {
- // Last cell: need to carriage return if we are at the end of the
- // header row and content isn't wrapped in a "tablecell"
+ if node.Next == nil {
+ // Last cell: need to carriage return if we are at the end of the header row.
end += crTag
}
out(w, end)
@@ -356,7 +355,7 @@ func countColumns(node *blackfriday.Node) int {
}
func out(w io.Writer, output string) {
- io.WriteString(w, output) // nolint: errcheck
+ io.WriteString(w, output) //nolint:errcheck
}
func escapeSpecialChars(w io.Writer, text []byte) {
@@ -395,7 +394,7 @@ func escapeSpecialCharsLine(w io.Writer, text []byte) {
i++
}
if i > org {
- w.Write(text[org:i]) // nolint: errcheck
+ w.Write(text[org:i]) //nolint:errcheck
}
// escape a character
@@ -403,6 +402,15 @@ func escapeSpecialCharsLine(w io.Writer, text []byte) {
break
}
- w.Write([]byte{'\\', text[i]}) // nolint: errcheck
+ w.Write([]byte{'\\', text[i]}) //nolint:errcheck
}
}
+
+// bytesCut is a copy of [bytes.Cut] to provide compatibility with go1.17
+// and older. We can remove this once we drop support for go1.17 and older.
+func bytesCut(s, sep []byte) (before, after []byte, found bool) {
+ if i := bytes.Index(s, sep); i >= 0 {
+ return s[:i], s[i+len(sep):], true
+ }
+ return s, nil, false
+}
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/LICENSE b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/LICENSE
index d2d1dd933e..fdf6d88225 100644
--- a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/LICENSE
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/LICENSE
@@ -1,7 +1,7 @@
ISC License
Copyright (c) 2013-2017 The btcsuite developers
-Copyright (c) 2015-2020 The Decred developers
+Copyright (c) 2015-2024 The Decred developers
Copyright (c) 2017 The Lightning Network Developers
Permission to use, copy, modify, and distribute this software for any
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve.go
index c9d47f3078..6d6d669f19 100644
--- a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve.go
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2015-2022 The Decred developers
+// Copyright (c) 2015-2024 The Decred developers
// Copyright 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
@@ -149,6 +149,20 @@ func (p *JacobianPoint) ToAffine() {
p.Y.Normalize()
}
+// EquivalentNonConst returns whether or not two Jacobian points represent the
+// same affine point in *non-constant* time.
+func (p *JacobianPoint) EquivalentNonConst(other *JacobianPoint) bool {
+ // Since the point at infinity is the identity element for the group, note
+ // that P = P + ∞ trivially implies that P - P = ∞.
+ //
+ // Use that fact to determine if the points represent the same affine point.
+ var result JacobianPoint
+ result.Set(p)
+ result.Y.Normalize().Negate(1).Normalize()
+ AddNonConst(&result, other, &result)
+ return (result.X.IsZero() && result.Y.IsZero()) || result.Z.IsZero()
+}
+
// addZ1AndZ2EqualsOne adds two Jacobian points that are already known to have
// z values of 1 and stores the result in the provided result param. That is to
// say result = p1 + p2. It performs faster addition than the generic add
@@ -823,7 +837,7 @@ func splitK(k *ModNScalar) (ModNScalar, ModNScalar) {
//
// Finally, consider the vector u:
//
- // u = - v
+ // u = - v
//
// It follows that f(u) = k and thus the two components of vector u satisfy
// the required equation:
@@ -891,10 +905,10 @@ func splitK(k *ModNScalar) (ModNScalar, ModNScalar) {
// Therefore, the computation of va can be avoided to save two
// field multiplications and a field addition.
//
- // 2) Since k1 = k - k2*λ = k + k2*(-λ), an additional field negation is
+ // 2) Since k1 ≡ k - k2*λ ≡ k + k2*(-λ), an additional field negation is
// saved by storing and using the negative version of λ.
//
- // 3) Since k2 = -vb = -(c1*b1 + c2*b2) = c1*(-b1) + c2*(-b2), one more
+ // 3) Since k2 ≡ -vb ≡ -(c1*b1 + c2*b2) ≡ c1*(-b1) + c2*(-b2), one more
// field negation is saved by storing and using the negative versions of
// b1 and b2.
//
@@ -1221,6 +1235,25 @@ func ScalarMultNonConst(k *ModNScalar, point, result *JacobianPoint) {
//
// NOTE: The resulting point will be normalized.
func ScalarBaseMultNonConst(k *ModNScalar, result *JacobianPoint) {
+ scalarBaseMultNonConst(k, result)
+}
+
+// jacobianG is the secp256k1 base point converted to Jacobian coordinates and
+// is defined here to avoid repeatedly converting it.
+var jacobianG = func() JacobianPoint {
+ var G JacobianPoint
+ bigAffineToJacobian(curveParams.Gx, curveParams.Gy, &G)
+ return G
+}()
+
+// scalarBaseMultNonConstSlow computes k*G through ScalarMultNonConst.
+func scalarBaseMultNonConstSlow(k *ModNScalar, result *JacobianPoint) {
+ ScalarMultNonConst(k, &jacobianG, result)
+}
+
+// scalarBaseMultNonConstFast computes k*G through the precomputed lookup
+// tables.
+func scalarBaseMultNonConstFast(k *ModNScalar, result *JacobianPoint) {
bytePoints := s256BytePoints()
// Start with the point at infinity.
@@ -1252,8 +1285,13 @@ func isOnCurve(fx, fy *FieldVal) bool {
// based on the desired oddness and returns whether or not it was successful
// since not all X coordinates are valid.
//
-// The magnitude of the provided X coordinate field val must be a max of 8 for a
-// correct result. The resulting Y field val will have a max magnitude of 2.
+// The magnitude of the provided X coordinate field value must be a max of 8 for
+// a correct result. The resulting Y field value will have a magnitude of 1.
+//
+// Preconditions:
+// - The input field value MUST have a max magnitude of 8
+// Output Normalized: Yes if the func returns true, no otherwise
+// Output Max Magnitude: 1
func DecompressY(x *FieldVal, odd bool, resultY *FieldVal) bool {
// The curve equation for secp256k1 is: y^2 = x^3 + 7. Thus
// y = +-sqrt(x^3 + 7).
@@ -1266,7 +1304,7 @@ func DecompressY(x *FieldVal, odd bool, resultY *FieldVal) bool {
return false
}
if resultY.Normalize().IsOdd() != odd {
- resultY.Negate(1)
+ resultY.Negate(1).Normalize()
}
return true
}
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve_embedded.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve_embedded.go
new file mode 100644
index 0000000000..16288318c1
--- /dev/null
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve_embedded.go
@@ -0,0 +1,14 @@
+// Copyright (c) 2024 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+//go:build tinygo
+
+package secp256k1
+
+// This file contains the variants suitable for
+// memory or storage constrained environments.
+
+func scalarBaseMultNonConst(k *ModNScalar, result *JacobianPoint) {
+ scalarBaseMultNonConstSlow(k, result)
+}
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve_precompute.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve_precompute.go
new file mode 100644
index 0000000000..cf84f770ed
--- /dev/null
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve_precompute.go
@@ -0,0 +1,14 @@
+// Copyright (c) 2024 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+//go:build !tinygo
+
+package secp256k1
+
+// This file contains the variants that don't fit in
+// memory or storage constrained environments.
+
+func scalarBaseMultNonConst(k *ModNScalar, result *JacobianPoint) {
+ scalarBaseMultNonConstFast(k, result)
+}
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ellipticadaptor.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ellipticadaptor.go
index 42022646b1..a3a45af317 100644
--- a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ellipticadaptor.go
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ellipticadaptor.go
@@ -153,17 +153,17 @@ func moduloReduce(k []byte) []byte {
return k
}
-// ScalarMult returns k*(Bx, By) where k is a big endian integer.
+// ScalarMult returns k*(bx, by) where k is a big endian integer.
//
// This is part of the elliptic.Curve interface implementation.
-func (curve *KoblitzCurve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {
+func (curve *KoblitzCurve) ScalarMult(bx, by *big.Int, k []byte) (*big.Int, *big.Int) {
// Convert the affine coordinates from big integers to Jacobian points,
// do the multiplication in Jacobian projective space, and convert the
// Jacobian point back to affine big.Ints.
var kModN ModNScalar
kModN.SetByteSlice(moduloReduce(k))
var point, result JacobianPoint
- bigAffineToJacobian(Bx, By, &point)
+ bigAffineToJacobian(bx, by, &point)
ScalarMultNonConst(&kModN, &point, &result)
return jacobianToBigAffine(&result)
}
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/field.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/field.go
index 8d9ac74d53..f979bb2efe 100644
--- a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/field.go
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/field.go
@@ -1,6 +1,6 @@
// Copyright (c) 2013-2014 The btcsuite developers
-// Copyright (c) 2015-2022 The Decred developers
-// Copyright (c) 2013-2022 Dave Collins
+// Copyright (c) 2015-2024 The Decred developers
+// Copyright (c) 2013-2024 Dave Collins
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
@@ -139,7 +139,7 @@ const (
// and the caller MUST normalize the field value if a given operation would
// cause the magnitude of the result to exceed the max allowed value.
//
-// IMPORTANT: The max allowed magnitude of a field value is 64.
+// IMPORTANT: The max allowed magnitude of a field value is 32.
type FieldVal struct {
// Each 256-bit value is represented as 10 32-bit integers in base 2^26.
// This provides 6 bits of overflow in each word (10 bits in the most
@@ -364,26 +364,26 @@ func (f *FieldVal) Normalize() *FieldVal {
// additional carry to bit 256 (bit 22 of the high order word).
t9 := f.n[9]
m := t9 >> fieldMSBBits
- t9 = t9 & fieldMSBMask
+ t9 &= fieldMSBMask
t0 := f.n[0] + m*977
t1 := (t0 >> fieldBase) + f.n[1] + (m << 6)
- t0 = t0 & fieldBaseMask
+ t0 &= fieldBaseMask
t2 := (t1 >> fieldBase) + f.n[2]
- t1 = t1 & fieldBaseMask
+ t1 &= fieldBaseMask
t3 := (t2 >> fieldBase) + f.n[3]
- t2 = t2 & fieldBaseMask
+ t2 &= fieldBaseMask
t4 := (t3 >> fieldBase) + f.n[4]
- t3 = t3 & fieldBaseMask
+ t3 &= fieldBaseMask
t5 := (t4 >> fieldBase) + f.n[5]
- t4 = t4 & fieldBaseMask
+ t4 &= fieldBaseMask
t6 := (t5 >> fieldBase) + f.n[6]
- t5 = t5 & fieldBaseMask
+ t5 &= fieldBaseMask
t7 := (t6 >> fieldBase) + f.n[7]
- t6 = t6 & fieldBaseMask
+ t6 &= fieldBaseMask
t8 := (t7 >> fieldBase) + f.n[8]
- t7 = t7 & fieldBaseMask
+ t7 &= fieldBaseMask
t9 = (t8 >> fieldBase) + t9
- t8 = t8 & fieldBaseMask
+ t8 &= fieldBaseMask
// At this point, the magnitude is guaranteed to be one, however, the
// value could still be greater than the prime if there was either a
@@ -398,26 +398,26 @@ func (f *FieldVal) Normalize() *FieldVal {
m &= constantTimeEq(t8&t7&t6&t5&t4&t3&t2, fieldBaseMask)
m &= constantTimeGreater(t1+64+((t0+977)>>fieldBase), fieldBaseMask)
m |= t9 >> fieldMSBBits
- t0 = t0 + m*977
+ t0 += m * 977
t1 = (t0 >> fieldBase) + t1 + (m << 6)
- t0 = t0 & fieldBaseMask
+ t0 &= fieldBaseMask
t2 = (t1 >> fieldBase) + t2
- t1 = t1 & fieldBaseMask
+ t1 &= fieldBaseMask
t3 = (t2 >> fieldBase) + t3
- t2 = t2 & fieldBaseMask
+ t2 &= fieldBaseMask
t4 = (t3 >> fieldBase) + t4
- t3 = t3 & fieldBaseMask
+ t3 &= fieldBaseMask
t5 = (t4 >> fieldBase) + t5
- t4 = t4 & fieldBaseMask
+ t4 &= fieldBaseMask
t6 = (t5 >> fieldBase) + t6
- t5 = t5 & fieldBaseMask
+ t5 &= fieldBaseMask
t7 = (t6 >> fieldBase) + t7
- t6 = t6 & fieldBaseMask
+ t6 &= fieldBaseMask
t8 = (t7 >> fieldBase) + t8
- t7 = t7 & fieldBaseMask
+ t7 &= fieldBaseMask
t9 = (t8 >> fieldBase) + t9
- t8 = t8 & fieldBaseMask
- t9 = t9 & fieldMSBMask // Remove potential multiple of 2^256.
+ t8 &= fieldBaseMask
+ t9 &= fieldMSBMask // Remove potential multiple of 2^256.
// Finally, set the normalized and reduced words.
f.n[0] = t0
@@ -435,7 +435,7 @@ func (f *FieldVal) Normalize() *FieldVal {
// PutBytesUnchecked unpacks the field value to a 32-byte big-endian value
// directly into the passed byte slice in constant time. The target slice must
-// must have at least 32 bytes available or it will panic.
+// have at least 32 bytes available or it will panic.
//
// There is a similar function, PutBytes, which unpacks the field value into a
// 32-byte array directly. This version is provided since it can be useful
@@ -628,14 +628,14 @@ func (f *FieldVal) Equals(val *FieldVal) bool {
}
// NegateVal negates the passed value and stores the result in f in constant
-// time. The caller must provide the magnitude of the passed value for a
-// correct result.
+// time. The caller must provide the maximum magnitude of the passed value for
+// a correct result.
//
// The field value is returned to support chaining. This enables syntax like:
// f.NegateVal(f2).AddInt(1) so that f = -f2 + 1.
//
// Preconditions:
-// - The max magnitude MUST be 63
+// - The max magnitude MUST be 31
// Output Normalized: No
// Output Max Magnitude: Input magnitude + 1
func (f *FieldVal) NegateVal(val *FieldVal, magnitude uint32) *FieldVal {
@@ -672,14 +672,14 @@ func (f *FieldVal) NegateVal(val *FieldVal, magnitude uint32) *FieldVal {
}
// Negate negates the field value in constant time. The existing field value is
-// modified. The caller must provide the magnitude of the field value for a
-// correct result.
+// modified. The caller must provide the maximum magnitude of the field value
+// for a correct result.
//
// The field value is returned to support chaining. This enables syntax like:
// f.Negate().AddInt(1) so that f = -f + 1.
//
// Preconditions:
-// - The max magnitude MUST be 63
+// - The max magnitude MUST be 31
// Output Normalized: No
// Output Max Magnitude: Input magnitude + 1
func (f *FieldVal) Negate(magnitude uint32) *FieldVal {
@@ -694,7 +694,8 @@ func (f *FieldVal) Negate(magnitude uint32) *FieldVal {
// f.AddInt(1).Add(f2) so that f = f + 1 + f2.
//
// Preconditions:
-// - The field value MUST have a max magnitude of 63
+// - The field value MUST have a max magnitude of 31
+// - The integer MUST be a max of 32767
// Output Normalized: No
// Output Max Magnitude: Existing field magnitude + 1
func (f *FieldVal) AddInt(ui uint16) *FieldVal {
@@ -713,7 +714,7 @@ func (f *FieldVal) AddInt(ui uint16) *FieldVal {
// f.Add(f2).AddInt(1) so that f = f + f2 + 1.
//
// Preconditions:
-// - The sum of the magnitudes of the two field values MUST be a max of 64
+// - The sum of the magnitudes of the two field values MUST be a max of 32
// Output Normalized: No
// Output Max Magnitude: Sum of the magnitude of the two individual field values
func (f *FieldVal) Add(val *FieldVal) *FieldVal {
@@ -742,7 +743,7 @@ func (f *FieldVal) Add(val *FieldVal) *FieldVal {
// f3.Add2(f, f2).AddInt(1) so that f3 = f + f2 + 1.
//
// Preconditions:
-// - The sum of the magnitudes of the two field values MUST be a max of 64
+// - The sum of the magnitudes of the two field values MUST be a max of 32
// Output Normalized: No
// Output Max Magnitude: Sum of the magnitude of the two field values
func (f *FieldVal) Add2(val *FieldVal, val2 *FieldVal) *FieldVal {
@@ -774,7 +775,7 @@ func (f *FieldVal) Add2(val *FieldVal, val2 *FieldVal) *FieldVal {
// f.MulInt(2).Add(f2) so that f = 2 * f + f2.
//
// Preconditions:
-// - The field value magnitude multiplied by given val MUST be a max of 64
+// - The field value magnitude multiplied by given val MUST be a max of 32
// Output Normalized: No
// Output Max Magnitude: Existing field magnitude times the provided integer val
func (f *FieldVal) MulInt(val uint8) *FieldVal {
@@ -816,11 +817,10 @@ func (f *FieldVal) Mul(val *FieldVal) *FieldVal {
return f.Mul2(f, val)
}
-// Mul2 multiplies the passed two field values together and stores the result
-// result in f in constant time. Note that this function can overflow if
-// multiplying any of the individual words exceeds a max uint32. In practice,
-// this means the magnitude of either value involved in the multiplication must
-// be a max of 8.
+// Mul2 multiplies the passed two field values together and stores the result in
+// f in constant time. Note that this function can overflow if multiplying any
+// of the individual words exceeds a max uint32. In practice, this means the
+// magnitude of either value involved in the multiplication must be a max of 8.
//
// The field value is returned to support chaining. This enables syntax like:
// f3.Mul2(f, f2).AddInt(1) so that f3 = (f * f2) + 1.
@@ -1059,7 +1059,7 @@ func (f *FieldVal) Mul2(val *FieldVal, val2 *FieldVal) *FieldVal {
t8 = m & fieldBaseMask
m = (m >> fieldBase) + t9 + t18*1024 + t19*68719492368
t9 = m & fieldMSBMask
- m = m >> fieldMSBBits
+ m >>= fieldMSBBits
// At this point, if the magnitude is greater than 0, the overall value
// is greater than the max possible 256-bit value. In particular, it is
@@ -1466,7 +1466,7 @@ func (f *FieldVal) SquareVal(val *FieldVal) *FieldVal {
t8 = m & fieldBaseMask
m = (m >> fieldBase) + t9 + t18*1024 + t19*68719492368
t9 = m & fieldMSBMask
- m = m >> fieldMSBBits
+ m >>= fieldMSBBits
// At this point, if the magnitude is greater than 0, the overall value
// is greater than the max possible 256-bit value. In particular, it is
@@ -1509,111 +1509,126 @@ func (f *FieldVal) SquareVal(val *FieldVal) *FieldVal {
// Output Normalized: No
// Output Max Magnitude: 1
func (f *FieldVal) Inverse() *FieldVal {
- // Fermat's little theorem states that for a nonzero number a and prime
- // prime p, a^(p-1) = 1 (mod p). Since the multiplicative inverse is
- // a*b = 1 (mod p), it follows that b = a*a^(p-2) = a^(p-1) = 1 (mod p).
- // Thus, a^(p-2) is the multiplicative inverse.
+ // Fermat's little theorem states that for a nonzero number 'a' and prime
+ // 'p', a^(p-1) ≡ 1 (mod p). Multiplying both sides of the equation by the
+ // multiplicative inverse a^-1 yields a^(p-2) ≡ a^-1 (mod p). Thus, a^(p-2)
+ // is the multiplicative inverse.
+ //
+ // In order to efficiently compute a^(p-2), p-2 needs to be split into a
+ // sequence of squares and multiplications that minimizes the number of
+ // multiplications needed (since they are more costly than squarings).
+ // Intermediate results are saved and reused as well.
//
- // In order to efficiently compute a^(p-2), p-2 needs to be split into
- // a sequence of squares and multiplications that minimizes the number
- // of multiplications needed (since they are more costly than
- // squarings). Intermediate results are saved and reused as well.
+ // The secp256k1 prime - 2 is 2^256 - 4294968275. In binary, that is:
//
- // The secp256k1 prime - 2 is 2^256 - 4294968275.
+ // 11111111 11111111 11111111 11111111
+ // 11111111 11111111 11111111 11111111
+ // 11111111 11111111 11111111 11111111
+ // 11111111 11111111 11111111 11111111
+ // 11111111 11111111 11111111 11111111
+ // 11111111 11111111 11111111 11111111
+ // 11111111 11111111 11111111 11111110
+ // 11111111 11111111 11111100 00101101
//
- // This has a cost of 258 field squarings and 33 field multiplications.
- var a2, a3, a4, a10, a11, a21, a42, a45, a63, a1019, a1023 FieldVal
- a2.SquareVal(f)
- a3.Mul2(&a2, f)
- a4.SquareVal(&a2)
- a10.SquareVal(&a4).Mul(&a2)
- a11.Mul2(&a10, f)
- a21.Mul2(&a10, &a11)
- a42.SquareVal(&a21)
- a45.Mul2(&a42, &a3)
- a63.Mul2(&a42, &a21)
- a1019.SquareVal(&a63).Square().Square().Square().Mul(&a11)
- a1023.Mul2(&a1019, &a4)
- f.Set(&a63) // f = a^(2^6 - 1)
- f.Square().Square().Square().Square().Square() // f = a^(2^11 - 32)
- f.Square().Square().Square().Square().Square() // f = a^(2^16 - 1024)
- f.Mul(&a1023) // f = a^(2^16 - 1)
- f.Square().Square().Square().Square().Square() // f = a^(2^21 - 32)
- f.Square().Square().Square().Square().Square() // f = a^(2^26 - 1024)
- f.Mul(&a1023) // f = a^(2^26 - 1)
- f.Square().Square().Square().Square().Square() // f = a^(2^31 - 32)
- f.Square().Square().Square().Square().Square() // f = a^(2^36 - 1024)
- f.Mul(&a1023) // f = a^(2^36 - 1)
- f.Square().Square().Square().Square().Square() // f = a^(2^41 - 32)
- f.Square().Square().Square().Square().Square() // f = a^(2^46 - 1024)
- f.Mul(&a1023) // f = a^(2^46 - 1)
- f.Square().Square().Square().Square().Square() // f = a^(2^51 - 32)
- f.Square().Square().Square().Square().Square() // f = a^(2^56 - 1024)
- f.Mul(&a1023) // f = a^(2^56 - 1)
- f.Square().Square().Square().Square().Square() // f = a^(2^61 - 32)
- f.Square().Square().Square().Square().Square() // f = a^(2^66 - 1024)
- f.Mul(&a1023) // f = a^(2^66 - 1)
- f.Square().Square().Square().Square().Square() // f = a^(2^71 - 32)
- f.Square().Square().Square().Square().Square() // f = a^(2^76 - 1024)
- f.Mul(&a1023) // f = a^(2^76 - 1)
- f.Square().Square().Square().Square().Square() // f = a^(2^81 - 32)
- f.Square().Square().Square().Square().Square() // f = a^(2^86 - 1024)
- f.Mul(&a1023) // f = a^(2^86 - 1)
- f.Square().Square().Square().Square().Square() // f = a^(2^91 - 32)
- f.Square().Square().Square().Square().Square() // f = a^(2^96 - 1024)
- f.Mul(&a1023) // f = a^(2^96 - 1)
- f.Square().Square().Square().Square().Square() // f = a^(2^101 - 32)
- f.Square().Square().Square().Square().Square() // f = a^(2^106 - 1024)
- f.Mul(&a1023) // f = a^(2^106 - 1)
- f.Square().Square().Square().Square().Square() // f = a^(2^111 - 32)
- f.Square().Square().Square().Square().Square() // f = a^(2^116 - 1024)
- f.Mul(&a1023) // f = a^(2^116 - 1)
- f.Square().Square().Square().Square().Square() // f = a^(2^121 - 32)
- f.Square().Square().Square().Square().Square() // f = a^(2^126 - 1024)
- f.Mul(&a1023) // f = a^(2^126 - 1)
- f.Square().Square().Square().Square().Square() // f = a^(2^131 - 32)
- f.Square().Square().Square().Square().Square() // f = a^(2^136 - 1024)
- f.Mul(&a1023) // f = a^(2^136 - 1)
- f.Square().Square().Square().Square().Square() // f = a^(2^141 - 32)
- f.Square().Square().Square().Square().Square() // f = a^(2^146 - 1024)
- f.Mul(&a1023) // f = a^(2^146 - 1)
- f.Square().Square().Square().Square().Square() // f = a^(2^151 - 32)
- f.Square().Square().Square().Square().Square() // f = a^(2^156 - 1024)
- f.Mul(&a1023) // f = a^(2^156 - 1)
- f.Square().Square().Square().Square().Square() // f = a^(2^161 - 32)
- f.Square().Square().Square().Square().Square() // f = a^(2^166 - 1024)
- f.Mul(&a1023) // f = a^(2^166 - 1)
- f.Square().Square().Square().Square().Square() // f = a^(2^171 - 32)
- f.Square().Square().Square().Square().Square() // f = a^(2^176 - 1024)
- f.Mul(&a1023) // f = a^(2^176 - 1)
- f.Square().Square().Square().Square().Square() // f = a^(2^181 - 32)
- f.Square().Square().Square().Square().Square() // f = a^(2^186 - 1024)
- f.Mul(&a1023) // f = a^(2^186 - 1)
- f.Square().Square().Square().Square().Square() // f = a^(2^191 - 32)
- f.Square().Square().Square().Square().Square() // f = a^(2^196 - 1024)
- f.Mul(&a1023) // f = a^(2^196 - 1)
- f.Square().Square().Square().Square().Square() // f = a^(2^201 - 32)
- f.Square().Square().Square().Square().Square() // f = a^(2^206 - 1024)
- f.Mul(&a1023) // f = a^(2^206 - 1)
- f.Square().Square().Square().Square().Square() // f = a^(2^211 - 32)
- f.Square().Square().Square().Square().Square() // f = a^(2^216 - 1024)
- f.Mul(&a1023) // f = a^(2^216 - 1)
- f.Square().Square().Square().Square().Square() // f = a^(2^221 - 32)
- f.Square().Square().Square().Square().Square() // f = a^(2^226 - 1024)
- f.Mul(&a1019) // f = a^(2^226 - 5)
- f.Square().Square().Square().Square().Square() // f = a^(2^231 - 160)
- f.Square().Square().Square().Square().Square() // f = a^(2^236 - 5120)
- f.Mul(&a1023) // f = a^(2^236 - 4097)
- f.Square().Square().Square().Square().Square() // f = a^(2^241 - 131104)
- f.Square().Square().Square().Square().Square() // f = a^(2^246 - 4195328)
- f.Mul(&a1023) // f = a^(2^246 - 4194305)
- f.Square().Square().Square().Square().Square() // f = a^(2^251 - 134217760)
- f.Square().Square().Square().Square().Square() // f = a^(2^256 - 4294968320)
- return f.Mul(&a45) // f = a^(2^256 - 4294968275) = a^(p-2)
+ // Notice that can be broken up into five windows of consecutive 1s (in
+ // order of least to most significant) as:
+ //
+ // 2-bit window with 1 bit set (bit 1 unset)
+ // 3-bit window with 2 bits set (bit 4 unset)
+ // 5-bit window with 1 bit set (bits 6, 7, 8, 9 unset)
+ // 23-bit window with 22 bits set (bit 32 unset)
+ // 223-bit window with all 223 bits set
+ //
+ // Thus, the groups of 1 bits in each window forms the set:
+ // S = {1, 2, 22, 223}.
+ //
+ // The strategy is to calculate a^(2^n - 1) for each grouping via an
+ // addition chain with a sliding window.
+ //
+ // The addition chain used is (credits to Peter Dettman):
+ // (0,0),(1,0),(2,2),(3,2),(4,1),(5,5),(6,6),(7,7),(8,8),(9,7),(10,2)
+ // => 2^[1] 2^[2] 2^3 2^6 2^9 2^11 2^[22] 2^44 2^88 2^176 2^220 2^[223]
+ //
+ // This has a cost of 255 field squarings and 15 field multiplications.
+ var a, a2, a3, a6, a9, a11, a22, a44, a88, a176, a220, a223 FieldVal
+ a.Set(f)
+ a2.SquareVal(&a).Mul(&a) // a2 = a^(2^2 - 1)
+ a3.SquareVal(&a2).Mul(&a) // a3 = a^(2^3 - 1)
+ a6.SquareVal(&a3).Square().Square() // a6 = a^(2^6 - 2^3)
+ a6.Mul(&a3) // a6 = a^(2^6 - 1)
+ a9.SquareVal(&a6).Square().Square() // a9 = a^(2^9 - 2^3)
+ a9.Mul(&a3) // a9 = a^(2^9 - 1)
+ a11.SquareVal(&a9).Square() // a11 = a^(2^11 - 2^2)
+ a11.Mul(&a2) // a11 = a^(2^11 - 1)
+ a22.SquareVal(&a11).Square().Square().Square().Square() // a22 = a^(2^16 - 2^5)
+ a22.Square().Square().Square().Square().Square() // a22 = a^(2^21 - 2^10)
+ a22.Square() // a22 = a^(2^22 - 2^11)
+ a22.Mul(&a11) // a22 = a^(2^22 - 1)
+ a44.SquareVal(&a22).Square().Square().Square().Square() // a44 = a^(2^27 - 2^5)
+ a44.Square().Square().Square().Square().Square() // a44 = a^(2^32 - 2^10)
+ a44.Square().Square().Square().Square().Square() // a44 = a^(2^37 - 2^15)
+ a44.Square().Square().Square().Square().Square() // a44 = a^(2^42 - 2^20)
+ a44.Square().Square() // a44 = a^(2^44 - 2^22)
+ a44.Mul(&a22) // a44 = a^(2^44 - 1)
+ a88.SquareVal(&a44).Square().Square().Square().Square() // a88 = a^(2^49 - 2^5)
+ a88.Square().Square().Square().Square().Square() // a88 = a^(2^54 - 2^10)
+ a88.Square().Square().Square().Square().Square() // a88 = a^(2^59 - 2^15)
+ a88.Square().Square().Square().Square().Square() // a88 = a^(2^64 - 2^20)
+ a88.Square().Square().Square().Square().Square() // a88 = a^(2^69 - 2^25)
+ a88.Square().Square().Square().Square().Square() // a88 = a^(2^74 - 2^30)
+ a88.Square().Square().Square().Square().Square() // a88 = a^(2^79 - 2^35)
+ a88.Square().Square().Square().Square().Square() // a88 = a^(2^84 - 2^40)
+ a88.Square().Square().Square().Square() // a88 = a^(2^88 - 2^44)
+ a88.Mul(&a44) // a88 = a^(2^88 - 1)
+ a176.SquareVal(&a88).Square().Square().Square().Square() // a176 = a^(2^93 - 2^5)
+ a176.Square().Square().Square().Square().Square() // a176 = a^(2^98 - 2^10)
+ a176.Square().Square().Square().Square().Square() // a176 = a^(2^103 - 2^15)
+ a176.Square().Square().Square().Square().Square() // a176 = a^(2^108 - 2^20)
+ a176.Square().Square().Square().Square().Square() // a176 = a^(2^113 - 2^25)
+ a176.Square().Square().Square().Square().Square() // a176 = a^(2^118 - 2^30)
+ a176.Square().Square().Square().Square().Square() // a176 = a^(2^123 - 2^35)
+ a176.Square().Square().Square().Square().Square() // a176 = a^(2^128 - 2^40)
+ a176.Square().Square().Square().Square().Square() // a176 = a^(2^133 - 2^45)
+ a176.Square().Square().Square().Square().Square() // a176 = a^(2^138 - 2^50)
+ a176.Square().Square().Square().Square().Square() // a176 = a^(2^143 - 2^55)
+ a176.Square().Square().Square().Square().Square() // a176 = a^(2^148 - 2^60)
+ a176.Square().Square().Square().Square().Square() // a176 = a^(2^153 - 2^65)
+ a176.Square().Square().Square().Square().Square() // a176 = a^(2^158 - 2^70)
+ a176.Square().Square().Square().Square().Square() // a176 = a^(2^163 - 2^75)
+ a176.Square().Square().Square().Square().Square() // a176 = a^(2^168 - 2^80)
+ a176.Square().Square().Square().Square().Square() // a176 = a^(2^173 - 2^85)
+ a176.Square().Square().Square() // a176 = a^(2^176 - 2^88)
+ a176.Mul(&a88) // a176 = a^(2^176 - 1)
+ a220.SquareVal(&a176).Square().Square().Square().Square() // a220 = a^(2^181 - 2^5)
+ a220.Square().Square().Square().Square().Square() // a220 = a^(2^186 - 2^10)
+ a220.Square().Square().Square().Square().Square() // a220 = a^(2^191 - 2^15)
+ a220.Square().Square().Square().Square().Square() // a220 = a^(2^196 - 2^20)
+ a220.Square().Square().Square().Square().Square() // a220 = a^(2^201 - 2^25)
+ a220.Square().Square().Square().Square().Square() // a220 = a^(2^206 - 2^30)
+ a220.Square().Square().Square().Square().Square() // a220 = a^(2^211 - 2^35)
+ a220.Square().Square().Square().Square().Square() // a220 = a^(2^216 - 2^40)
+ a220.Square().Square().Square().Square() // a220 = a^(2^220 - 2^44)
+ a220.Mul(&a44) // a220 = a^(2^220 - 1)
+ a223.SquareVal(&a220).Square().Square() // a223 = a^(2^223 - 2^3)
+ a223.Mul(&a3) // a223 = a^(2^223 - 1)
+
+ f.SquareVal(&a223).Square().Square().Square().Square() // f = a^(2^228 - 2^5)
+ f.Square().Square().Square().Square().Square() // f = a^(2^233 - 2^10)
+ f.Square().Square().Square().Square().Square() // f = a^(2^238 - 2^15)
+ f.Square().Square().Square().Square().Square() // f = a^(2^243 - 2^20)
+ f.Square().Square().Square() // f = a^(2^246 - 2^23)
+ f.Mul(&a22) // f = a^(2^246 - 4194305)
+ f.Square().Square().Square().Square().Square() // f = a^(2^251 - 134217760)
+ f.Mul(&a) // f = a^(2^251 - 134217759)
+ f.Square().Square().Square() // f = a^(2^254 - 1073742072)
+ f.Mul(&a2) // f = a^(2^254 - 1073742069)
+ f.Square().Square() // f = a^(2^256 - 4294968276)
+ return f.Mul(&a) // f = a^(2^256 - 4294968275) = a^(p-2)
}
-// IsGtOrEqPrimeMinusOrder returns whether or not the field value exceeds the
-// group order divided by 2 in constant time.
+// IsGtOrEqPrimeMinusOrder returns whether or not the field value is greater
+// than or equal to the field prime minus the secp256k1 group order in constant
+// time.
//
// Preconditions:
// - The field value MUST be normalized
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/modnscalar.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/modnscalar.go
index f66496ed5e..225016d8de 100644
--- a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/modnscalar.go
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/modnscalar.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2020-2022 The Decred developers
+// Copyright (c) 2020-2024 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
@@ -46,6 +46,8 @@ const (
//
// The group order of the curve per [SECG] is:
// 0xffffffff ffffffff ffffffff fffffffe baaedce6 af48a03b bfd25e8c d0364141
+ //
+ // nolint: dupword
orderWordZero uint32 = 0xd0364141
orderWordOne uint32 = 0xbfd25e8c
orderWordTwo uint32 = 0xaf48a03b
@@ -65,10 +67,10 @@ const (
orderComplementWordOne uint32 = ^orderWordOne
orderComplementWordTwo uint32 = ^orderWordTwo
orderComplementWordThree uint32 = ^orderWordThree
- //orderComplementWordFour uint32 = ^orderWordFour // unused
- //orderComplementWordFive uint32 = ^orderWordFive // unused
- //orderComplementWordSix uint32 = ^orderWordSix // unused
- //orderComplementWordSeven uint32 = ^orderWordSeven // unused
+ // orderComplementWordFour uint32 = ^orderWordFour // unused
+ // orderComplementWordFive uint32 = ^orderWordFive // unused
+ // orderComplementWordSix uint32 = ^orderWordSix // unused
+ // orderComplementWordSeven uint32 = ^orderWordSeven // unused
// These fields provide convenient access to each of the words of the
// secp256k1 curve group order N / 2 to improve code readability and avoid
@@ -76,6 +78,8 @@ const (
//
// The half order of the secp256k1 curve group is:
// 0x7fffffff ffffffff ffffffff ffffffff 5d576e73 57a4501d dfe92f46 681b20a0
+ //
+ // nolint: dupword
halfOrderWordZero uint32 = 0x681b20a0
halfOrderWordOne uint32 = 0xdfe92f46
halfOrderWordTwo uint32 = 0x57a4501d
@@ -364,8 +368,8 @@ func (s *ModNScalar) SetByteSlice(b []byte) bool {
}
// PutBytesUnchecked unpacks the scalar to a 32-byte big-endian value directly
-// into the passed byte slice in constant time. The target slice must must have
-// at least 32 bytes available or it will panic.
+// into the passed byte slice in constant time. The target slice must have at
+// least 32 bytes available or it will panic.
//
// There is a similar function, PutBytes, which unpacks the scalar into a
// 32-byte array directly. This version is provided since it can be useful to
@@ -659,7 +663,7 @@ func (s *ModNScalar) reduce512(t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11,
//
// Technically the max possible value here is (N-1)^2 since the two scalars
// being multiplied are always mod N. Nevertheless, it is safer to consider
- // it to be (2^256-1)^2 = 2^512 - 2^256 + 1 since it is the product of two
+ // it to be (2^256-1)^2 = 2^512 - 2^257 + 1 since it is the product of two
// 256-bit values.
//
// The algorithm is to reduce the result modulo the prime by subtracting
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/nonce.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/nonce.go
index 81b205d9c1..70a75bb81c 100644
--- a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/nonce.go
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/nonce.go
@@ -1,5 +1,5 @@
// Copyright (c) 2013-2014 The btcsuite developers
-// Copyright (c) 2015-2020 The Decred developers
+// Copyright (c) 2015-2024 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
@@ -181,7 +181,7 @@ func NonceRFC6979(privKey []byte, hash []byte, extra []byte, version []byte, ext
// with potential additional data as described by section 3.6 of the RFC.
hasher := newHMACSHA256(k)
hasher.Write(oneInitializer)
- hasher.Write(singleZero[:])
+ hasher.Write(singleZero)
hasher.Write(key)
k = hasher.Sum()
@@ -200,8 +200,8 @@ func NonceRFC6979(privKey []byte, hash []byte, extra []byte, version []byte, ext
// with potential additional data as described by section 3.6 of the RFC.
hasher.Reset()
hasher.Write(v)
- hasher.Write(singleOne[:])
- hasher.Write(key[:])
+ hasher.Write(singleOne)
+ hasher.Write(key)
k = hasher.Sum()
// Step G.
@@ -252,7 +252,7 @@ func NonceRFC6979(privKey []byte, hash []byte, extra []byte, version []byte, ext
// K = HMAC_K(V || 0x00)
hasher.Reset()
hasher.Write(v)
- hasher.Write(singleZero[:])
+ hasher.Write(singleZero)
k = hasher.Sum()
// V = HMAC_K(V)
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/privkey.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/privkey.go
index ca3e8da281..e6b7be3506 100644
--- a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/privkey.go
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/privkey.go
@@ -1,5 +1,5 @@
// Copyright (c) 2013-2014 The btcsuite developers
-// Copyright (c) 2015-2023 The Decred developers
+// Copyright (c) 2015-2024 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/pubkey.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/pubkey.go
index 54c54be5f1..2f8815bedf 100644
--- a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/pubkey.go
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/pubkey.go
@@ -1,5 +1,5 @@
// Copyright (c) 2013-2014 The btcsuite developers
-// Copyright (c) 2015-2022 The Decred developers
+// Copyright (c) 2015-2024 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
@@ -177,7 +177,6 @@ func ParsePubKey(serialized []byte) (key *PublicKey, err error) {
"the secp256k1 curve", x)
return nil, makeError(ErrPubKeyNotOnCurve, str)
}
- y.Normalize()
default:
str := fmt.Sprintf("malformed public key: invalid length: %d",
diff --git a/vendor/github.com/goccy/go-json/.golangci.yml b/vendor/github.com/goccy/go-json/.golangci.yml
index 57ae5a528f..977accaa9f 100644
--- a/vendor/github.com/goccy/go-json/.golangci.yml
+++ b/vendor/github.com/goccy/go-json/.golangci.yml
@@ -56,6 +56,9 @@ linters:
- cyclop
- containedctx
- revive
+ - nosnakecase
+ - exhaustruct
+ - depguard
issues:
exclude-rules:
diff --git a/vendor/github.com/goccy/go-json/Makefile b/vendor/github.com/goccy/go-json/Makefile
index 5bbfc4c9a2..c030577dcf 100644
--- a/vendor/github.com/goccy/go-json/Makefile
+++ b/vendor/github.com/goccy/go-json/Makefile
@@ -30,7 +30,7 @@ golangci-lint: | $(BIN_DIR)
GOLANGCI_LINT_TMP_DIR=$$(mktemp -d); \
cd $$GOLANGCI_LINT_TMP_DIR; \
go mod init tmp; \
- GOBIN=$(BIN_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.48.0; \
+ GOBIN=$(BIN_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.54.2; \
rm -rf $$GOLANGCI_LINT_TMP_DIR; \
}
diff --git a/vendor/github.com/goccy/go-json/encode.go b/vendor/github.com/goccy/go-json/encode.go
index 4bd899f38b..c5173825a9 100644
--- a/vendor/github.com/goccy/go-json/encode.go
+++ b/vendor/github.com/goccy/go-json/encode.go
@@ -52,7 +52,7 @@ func (e *Encoder) EncodeContext(ctx context.Context, v interface{}, optFuncs ...
rctx.Option.Flag |= encoder.ContextOption
rctx.Option.Context = ctx
- err := e.encodeWithOption(rctx, v, optFuncs...)
+ err := e.encodeWithOption(rctx, v, optFuncs...) //nolint: contextcheck
encoder.ReleaseRuntimeContext(rctx)
return err
@@ -120,7 +120,7 @@ func marshalContext(ctx context.Context, v interface{}, optFuncs ...EncodeOption
optFunc(rctx.Option)
}
- buf, err := encode(rctx, v)
+ buf, err := encode(rctx, v) //nolint: contextcheck
if err != nil {
encoder.ReleaseRuntimeContext(rctx)
return nil, err
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile.go b/vendor/github.com/goccy/go-json/internal/decoder/compile.go
index fab6437647..8ad50936c0 100644
--- a/vendor/github.com/goccy/go-json/internal/decoder/compile.go
+++ b/vendor/github.com/goccy/go-json/internal/decoder/compile.go
@@ -5,6 +5,7 @@ import (
"fmt"
"reflect"
"strings"
+ "sync"
"sync/atomic"
"unicode"
"unsafe"
@@ -17,22 +18,27 @@ var (
typeAddr *runtime.TypeAddr
cachedDecoderMap unsafe.Pointer // map[uintptr]decoder
cachedDecoder []Decoder
+ initOnce sync.Once
)
-func init() {
- typeAddr = runtime.AnalyzeTypeAddr()
- if typeAddr == nil {
- typeAddr = &runtime.TypeAddr{}
- }
- cachedDecoder = make([]Decoder, typeAddr.AddrRange>>typeAddr.AddrShift+1)
+func initDecoder() {
+ initOnce.Do(func() {
+ typeAddr = runtime.AnalyzeTypeAddr()
+ if typeAddr == nil {
+ typeAddr = &runtime.TypeAddr{}
+ }
+ cachedDecoder = make([]Decoder, typeAddr.AddrRange>>typeAddr.AddrShift+1)
+ })
}
func loadDecoderMap() map[uintptr]Decoder {
+ initDecoder()
p := atomic.LoadPointer(&cachedDecoderMap)
return *(*map[uintptr]Decoder)(unsafe.Pointer(&p))
}
func storeDecoder(typ uintptr, dec Decoder, m map[uintptr]Decoder) {
+ initDecoder()
newDecoderMap := make(map[uintptr]Decoder, len(m)+1)
newDecoderMap[typ] = dec
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go b/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go
index eb7e2b1345..025ca85b5e 100644
--- a/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go
+++ b/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go
@@ -10,6 +10,7 @@ import (
)
func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) {
+ initDecoder()
typeptr := uintptr(unsafe.Pointer(typ))
if typeptr > typeAddr.MaxTypeAddr {
return compileToGetDecoderSlowPath(typeptr, typ)
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go b/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go
index 49cdda4a17..023b817c36 100644
--- a/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go
+++ b/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go
@@ -13,6 +13,7 @@ import (
var decMu sync.RWMutex
func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) {
+ initDecoder()
typeptr := uintptr(unsafe.Pointer(typ))
if typeptr > typeAddr.MaxTypeAddr {
return compileToGetDecoderSlowPath(typeptr, typ)
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/ptr.go b/vendor/github.com/goccy/go-json/internal/decoder/ptr.go
index de12e105c6..ae2299466a 100644
--- a/vendor/github.com/goccy/go-json/internal/decoder/ptr.go
+++ b/vendor/github.com/goccy/go-json/internal/decoder/ptr.go
@@ -85,6 +85,7 @@ func (d *ptrDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.P
}
c, err := d.dec.Decode(ctx, cursor, depth, newptr)
if err != nil {
+ *(*unsafe.Pointer)(p) = nil
return 0, err
}
cursor = c
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go
index 6d37993f07..d711d0f85f 100644
--- a/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go
+++ b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go
@@ -147,7 +147,7 @@ func (d *unmarshalTextDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int
return nil, 0, fmt.Errorf("json: unmarshal text decoder does not support decode path")
}
-func unquoteBytes(s []byte) (t []byte, ok bool) {
+func unquoteBytes(s []byte) (t []byte, ok bool) { //nolint: nonamedreturns
length := len(s)
if length < 2 || s[0] != '"' || s[length-1] != '"' {
return
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compact.go b/vendor/github.com/goccy/go-json/internal/encoder/compact.go
index 0eb9545d89..e287a6c03f 100644
--- a/vendor/github.com/goccy/go-json/internal/encoder/compact.go
+++ b/vendor/github.com/goccy/go-json/internal/encoder/compact.go
@@ -213,8 +213,8 @@ func compactString(dst, src []byte, cursor int64, escape bool) ([]byte, int64, e
dst = append(dst, src[start:cursor]...)
dst = append(dst, `\u202`...)
dst = append(dst, hex[src[cursor+2]&0xF])
- cursor += 2
start = cursor + 3
+ cursor += 2
}
}
switch c {
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go
index 3ae39ba8c7..b107636890 100644
--- a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go
+++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go
@@ -5,6 +5,7 @@ import (
"encoding"
"encoding/json"
"reflect"
+ "sync"
"sync/atomic"
"unsafe"
@@ -24,14 +25,17 @@ var (
cachedOpcodeSets []*OpcodeSet
cachedOpcodeMap unsafe.Pointer // map[uintptr]*OpcodeSet
typeAddr *runtime.TypeAddr
+ initEncoderOnce sync.Once
)
-func init() {
- typeAddr = runtime.AnalyzeTypeAddr()
- if typeAddr == nil {
- typeAddr = &runtime.TypeAddr{}
- }
- cachedOpcodeSets = make([]*OpcodeSet, typeAddr.AddrRange>>typeAddr.AddrShift+1)
+func initEncoder() {
+ initEncoderOnce.Do(func() {
+ typeAddr = runtime.AnalyzeTypeAddr()
+ if typeAddr == nil {
+ typeAddr = &runtime.TypeAddr{}
+ }
+ cachedOpcodeSets = make([]*OpcodeSet, typeAddr.AddrRange>>typeAddr.AddrShift+1)
+ })
}
func loadOpcodeMap() map[uintptr]*OpcodeSet {
@@ -480,7 +484,7 @@ func (c *Compiler) mapCode(typ *runtime.Type) (*MapCode, error) {
func (c *Compiler) listElemCode(typ *runtime.Type) (Code, error) {
switch {
- case c.isPtrMarshalJSONType(typ):
+ case c.implementsMarshalJSONType(typ) || c.implementsMarshalJSONType(runtime.PtrTo(typ)):
return c.marshalJSONCode(typ)
case !typ.Implements(marshalTextType) && runtime.PtrTo(typ).Implements(marshalTextType):
return c.marshalTextCode(typ)
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go
index 20c93cbf70..b6f45a49b0 100644
--- a/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go
+++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go
@@ -4,6 +4,7 @@
package encoder
func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) {
+ initEncoder()
if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr {
codeSet, err := compileToGetCodeSetSlowPath(typeptr)
if err != nil {
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go
index 13ba23fdff..47b482f7fb 100644
--- a/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go
+++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go
@@ -10,6 +10,7 @@ import (
var setsMu sync.RWMutex
func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) {
+ initEncoder()
if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr {
codeSet, err := compileToGetCodeSetSlowPath(typeptr)
if err != nil {
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/encoder.go b/vendor/github.com/goccy/go-json/internal/encoder/encoder.go
index 14eb6a0d64..b436f5b21f 100644
--- a/vendor/github.com/goccy/go-json/internal/encoder/encoder.go
+++ b/vendor/github.com/goccy/go-json/internal/encoder/encoder.go
@@ -406,6 +406,11 @@ func AppendMarshalJSON(ctx *RuntimeContext, code *Opcode, b []byte, v interface{
rv = newV
}
}
+
+ if rv.Kind() == reflect.Ptr && rv.IsNil() {
+ return AppendNull(ctx, b), nil
+ }
+
v = rv.Interface()
var bb []byte
if (code.Flags & MarshalerContextFlags) != 0 {
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/int.go b/vendor/github.com/goccy/go-json/internal/encoder/int.go
index 85f0796098..8b5febeaa6 100644
--- a/vendor/github.com/goccy/go-json/internal/encoder/int.go
+++ b/vendor/github.com/goccy/go-json/internal/encoder/int.go
@@ -1,3 +1,27 @@
+// This files's processing codes are inspired by https://github.com/segmentio/encoding.
+// The license notation is as follows.
+//
+// # MIT License
+//
+// Copyright (c) 2019 Segment.io, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
package encoder
import (
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/string.go b/vendor/github.com/goccy/go-json/internal/encoder/string.go
index e4152b27c7..4abb84165e 100644
--- a/vendor/github.com/goccy/go-json/internal/encoder/string.go
+++ b/vendor/github.com/goccy/go-json/internal/encoder/string.go
@@ -1,3 +1,27 @@
+// This files's string processing codes are inspired by https://github.com/segmentio/encoding.
+// The license notation is as follows.
+//
+// # MIT License
+//
+// Copyright (c) 2019 Segment.io, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
package encoder
import (
diff --git a/vendor/github.com/goccy/go-json/internal/runtime/rtype.go b/vendor/github.com/goccy/go-json/internal/runtime/rtype.go
index 4db10debe1..37cfe35a1f 100644
--- a/vendor/github.com/goccy/go-json/internal/runtime/rtype.go
+++ b/vendor/github.com/goccy/go-json/internal/runtime/rtype.go
@@ -252,7 +252,6 @@ func IfaceIndir(*Type) bool
//go:noescape
func RType2Type(t *Type) reflect.Type
-//go:nolint structcheck
type emptyInterface struct {
_ *Type
ptr unsafe.Pointer
diff --git a/vendor/github.com/goccy/go-json/internal/runtime/type.go b/vendor/github.com/goccy/go-json/internal/runtime/type.go
index 0167cd2c01..4b693cb0bb 100644
--- a/vendor/github.com/goccy/go-json/internal/runtime/type.go
+++ b/vendor/github.com/goccy/go-json/internal/runtime/type.go
@@ -2,6 +2,7 @@ package runtime
import (
"reflect"
+ "sync"
"unsafe"
)
@@ -23,8 +24,8 @@ type TypeAddr struct {
}
var (
- typeAddr *TypeAddr
- alreadyAnalyzed bool
+ typeAddr *TypeAddr
+ once sync.Once
)
//go:linkname typelinks reflect.typelinks
@@ -34,67 +35,64 @@ func typelinks() ([]unsafe.Pointer, [][]int32)
func rtypeOff(unsafe.Pointer, int32) unsafe.Pointer
func AnalyzeTypeAddr() *TypeAddr {
- defer func() {
- alreadyAnalyzed = true
- }()
- if alreadyAnalyzed {
- return typeAddr
- }
- sections, offsets := typelinks()
- if len(sections) != 1 {
- return nil
- }
- if len(offsets) != 1 {
- return nil
- }
- section := sections[0]
- offset := offsets[0]
- var (
- min uintptr = uintptr(^uint(0))
- max uintptr = 0
- isAligned64 = true
- isAligned32 = true
- )
- for i := 0; i < len(offset); i++ {
- typ := (*Type)(rtypeOff(section, offset[i]))
- addr := uintptr(unsafe.Pointer(typ))
- if min > addr {
- min = addr
+ once.Do(func() {
+ sections, offsets := typelinks()
+ if len(sections) != 1 {
+ return
}
- if max < addr {
- max = addr
+ if len(offsets) != 1 {
+ return
}
- if typ.Kind() == reflect.Ptr {
- addr = uintptr(unsafe.Pointer(typ.Elem()))
+ section := sections[0]
+ offset := offsets[0]
+ var (
+ min uintptr = uintptr(^uint(0))
+ max uintptr = 0
+ isAligned64 = true
+ isAligned32 = true
+ )
+ for i := 0; i < len(offset); i++ {
+ typ := (*Type)(rtypeOff(section, offset[i]))
+ addr := uintptr(unsafe.Pointer(typ))
if min > addr {
min = addr
}
if max < addr {
max = addr
}
+ if typ.Kind() == reflect.Ptr {
+ addr = uintptr(unsafe.Pointer(typ.Elem()))
+ if min > addr {
+ min = addr
+ }
+ if max < addr {
+ max = addr
+ }
+ }
+ isAligned64 = isAligned64 && (addr-min)&63 == 0
+ isAligned32 = isAligned32 && (addr-min)&31 == 0
+ }
+ addrRange := max - min
+ if addrRange == 0 {
+ return
+ }
+ var addrShift uintptr
+ if isAligned64 {
+ addrShift = 6
+ } else if isAligned32 {
+ addrShift = 5
}
- isAligned64 = isAligned64 && (addr-min)&63 == 0
- isAligned32 = isAligned32 && (addr-min)&31 == 0
- }
- addrRange := max - min
- if addrRange == 0 {
- return nil
- }
- var addrShift uintptr
- if isAligned64 {
- addrShift = 6
- } else if isAligned32 {
- addrShift = 5
- }
- cacheSize := addrRange >> addrShift
- if cacheSize > maxAcceptableTypeAddrRange {
- return nil
- }
- typeAddr = &TypeAddr{
- BaseTypeAddr: min,
- MaxTypeAddr: max,
- AddrRange: addrRange,
- AddrShift: addrShift,
- }
+ cacheSize := addrRange >> addrShift
+ if cacheSize > maxAcceptableTypeAddrRange {
+ return
+ }
+ typeAddr = &TypeAddr{
+ BaseTypeAddr: min,
+ MaxTypeAddr: max,
+ AddrRange: addrRange,
+ AddrShift: addrShift,
+ }
+ })
+
return typeAddr
}
diff --git a/vendor/github.com/goccy/go-json/json.go b/vendor/github.com/goccy/go-json/json.go
index 413cb20bf3..fb18065a23 100644
--- a/vendor/github.com/goccy/go-json/json.go
+++ b/vendor/github.com/goccy/go-json/json.go
@@ -89,31 +89,31 @@ type UnmarshalerContext interface {
//
// Examples of struct field tags and their meanings:
//
-// // Field appears in JSON as key "myName".
-// Field int `json:"myName"`
+// // Field appears in JSON as key "myName".
+// Field int `json:"myName"`
//
-// // Field appears in JSON as key "myName" and
-// // the field is omitted from the object if its value is empty,
-// // as defined above.
-// Field int `json:"myName,omitempty"`
+// // Field appears in JSON as key "myName" and
+// // the field is omitted from the object if its value is empty,
+// // as defined above.
+// Field int `json:"myName,omitempty"`
//
-// // Field appears in JSON as key "Field" (the default), but
-// // the field is skipped if empty.
-// // Note the leading comma.
-// Field int `json:",omitempty"`
+// // Field appears in JSON as key "Field" (the default), but
+// // the field is skipped if empty.
+// // Note the leading comma.
+// Field int `json:",omitempty"`
//
-// // Field is ignored by this package.
-// Field int `json:"-"`
+// // Field is ignored by this package.
+// Field int `json:"-"`
//
-// // Field appears in JSON as key "-".
-// Field int `json:"-,"`
+// // Field appears in JSON as key "-".
+// Field int `json:"-,"`
//
// The "string" option signals that a field is stored as JSON inside a
// JSON-encoded string. It applies only to fields of string, floating point,
// integer, or boolean types. This extra level of encoding is sometimes used
// when communicating with JavaScript programs:
//
-// Int64String int64 `json:",string"`
+// Int64String int64 `json:",string"`
//
// The key name will be used if it's a non-empty string consisting of
// only Unicode letters, digits, and ASCII punctuation except quotation
@@ -166,7 +166,6 @@ type UnmarshalerContext interface {
// JSON cannot represent cyclic data structures and Marshal does not
// handle them. Passing cyclic structures to Marshal will result in
// an infinite recursion.
-//
func Marshal(v interface{}) ([]byte, error) {
return MarshalWithOption(v)
}
@@ -264,14 +263,13 @@ func MarshalIndentWithOption(v interface{}, prefix, indent string, optFuncs ...E
//
// The JSON null value unmarshals into an interface, map, pointer, or slice
// by setting that Go value to nil. Because null is often used in JSON to mean
-// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// “not present,” unmarshaling a JSON null into any other Go type has no effect
// on the value and produces no error.
//
// When unmarshaling quoted strings, invalid UTF-8 or
// invalid UTF-16 surrogate pairs are not treated as an error.
// Instead, they are replaced by the Unicode replacement
// character U+FFFD.
-//
func Unmarshal(data []byte, v interface{}) error {
return unmarshal(data, v)
}
@@ -299,7 +297,6 @@ func UnmarshalNoEscape(data []byte, v interface{}, optFuncs ...DecodeOptionFunc)
// Number, for JSON numbers
// string, for JSON string literals
// nil, for JSON null
-//
type Token = json.Token
// A Number represents a JSON number literal.
diff --git a/vendor/github.com/gorilla/mux/.editorconfig b/vendor/github.com/gorilla/mux/.editorconfig
deleted file mode 100644
index c6b74c3e0d..0000000000
--- a/vendor/github.com/gorilla/mux/.editorconfig
+++ /dev/null
@@ -1,20 +0,0 @@
-; https://editorconfig.org/
-
-root = true
-
-[*]
-insert_final_newline = true
-charset = utf-8
-trim_trailing_whitespace = true
-indent_style = space
-indent_size = 2
-
-[{Makefile,go.mod,go.sum,*.go,.gitmodules}]
-indent_style = tab
-indent_size = 4
-
-[*.md]
-indent_size = 4
-trim_trailing_whitespace = false
-
-eclint_indent_style = unset
\ No newline at end of file
diff --git a/vendor/github.com/gorilla/mux/.gitignore b/vendor/github.com/gorilla/mux/.gitignore
deleted file mode 100644
index 84039fec68..0000000000
--- a/vendor/github.com/gorilla/mux/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-coverage.coverprofile
diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE
deleted file mode 100644
index bb9d80bc9b..0000000000
--- a/vendor/github.com/gorilla/mux/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2023 The Gorilla Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/mux/Makefile b/vendor/github.com/gorilla/mux/Makefile
deleted file mode 100644
index 98f5ab75f9..0000000000
--- a/vendor/github.com/gorilla/mux/Makefile
+++ /dev/null
@@ -1,34 +0,0 @@
-GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '')
-GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest
-
-GO_SEC=$(shell which gosec 2> /dev/null || echo '')
-GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest
-
-GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '')
-GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest
-
-.PHONY: golangci-lint
-golangci-lint:
- $(if $(GO_LINT), ,go install $(GO_LINT_URI))
- @echo "##### Running golangci-lint"
- golangci-lint run -v
-
-.PHONY: gosec
-gosec:
- $(if $(GO_SEC), ,go install $(GO_SEC_URI))
- @echo "##### Running gosec"
- gosec ./...
-
-.PHONY: govulncheck
-govulncheck:
- $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI))
- @echo "##### Running govulncheck"
- govulncheck ./...
-
-.PHONY: verify
-verify: golangci-lint gosec govulncheck
-
-.PHONY: test
-test:
- @echo "##### Running tests"
- go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./...
\ No newline at end of file
diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md
deleted file mode 100644
index 382513d57c..0000000000
--- a/vendor/github.com/gorilla/mux/README.md
+++ /dev/null
@@ -1,812 +0,0 @@
-# gorilla/mux
-
-
-[](https://codecov.io/github/gorilla/mux)
-[](https://godoc.org/github.com/gorilla/mux)
-[](https://sourcegraph.com/github.com/gorilla/mux?badge)
-
-
-
-
-Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
-their respective handler.
-
-The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are:
-
-* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
-* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
-* URL hosts, paths and query values can have variables with an optional regular expression.
-* Registered URLs can be built, or "reversed", which helps maintaining references to resources.
-* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
-
----
-
-* [Install](#install)
-* [Examples](#examples)
-* [Matching Routes](#matching-routes)
-* [Static Files](#static-files)
-* [Serving Single Page Applications](#serving-single-page-applications) (e.g. React, Vue, Ember.js, etc.)
-* [Registered URLs](#registered-urls)
-* [Walking Routes](#walking-routes)
-* [Graceful Shutdown](#graceful-shutdown)
-* [Middleware](#middleware)
-* [Handling CORS Requests](#handling-cors-requests)
-* [Testing Handlers](#testing-handlers)
-* [Full Example](#full-example)
-
----
-
-## Install
-
-With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain:
-
-```sh
-go get -u github.com/gorilla/mux
-```
-
-## Examples
-
-Let's start registering a couple of URL paths and handlers:
-
-```go
-func main() {
- r := mux.NewRouter()
- r.HandleFunc("/", HomeHandler)
- r.HandleFunc("/products", ProductsHandler)
- r.HandleFunc("/articles", ArticlesHandler)
- http.Handle("/", r)
-}
-```
-
-Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters.
-
-Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example:
-
-```go
-r := mux.NewRouter()
-r.HandleFunc("/products/{key}", ProductHandler)
-r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
-r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
-```
-
-The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`:
-
-```go
-func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- w.WriteHeader(http.StatusOK)
- fmt.Fprintf(w, "Category: %v\n", vars["category"])
-}
-```
-
-And this is all you need to know about the basic usage. More advanced options are explained below.
-
-### Matching Routes
-
-Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables:
-
-```go
-r := mux.NewRouter()
-// Only matches if domain is "www.example.com".
-r.Host("www.example.com")
-// Matches a dynamic subdomain.
-r.Host("{subdomain:[a-z]+}.example.com")
-```
-
-There are several other matchers that can be added. To match path prefixes:
-
-```go
-r.PathPrefix("/products/")
-```
-
-...or HTTP methods:
-
-```go
-r.Methods("GET", "POST")
-```
-
-...or URL schemes:
-
-```go
-r.Schemes("https")
-```
-
-...or header values:
-
-```go
-r.Headers("X-Requested-With", "XMLHttpRequest")
-```
-
-...or query values:
-
-```go
-r.Queries("key", "value")
-```
-
-...or to use a custom matcher function:
-
-```go
-r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
- return r.ProtoMajor == 0
-})
-```
-
-...and finally, it is possible to combine several matchers in a single route:
-
-```go
-r.HandleFunc("/products", ProductsHandler).
- Host("www.example.com").
- Methods("GET").
- Schemes("http")
-```
-
-Routes are tested in the order they were added to the router. If two routes match, the first one wins:
-
-```go
-r := mux.NewRouter()
-r.HandleFunc("/specific", specificHandler)
-r.PathPrefix("/").Handler(catchAllHandler)
-```
-
-Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
-
-For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
-
-```go
-r := mux.NewRouter()
-s := r.Host("www.example.com").Subrouter()
-```
-
-Then register routes in the subrouter:
-
-```go
-s.HandleFunc("/products/", ProductsHandler)
-s.HandleFunc("/products/{key}", ProductHandler)
-s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
-```
-
-The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route.
-
-Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register its paths relatively to a given subrouter.
-
-There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths:
-
-```go
-r := mux.NewRouter()
-s := r.PathPrefix("/products").Subrouter()
-// "/products/"
-s.HandleFunc("/", ProductsHandler)
-// "/products/{key}/"
-s.HandleFunc("/{key}/", ProductHandler)
-// "/products/{key}/details"
-s.HandleFunc("/{key}/details", ProductDetailsHandler)
-```
-
-
-### Static Files
-
-Note that the path provided to `PathPrefix()` represents a "wildcard": calling
-`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
-request that matches "/static/\*". This makes it easy to serve static files with mux:
-
-```go
-func main() {
- var dir string
-
- flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
- flag.Parse()
- r := mux.NewRouter()
-
- // This will serve files under http://localhost:8000/static/
- r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
-
- srv := &http.Server{
- Handler: r,
- Addr: "127.0.0.1:8000",
- // Good practice: enforce timeouts for servers you create!
- WriteTimeout: 15 * time.Second,
- ReadTimeout: 15 * time.Second,
- }
-
- log.Fatal(srv.ListenAndServe())
-}
-```
-
-### Serving Single Page Applications
-
-Most of the time it makes sense to serve your SPA on a separate web server from your API,
-but sometimes it's desirable to serve them both from one place. It's possible to write a simple
-handler for serving your SPA (for use with React Router's [BrowserRouter](https://reacttraining.com/react-router/web/api/BrowserRouter) for example), and leverage
-mux's powerful routing for your API endpoints.
-
-```go
-package main
-
-import (
- "encoding/json"
- "log"
- "net/http"
- "os"
- "path/filepath"
- "time"
-
- "github.com/gorilla/mux"
-)
-
-// spaHandler implements the http.Handler interface, so we can use it
-// to respond to HTTP requests. The path to the static directory and
-// path to the index file within that static directory are used to
-// serve the SPA in the given static directory.
-type spaHandler struct {
- staticPath string
- indexPath string
-}
-
-// ServeHTTP inspects the URL path to locate a file within the static dir
-// on the SPA handler. If a file is found, it will be served. If not, the
-// file located at the index path on the SPA handler will be served. This
-// is suitable behavior for serving an SPA (single page application).
-func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- // Join internally call path.Clean to prevent directory traversal
- path := filepath.Join(h.staticPath, r.URL.Path)
-
- // check whether a file exists or is a directory at the given path
- fi, err := os.Stat(path)
- if os.IsNotExist(err) || fi.IsDir() {
- // file does not exist or path is a directory, serve index.html
- http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath))
- return
- }
-
- if err != nil {
- // if we got an error (that wasn't that the file doesn't exist) stating the
- // file, return a 500 internal server error and stop
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
-
- // otherwise, use http.FileServer to serve the static file
- http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r)
-}
-
-func main() {
- router := mux.NewRouter()
-
- router.HandleFunc("/api/health", func(w http.ResponseWriter, r *http.Request) {
- // an example API handler
- json.NewEncoder(w).Encode(map[string]bool{"ok": true})
- })
-
- spa := spaHandler{staticPath: "build", indexPath: "index.html"}
- router.PathPrefix("/").Handler(spa)
-
- srv := &http.Server{
- Handler: router,
- Addr: "127.0.0.1:8000",
- // Good practice: enforce timeouts for servers you create!
- WriteTimeout: 15 * time.Second,
- ReadTimeout: 15 * time.Second,
- }
-
- log.Fatal(srv.ListenAndServe())
-}
-```
-
-### Registered URLs
-
-Now let's see how to build registered URLs.
-
-Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example:
-
-```go
-r := mux.NewRouter()
-r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
- Name("article")
-```
-
-To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do:
-
-```go
-url, err := r.Get("article").URL("category", "technology", "id", "42")
-```
-
-...and the result will be a `url.URL` with the following path:
-
-```
-"/articles/technology/42"
-```
-
-This also works for host and query value variables:
-
-```go
-r := mux.NewRouter()
-r.Host("{subdomain}.example.com").
- Path("/articles/{category}/{id:[0-9]+}").
- Queries("filter", "{filter}").
- HandlerFunc(ArticleHandler).
- Name("article")
-
-// url.String() will be "http://news.example.com/articles/technology/42?filter=gorilla"
-url, err := r.Get("article").URL("subdomain", "news",
- "category", "technology",
- "id", "42",
- "filter", "gorilla")
-```
-
-All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
-
-Regex support also exists for matching Headers within a route. For example, we could do:
-
-```go
-r.HeadersRegexp("Content-Type", "application/(text|json)")
-```
-
-...and the route will match both requests with a Content-Type of `application/json` as well as `application/text`
-
-There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
-
-```go
-// "http://news.example.com/"
-host, err := r.Get("article").URLHost("subdomain", "news")
-
-// "/articles/technology/42"
-path, err := r.Get("article").URLPath("category", "technology", "id", "42")
-```
-
-And if you use subrouters, host and path defined separately can be built as well:
-
-```go
-r := mux.NewRouter()
-s := r.Host("{subdomain}.example.com").Subrouter()
-s.Path("/articles/{category}/{id:[0-9]+}").
- HandlerFunc(ArticleHandler).
- Name("article")
-
-// "http://news.example.com/articles/technology/42"
-url, err := r.Get("article").URL("subdomain", "news",
- "category", "technology",
- "id", "42")
-```
-
-To find all the required variables for a given route when calling `URL()`, the method `GetVarNames()` is available:
-```go
-r := mux.NewRouter()
-r.Host("{domain}").
- Path("/{group}/{item_id}").
- Queries("some_data1", "{some_data1}").
- Queries("some_data2", "{some_data2}").
- Name("article")
-
-// Will print [domain group item_id some_data1 some_data2]
-fmt.Println(r.Get("article").GetVarNames())
-
-```
-### Walking Routes
-
-The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example,
-the following prints all of the registered routes:
-
-```go
-package main
-
-import (
- "fmt"
- "net/http"
- "strings"
-
- "github.com/gorilla/mux"
-)
-
-func handler(w http.ResponseWriter, r *http.Request) {
- return
-}
-
-func main() {
- r := mux.NewRouter()
- r.HandleFunc("/", handler)
- r.HandleFunc("/products", handler).Methods("POST")
- r.HandleFunc("/articles", handler).Methods("GET")
- r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT")
- r.HandleFunc("/authors", handler).Queries("surname", "{surname}")
- err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
- pathTemplate, err := route.GetPathTemplate()
- if err == nil {
- fmt.Println("ROUTE:", pathTemplate)
- }
- pathRegexp, err := route.GetPathRegexp()
- if err == nil {
- fmt.Println("Path regexp:", pathRegexp)
- }
- queriesTemplates, err := route.GetQueriesTemplates()
- if err == nil {
- fmt.Println("Queries templates:", strings.Join(queriesTemplates, ","))
- }
- queriesRegexps, err := route.GetQueriesRegexp()
- if err == nil {
- fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ","))
- }
- methods, err := route.GetMethods()
- if err == nil {
- fmt.Println("Methods:", strings.Join(methods, ","))
- }
- fmt.Println()
- return nil
- })
-
- if err != nil {
- fmt.Println(err)
- }
-
- http.Handle("/", r)
-}
-```
-
-### Graceful Shutdown
-
-Go 1.8 introduced the ability to [gracefully shutdown](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`:
-
-```go
-package main
-
-import (
- "context"
- "flag"
- "log"
- "net/http"
- "os"
- "os/signal"
- "time"
-
- "github.com/gorilla/mux"
-)
-
-func main() {
- var wait time.Duration
- flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully wait for existing connections to finish - e.g. 15s or 1m")
- flag.Parse()
-
- r := mux.NewRouter()
- // Add your routes as needed
-
- srv := &http.Server{
- Addr: "0.0.0.0:8080",
- // Good practice to set timeouts to avoid Slowloris attacks.
- WriteTimeout: time.Second * 15,
- ReadTimeout: time.Second * 15,
- IdleTimeout: time.Second * 60,
- Handler: r, // Pass our instance of gorilla/mux in.
- }
-
- // Run our server in a goroutine so that it doesn't block.
- go func() {
- if err := srv.ListenAndServe(); err != nil {
- log.Println(err)
- }
- }()
-
- c := make(chan os.Signal, 1)
- // We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C)
- // SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught.
- signal.Notify(c, os.Interrupt)
-
- // Block until we receive our signal.
- <-c
-
- // Create a deadline to wait for.
- ctx, cancel := context.WithTimeout(context.Background(), wait)
- defer cancel()
- // Doesn't block if no connections, but will otherwise wait
- // until the timeout deadline.
- srv.Shutdown(ctx)
- // Optionally, you could run srv.Shutdown in a goroutine and block on
- // <-ctx.Done() if your application should wait for other services
- // to finalize based on context cancellation.
- log.Println("shutting down")
- os.Exit(0)
-}
-```
-
-### Middleware
-
-Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added if a match is found, including its subrouters.
-Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking.
-
-Mux middlewares are defined using the de facto standard type:
-
-```go
-type MiddlewareFunc func(http.Handler) http.Handler
-```
-
-Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc. This takes advantage of closures being able access variables from the context where they are created, while retaining the signature enforced by the receivers.
-
-A very basic middleware which logs the URI of the request being handled could be written as:
-
-```go
-func loggingMiddleware(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- // Do stuff here
- log.Println(r.RequestURI)
- // Call the next handler, which can be another middleware in the chain, or the final handler.
- next.ServeHTTP(w, r)
- })
-}
-```
-
-Middlewares can be added to a router using `Router.Use()`:
-
-```go
-r := mux.NewRouter()
-r.HandleFunc("/", handler)
-r.Use(loggingMiddleware)
-```
-
-A more complex authentication middleware, which maps session token to users, could be written as:
-
-```go
-// Define our struct
-type authenticationMiddleware struct {
- tokenUsers map[string]string
-}
-
-// Initialize it somewhere
-func (amw *authenticationMiddleware) Populate() {
- amw.tokenUsers["00000000"] = "user0"
- amw.tokenUsers["aaaaaaaa"] = "userA"
- amw.tokenUsers["05f717e5"] = "randomUser"
- amw.tokenUsers["deadbeef"] = "user0"
-}
-
-// Middleware function, which will be called for each request
-func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- token := r.Header.Get("X-Session-Token")
-
- if user, found := amw.tokenUsers[token]; found {
- // We found the token in our map
- log.Printf("Authenticated user %s\n", user)
- // Pass down the request to the next middleware (or final handler)
- next.ServeHTTP(w, r)
- } else {
- // Write an error and stop the handler chain
- http.Error(w, "Forbidden", http.StatusForbidden)
- }
- })
-}
-```
-
-```go
-r := mux.NewRouter()
-r.HandleFunc("/", handler)
-
-amw := authenticationMiddleware{tokenUsers: make(map[string]string)}
-amw.Populate()
-
-r.Use(amw.Middleware)
-```
-
-Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it.
-
-### Handling CORS Requests
-
-[CORSMethodMiddleware](https://godoc.org/github.com/gorilla/mux#CORSMethodMiddleware) intends to make it easier to strictly set the `Access-Control-Allow-Methods` response header.
-
-* You will still need to use your own CORS handler to set the other CORS headers such as `Access-Control-Allow-Origin`
-* The middleware will set the `Access-Control-Allow-Methods` header to all the method matchers (e.g. `r.Methods(http.MethodGet, http.MethodPut, http.MethodOptions)` -> `Access-Control-Allow-Methods: GET,PUT,OPTIONS`) on a route
-* If you do not specify any methods, then:
-> _Important_: there must be an `OPTIONS` method matcher for the middleware to set the headers.
-
-Here is an example of using `CORSMethodMiddleware` along with a custom `OPTIONS` handler to set all the required CORS headers:
-
-```go
-package main
-
-import (
- "net/http"
- "github.com/gorilla/mux"
-)
-
-func main() {
- r := mux.NewRouter()
-
- // IMPORTANT: you must specify an OPTIONS method matcher for the middleware to set CORS headers
- r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodPatch, http.MethodOptions)
- r.Use(mux.CORSMethodMiddleware(r))
-
- http.ListenAndServe(":8080", r)
-}
-
-func fooHandler(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Access-Control-Allow-Origin", "*")
- if r.Method == http.MethodOptions {
- return
- }
-
- w.Write([]byte("foo"))
-}
-```
-
-And an request to `/foo` using something like:
-
-```bash
-curl localhost:8080/foo -v
-```
-
-Would look like:
-
-```bash
-* Trying ::1...
-* TCP_NODELAY set
-* Connected to localhost (::1) port 8080 (#0)
-> GET /foo HTTP/1.1
-> Host: localhost:8080
-> User-Agent: curl/7.59.0
-> Accept: */*
->
-< HTTP/1.1 200 OK
-< Access-Control-Allow-Methods: GET,PUT,PATCH,OPTIONS
-< Access-Control-Allow-Origin: *
-< Date: Fri, 28 Jun 2019 20:13:30 GMT
-< Content-Length: 3
-< Content-Type: text/plain; charset=utf-8
-<
-* Connection #0 to host localhost left intact
-foo
-```
-
-### Testing Handlers
-
-Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_.
-
-First, our simple HTTP handler:
-
-```go
-// endpoints.go
-package main
-
-func HealthCheckHandler(w http.ResponseWriter, r *http.Request) {
- // A very simple health check.
- w.Header().Set("Content-Type", "application/json")
- w.WriteHeader(http.StatusOK)
-
- // In the future we could report back on the status of our DB, or our cache
- // (e.g. Redis) by performing a simple PING, and include them in the response.
- io.WriteString(w, `{"alive": true}`)
-}
-
-func main() {
- r := mux.NewRouter()
- r.HandleFunc("/health", HealthCheckHandler)
-
- log.Fatal(http.ListenAndServe("localhost:8080", r))
-}
-```
-
-Our test code:
-
-```go
-// endpoints_test.go
-package main
-
-import (
- "net/http"
- "net/http/httptest"
- "testing"
-)
-
-func TestHealthCheckHandler(t *testing.T) {
- // Create a request to pass to our handler. We don't have any query parameters for now, so we'll
- // pass 'nil' as the third parameter.
- req, err := http.NewRequest("GET", "/health", nil)
- if err != nil {
- t.Fatal(err)
- }
-
- // We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.
- rr := httptest.NewRecorder()
- handler := http.HandlerFunc(HealthCheckHandler)
-
- // Our handlers satisfy http.Handler, so we can call their ServeHTTP method
- // directly and pass in our Request and ResponseRecorder.
- handler.ServeHTTP(rr, req)
-
- // Check the status code is what we expect.
- if status := rr.Code; status != http.StatusOK {
- t.Errorf("handler returned wrong status code: got %v want %v",
- status, http.StatusOK)
- }
-
- // Check the response body is what we expect.
- expected := `{"alive": true}`
- if rr.Body.String() != expected {
- t.Errorf("handler returned unexpected body: got %v want %v",
- rr.Body.String(), expected)
- }
-}
-```
-
-In the case that our routes have [variables](#examples), we can pass those in the request. We could write
-[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple
-possible route variables as needed.
-
-```go
-// endpoints.go
-func main() {
- r := mux.NewRouter()
- // A route with a route variable:
- r.HandleFunc("/metrics/{type}", MetricsHandler)
-
- log.Fatal(http.ListenAndServe("localhost:8080", r))
-}
-```
-
-Our test file, with a table-driven test of `routeVariables`:
-
-```go
-// endpoints_test.go
-func TestMetricsHandler(t *testing.T) {
- tt := []struct{
- routeVariable string
- shouldPass bool
- }{
- {"goroutines", true},
- {"heap", true},
- {"counters", true},
- {"queries", true},
- {"adhadaeqm3k", false},
- }
-
- for _, tc := range tt {
- path := fmt.Sprintf("/metrics/%s", tc.routeVariable)
- req, err := http.NewRequest("GET", path, nil)
- if err != nil {
- t.Fatal(err)
- }
-
- rr := httptest.NewRecorder()
-
- // To add the vars to the context,
- // we need to create a router through which we can pass the request.
- router := mux.NewRouter()
- router.HandleFunc("/metrics/{type}", MetricsHandler)
- router.ServeHTTP(rr, req)
-
- // In this case, our MetricsHandler returns a non-200 response
- // for a route variable it doesn't know about.
- if rr.Code == http.StatusOK && !tc.shouldPass {
- t.Errorf("handler should have failed on routeVariable %s: got %v want %v",
- tc.routeVariable, rr.Code, http.StatusOK)
- }
- }
-}
-```
-
-## Full Example
-
-Here's a complete, runnable example of a small `mux` based server:
-
-```go
-package main
-
-import (
- "net/http"
- "log"
- "github.com/gorilla/mux"
-)
-
-func YourHandler(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte("Gorilla!\n"))
-}
-
-func main() {
- r := mux.NewRouter()
- // Routes consist of a path and a handler function.
- r.HandleFunc("/", YourHandler)
-
- // Bind to a port and pass our router in
- log.Fatal(http.ListenAndServe(":8000", r))
-}
-```
-
-## License
-
-BSD licensed. See the LICENSE file for details.
diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go
deleted file mode 100644
index 80601351fd..0000000000
--- a/vendor/github.com/gorilla/mux/doc.go
+++ /dev/null
@@ -1,305 +0,0 @@
-// Copyright 2012 The Gorilla Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package mux implements a request router and dispatcher.
-
-The name mux stands for "HTTP request multiplexer". Like the standard
-http.ServeMux, mux.Router matches incoming requests against a list of
-registered routes and calls a handler for the route that matches the URL
-or other conditions. The main features are:
-
- - Requests can be matched based on URL host, path, path prefix, schemes,
- header and query values, HTTP methods or using custom matchers.
- - URL hosts, paths and query values can have variables with an optional
- regular expression.
- - Registered URLs can be built, or "reversed", which helps maintaining
- references to resources.
- - Routes can be used as subrouters: nested routes are only tested if the
- parent route matches. This is useful to define groups of routes that
- share common conditions like a host, a path prefix or other repeated
- attributes. As a bonus, this optimizes request matching.
- - It implements the http.Handler interface so it is compatible with the
- standard http.ServeMux.
-
-Let's start registering a couple of URL paths and handlers:
-
- func main() {
- r := mux.NewRouter()
- r.HandleFunc("/", HomeHandler)
- r.HandleFunc("/products", ProductsHandler)
- r.HandleFunc("/articles", ArticlesHandler)
- http.Handle("/", r)
- }
-
-Here we register three routes mapping URL paths to handlers. This is
-equivalent to how http.HandleFunc() works: if an incoming request URL matches
-one of the paths, the corresponding handler is called passing
-(http.ResponseWriter, *http.Request) as parameters.
-
-Paths can have variables. They are defined using the format {name} or
-{name:pattern}. If a regular expression pattern is not defined, the matched
-variable will be anything until the next slash. For example:
-
- r := mux.NewRouter()
- r.HandleFunc("/products/{key}", ProductHandler)
- r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
- r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
-
-Groups can be used inside patterns, as long as they are non-capturing (?:re). For example:
-
- r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler)
-
-The names are used to create a map of route variables which can be retrieved
-calling mux.Vars():
-
- vars := mux.Vars(request)
- category := vars["category"]
-
-Note that if any capturing groups are present, mux will panic() during parsing. To prevent
-this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to
-"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably
-when capturing groups were present.
-
-And this is all you need to know about the basic usage. More advanced options
-are explained below.
-
-Routes can also be restricted to a domain or subdomain. Just define a host
-pattern to be matched. They can also have variables:
-
- r := mux.NewRouter()
- // Only matches if domain is "www.example.com".
- r.Host("www.example.com")
- // Matches a dynamic subdomain.
- r.Host("{subdomain:[a-z]+}.domain.com")
-
-There are several other matchers that can be added. To match path prefixes:
-
- r.PathPrefix("/products/")
-
-...or HTTP methods:
-
- r.Methods("GET", "POST")
-
-...or URL schemes:
-
- r.Schemes("https")
-
-...or header values:
-
- r.Headers("X-Requested-With", "XMLHttpRequest")
-
-...or query values:
-
- r.Queries("key", "value")
-
-...or to use a custom matcher function:
-
- r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
- return r.ProtoMajor == 0
- })
-
-...and finally, it is possible to combine several matchers in a single route:
-
- r.HandleFunc("/products", ProductsHandler).
- Host("www.example.com").
- Methods("GET").
- Schemes("http")
-
-Setting the same matching conditions again and again can be boring, so we have
-a way to group several routes that share the same requirements.
-We call it "subrouting".
-
-For example, let's say we have several URLs that should only match when the
-host is "www.example.com". Create a route for that host and get a "subrouter"
-from it:
-
- r := mux.NewRouter()
- s := r.Host("www.example.com").Subrouter()
-
-Then register routes in the subrouter:
-
- s.HandleFunc("/products/", ProductsHandler)
- s.HandleFunc("/products/{key}", ProductHandler)
- s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
-
-The three URL paths we registered above will only be tested if the domain is
-"www.example.com", because the subrouter is tested first. This is not
-only convenient, but also optimizes request matching. You can create
-subrouters combining any attribute matchers accepted by a route.
-
-Subrouters can be used to create domain or path "namespaces": you define
-subrouters in a central place and then parts of the app can register its
-paths relatively to a given subrouter.
-
-There's one more thing about subroutes. When a subrouter has a path prefix,
-the inner routes use it as base for their paths:
-
- r := mux.NewRouter()
- s := r.PathPrefix("/products").Subrouter()
- // "/products/"
- s.HandleFunc("/", ProductsHandler)
- // "/products/{key}/"
- s.HandleFunc("/{key}/", ProductHandler)
- // "/products/{key}/details"
- s.HandleFunc("/{key}/details", ProductDetailsHandler)
-
-Note that the path provided to PathPrefix() represents a "wildcard": calling
-PathPrefix("/static/").Handler(...) means that the handler will be passed any
-request that matches "/static/*". This makes it easy to serve static files with mux:
-
- func main() {
- var dir string
-
- flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
- flag.Parse()
- r := mux.NewRouter()
-
- // This will serve files under http://localhost:8000/static/
- r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
-
- srv := &http.Server{
- Handler: r,
- Addr: "127.0.0.1:8000",
- // Good practice: enforce timeouts for servers you create!
- WriteTimeout: 15 * time.Second,
- ReadTimeout: 15 * time.Second,
- }
-
- log.Fatal(srv.ListenAndServe())
- }
-
-Now let's see how to build registered URLs.
-
-Routes can be named. All routes that define a name can have their URLs built,
-or "reversed". We define a name calling Name() on a route. For example:
-
- r := mux.NewRouter()
- r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
- Name("article")
-
-To build a URL, get the route and call the URL() method, passing a sequence of
-key/value pairs for the route variables. For the previous route, we would do:
-
- url, err := r.Get("article").URL("category", "technology", "id", "42")
-
-...and the result will be a url.URL with the following path:
-
- "/articles/technology/42"
-
-This also works for host and query value variables:
-
- r := mux.NewRouter()
- r.Host("{subdomain}.domain.com").
- Path("/articles/{category}/{id:[0-9]+}").
- Queries("filter", "{filter}").
- HandlerFunc(ArticleHandler).
- Name("article")
-
- // url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla"
- url, err := r.Get("article").URL("subdomain", "news",
- "category", "technology",
- "id", "42",
- "filter", "gorilla")
-
-All variables defined in the route are required, and their values must
-conform to the corresponding patterns. These requirements guarantee that a
-generated URL will always match a registered route -- the only exception is
-for explicitly defined "build-only" routes which never match.
-
-Regex support also exists for matching Headers within a route. For example, we could do:
-
- r.HeadersRegexp("Content-Type", "application/(text|json)")
-
-...and the route will match both requests with a Content-Type of `application/json` as well as
-`application/text`
-
-There's also a way to build only the URL host or path for a route:
-use the methods URLHost() or URLPath() instead. For the previous route,
-we would do:
-
- // "http://news.domain.com/"
- host, err := r.Get("article").URLHost("subdomain", "news")
-
- // "/articles/technology/42"
- path, err := r.Get("article").URLPath("category", "technology", "id", "42")
-
-And if you use subrouters, host and path defined separately can be built
-as well:
-
- r := mux.NewRouter()
- s := r.Host("{subdomain}.domain.com").Subrouter()
- s.Path("/articles/{category}/{id:[0-9]+}").
- HandlerFunc(ArticleHandler).
- Name("article")
-
- // "http://news.domain.com/articles/technology/42"
- url, err := r.Get("article").URL("subdomain", "news",
- "category", "technology",
- "id", "42")
-
-Mux supports the addition of middlewares to a Router, which are executed in the order they are added if a match is found, including its subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking.
-
- type MiddlewareFunc func(http.Handler) http.Handler
-
-Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created).
-
-A very basic middleware which logs the URI of the request being handled could be written as:
-
- func simpleMw(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- // Do stuff here
- log.Println(r.RequestURI)
- // Call the next handler, which can be another middleware in the chain, or the final handler.
- next.ServeHTTP(w, r)
- })
- }
-
-Middlewares can be added to a router using `Router.Use()`:
-
- r := mux.NewRouter()
- r.HandleFunc("/", handler)
- r.Use(simpleMw)
-
-A more complex authentication middleware, which maps session token to users, could be written as:
-
- // Define our struct
- type authenticationMiddleware struct {
- tokenUsers map[string]string
- }
-
- // Initialize it somewhere
- func (amw *authenticationMiddleware) Populate() {
- amw.tokenUsers["00000000"] = "user0"
- amw.tokenUsers["aaaaaaaa"] = "userA"
- amw.tokenUsers["05f717e5"] = "randomUser"
- amw.tokenUsers["deadbeef"] = "user0"
- }
-
- // Middleware function, which will be called for each request
- func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- token := r.Header.Get("X-Session-Token")
-
- if user, found := amw.tokenUsers[token]; found {
- // We found the token in our map
- log.Printf("Authenticated user %s\n", user)
- next.ServeHTTP(w, r)
- } else {
- http.Error(w, "Forbidden", http.StatusForbidden)
- }
- })
- }
-
- r := mux.NewRouter()
- r.HandleFunc("/", handler)
-
- amw := authenticationMiddleware{tokenUsers: make(map[string]string)}
- amw.Populate()
-
- r.Use(amw.Middleware)
-
-Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to.
-*/
-package mux
diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go
deleted file mode 100644
index cb51c565eb..0000000000
--- a/vendor/github.com/gorilla/mux/middleware.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package mux
-
-import (
- "net/http"
- "strings"
-)
-
-// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler.
-// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed
-// to it, and then calls the handler passed as parameter to the MiddlewareFunc.
-type MiddlewareFunc func(http.Handler) http.Handler
-
-// middleware interface is anything which implements a MiddlewareFunc named Middleware.
-type middleware interface {
- Middleware(handler http.Handler) http.Handler
-}
-
-// Middleware allows MiddlewareFunc to implement the middleware interface.
-func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler {
- return mw(handler)
-}
-
-// Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
-func (r *Router) Use(mwf ...MiddlewareFunc) {
- for _, fn := range mwf {
- r.middlewares = append(r.middlewares, fn)
- }
-}
-
-// useInterface appends a middleware to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
-func (r *Router) useInterface(mw middleware) {
- r.middlewares = append(r.middlewares, mw)
-}
-
-// CORSMethodMiddleware automatically sets the Access-Control-Allow-Methods response header
-// on requests for routes that have an OPTIONS method matcher to all the method matchers on
-// the route. Routes that do not explicitly handle OPTIONS requests will not be processed
-// by the middleware. See examples for usage.
-func CORSMethodMiddleware(r *Router) MiddlewareFunc {
- return func(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- allMethods, err := getAllMethodsForRoute(r, req)
- if err == nil {
- for _, v := range allMethods {
- if v == http.MethodOptions {
- w.Header().Set("Access-Control-Allow-Methods", strings.Join(allMethods, ","))
- }
- }
- }
-
- next.ServeHTTP(w, req)
- })
- }
-}
-
-// getAllMethodsForRoute returns all the methods from method matchers matching a given
-// request.
-func getAllMethodsForRoute(r *Router, req *http.Request) ([]string, error) {
- var allMethods []string
-
- for _, route := range r.routes {
- var match RouteMatch
- if route.Match(req, &match) || match.MatchErr == ErrMethodMismatch {
- methods, err := route.GetMethods()
- if err != nil {
- return nil, err
- }
-
- allMethods = append(allMethods, methods...)
- }
- }
-
- return allMethods, nil
-}
diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go
deleted file mode 100644
index 1e089906fa..0000000000
--- a/vendor/github.com/gorilla/mux/mux.go
+++ /dev/null
@@ -1,608 +0,0 @@
-// Copyright 2012 The Gorilla Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mux
-
-import (
- "context"
- "errors"
- "fmt"
- "net/http"
- "path"
- "regexp"
-)
-
-var (
- // ErrMethodMismatch is returned when the method in the request does not match
- // the method defined against the route.
- ErrMethodMismatch = errors.New("method is not allowed")
- // ErrNotFound is returned when no route match is found.
- ErrNotFound = errors.New("no matching route was found")
-)
-
-// NewRouter returns a new router instance.
-func NewRouter() *Router {
- return &Router{namedRoutes: make(map[string]*Route)}
-}
-
-// Router registers routes to be matched and dispatches a handler.
-//
-// It implements the http.Handler interface, so it can be registered to serve
-// requests:
-//
-// var router = mux.NewRouter()
-//
-// func main() {
-// http.Handle("/", router)
-// }
-//
-// Or, for Google App Engine, register it in a init() function:
-//
-// func init() {
-// http.Handle("/", router)
-// }
-//
-// This will send all incoming requests to the router.
-type Router struct {
- // Configurable Handler to be used when no route matches.
- // This can be used to render your own 404 Not Found errors.
- NotFoundHandler http.Handler
-
- // Configurable Handler to be used when the request method does not match the route.
- // This can be used to render your own 405 Method Not Allowed errors.
- MethodNotAllowedHandler http.Handler
-
- // Routes to be matched, in order.
- routes []*Route
-
- // Routes by name for URL building.
- namedRoutes map[string]*Route
-
- // If true, do not clear the request context after handling the request.
- //
- // Deprecated: No effect, since the context is stored on the request itself.
- KeepContext bool
-
- // Slice of middlewares to be called after a match is found
- middlewares []middleware
-
- // configuration shared with `Route`
- routeConf
-}
-
-// common route configuration shared between `Router` and `Route`
-type routeConf struct {
- // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
- useEncodedPath bool
-
- // If true, when the path pattern is "/path/", accessing "/path" will
- // redirect to the former and vice versa.
- strictSlash bool
-
- // If true, when the path pattern is "/path//to", accessing "/path//to"
- // will not redirect
- skipClean bool
-
- // Manager for the variables from host and path.
- regexp routeRegexpGroup
-
- // List of matchers.
- matchers []matcher
-
- // The scheme used when building URLs.
- buildScheme string
-
- buildVarsFunc BuildVarsFunc
-}
-
-// returns an effective deep copy of `routeConf`
-func copyRouteConf(r routeConf) routeConf {
- c := r
-
- if r.regexp.path != nil {
- c.regexp.path = copyRouteRegexp(r.regexp.path)
- }
-
- if r.regexp.host != nil {
- c.regexp.host = copyRouteRegexp(r.regexp.host)
- }
-
- c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries))
- for _, q := range r.regexp.queries {
- c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q))
- }
-
- c.matchers = make([]matcher, len(r.matchers))
- copy(c.matchers, r.matchers)
-
- return c
-}
-
-func copyRouteRegexp(r *routeRegexp) *routeRegexp {
- c := *r
- return &c
-}
-
-// Match attempts to match the given request against the router's registered routes.
-//
-// If the request matches a route of this router or one of its subrouters the Route,
-// Handler, and Vars fields of the the match argument are filled and this function
-// returns true.
-//
-// If the request does not match any of this router's or its subrouters' routes
-// then this function returns false. If available, a reason for the match failure
-// will be filled in the match argument's MatchErr field. If the match failure type
-// (eg: not found) has a registered handler, the handler is assigned to the Handler
-// field of the match argument.
-func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
- for _, route := range r.routes {
- if route.Match(req, match) {
- // Build middleware chain if no error was found
- if match.MatchErr == nil {
- for i := len(r.middlewares) - 1; i >= 0; i-- {
- match.Handler = r.middlewares[i].Middleware(match.Handler)
- }
- }
- return true
- }
- }
-
- if match.MatchErr == ErrMethodMismatch {
- if r.MethodNotAllowedHandler != nil {
- match.Handler = r.MethodNotAllowedHandler
- return true
- }
-
- return false
- }
-
- // Closest match for a router (includes sub-routers)
- if r.NotFoundHandler != nil {
- match.Handler = r.NotFoundHandler
- match.MatchErr = ErrNotFound
- return true
- }
-
- match.MatchErr = ErrNotFound
- return false
-}
-
-// ServeHTTP dispatches the handler registered in the matched route.
-//
-// When there is a match, the route variables can be retrieved calling
-// mux.Vars(request).
-func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- if !r.skipClean {
- path := req.URL.Path
- if r.useEncodedPath {
- path = req.URL.EscapedPath()
- }
- // Clean path to canonical form and redirect.
- if p := cleanPath(path); p != path {
-
- // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query.
- // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue:
- // http://code.google.com/p/go/issues/detail?id=5252
- url := *req.URL
- url.Path = p
- p = url.String()
-
- w.Header().Set("Location", p)
- w.WriteHeader(http.StatusMovedPermanently)
- return
- }
- }
- var match RouteMatch
- var handler http.Handler
- if r.Match(req, &match) {
- handler = match.Handler
- req = requestWithVars(req, match.Vars)
- req = requestWithRoute(req, match.Route)
- }
-
- if handler == nil && match.MatchErr == ErrMethodMismatch {
- handler = methodNotAllowedHandler()
- }
-
- if handler == nil {
- handler = http.NotFoundHandler()
- }
-
- handler.ServeHTTP(w, req)
-}
-
-// Get returns a route registered with the given name.
-func (r *Router) Get(name string) *Route {
- return r.namedRoutes[name]
-}
-
-// GetRoute returns a route registered with the given name. This method
-// was renamed to Get() and remains here for backwards compatibility.
-func (r *Router) GetRoute(name string) *Route {
- return r.namedRoutes[name]
-}
-
-// StrictSlash defines the trailing slash behavior for new routes. The initial
-// value is false.
-//
-// When true, if the route path is "/path/", accessing "/path" will perform a redirect
-// to the former and vice versa. In other words, your application will always
-// see the path as specified in the route.
-//
-// When false, if the route path is "/path", accessing "/path/" will not match
-// this route and vice versa.
-//
-// The re-direct is a HTTP 301 (Moved Permanently). Note that when this is set for
-// routes with a non-idempotent method (e.g. POST, PUT), the subsequent re-directed
-// request will be made as a GET by most clients. Use middleware or client settings
-// to modify this behaviour as needed.
-//
-// Special case: when a route sets a path prefix using the PathPrefix() method,
-// strict slash is ignored for that route because the redirect behavior can't
-// be determined from a prefix alone. However, any subrouters created from that
-// route inherit the original StrictSlash setting.
-func (r *Router) StrictSlash(value bool) *Router {
- r.strictSlash = value
- return r
-}
-
-// SkipClean defines the path cleaning behaviour for new routes. The initial
-// value is false. Users should be careful about which routes are not cleaned
-//
-// When true, if the route path is "/path//to", it will remain with the double
-// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/
-//
-// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will
-// become /fetch/http/xkcd.com/534
-func (r *Router) SkipClean(value bool) *Router {
- r.skipClean = value
- return r
-}
-
-// UseEncodedPath tells the router to match the encoded original path
-// to the routes.
-// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
-//
-// If not called, the router will match the unencoded path to the routes.
-// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to"
-func (r *Router) UseEncodedPath() *Router {
- r.useEncodedPath = true
- return r
-}
-
-// ----------------------------------------------------------------------------
-// Route factories
-// ----------------------------------------------------------------------------
-
-// NewRoute registers an empty route.
-func (r *Router) NewRoute() *Route {
- // initialize a route with a copy of the parent router's configuration
- route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
- r.routes = append(r.routes, route)
- return route
-}
-
-// Name registers a new route with a name.
-// See Route.Name().
-func (r *Router) Name(name string) *Route {
- return r.NewRoute().Name(name)
-}
-
-// Handle registers a new route with a matcher for the URL path.
-// See Route.Path() and Route.Handler().
-func (r *Router) Handle(path string, handler http.Handler) *Route {
- return r.NewRoute().Path(path).Handler(handler)
-}
-
-// HandleFunc registers a new route with a matcher for the URL path.
-// See Route.Path() and Route.HandlerFunc().
-func (r *Router) HandleFunc(path string, f func(http.ResponseWriter,
- *http.Request)) *Route {
- return r.NewRoute().Path(path).HandlerFunc(f)
-}
-
-// Headers registers a new route with a matcher for request header values.
-// See Route.Headers().
-func (r *Router) Headers(pairs ...string) *Route {
- return r.NewRoute().Headers(pairs...)
-}
-
-// Host registers a new route with a matcher for the URL host.
-// See Route.Host().
-func (r *Router) Host(tpl string) *Route {
- return r.NewRoute().Host(tpl)
-}
-
-// MatcherFunc registers a new route with a custom matcher function.
-// See Route.MatcherFunc().
-func (r *Router) MatcherFunc(f MatcherFunc) *Route {
- return r.NewRoute().MatcherFunc(f)
-}
-
-// Methods registers a new route with a matcher for HTTP methods.
-// See Route.Methods().
-func (r *Router) Methods(methods ...string) *Route {
- return r.NewRoute().Methods(methods...)
-}
-
-// Path registers a new route with a matcher for the URL path.
-// See Route.Path().
-func (r *Router) Path(tpl string) *Route {
- return r.NewRoute().Path(tpl)
-}
-
-// PathPrefix registers a new route with a matcher for the URL path prefix.
-// See Route.PathPrefix().
-func (r *Router) PathPrefix(tpl string) *Route {
- return r.NewRoute().PathPrefix(tpl)
-}
-
-// Queries registers a new route with a matcher for URL query values.
-// See Route.Queries().
-func (r *Router) Queries(pairs ...string) *Route {
- return r.NewRoute().Queries(pairs...)
-}
-
-// Schemes registers a new route with a matcher for URL schemes.
-// See Route.Schemes().
-func (r *Router) Schemes(schemes ...string) *Route {
- return r.NewRoute().Schemes(schemes...)
-}
-
-// BuildVarsFunc registers a new route with a custom function for modifying
-// route variables before building a URL.
-func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {
- return r.NewRoute().BuildVarsFunc(f)
-}
-
-// Walk walks the router and all its sub-routers, calling walkFn for each route
-// in the tree. The routes are walked in the order they were added. Sub-routers
-// are explored depth-first.
-func (r *Router) Walk(walkFn WalkFunc) error {
- return r.walk(walkFn, []*Route{})
-}
-
-// SkipRouter is used as a return value from WalkFuncs to indicate that the
-// router that walk is about to descend down to should be skipped.
-var SkipRouter = errors.New("skip this router")
-
-// WalkFunc is the type of the function called for each route visited by Walk.
-// At every invocation, it is given the current route, and the current router,
-// and a list of ancestor routes that lead to the current route.
-type WalkFunc func(route *Route, router *Router, ancestors []*Route) error
-
-func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
- for _, t := range r.routes {
- err := walkFn(t, r, ancestors)
- if err == SkipRouter {
- continue
- }
- if err != nil {
- return err
- }
- for _, sr := range t.matchers {
- if h, ok := sr.(*Router); ok {
- ancestors = append(ancestors, t)
- err := h.walk(walkFn, ancestors)
- if err != nil {
- return err
- }
- ancestors = ancestors[:len(ancestors)-1]
- }
- }
- if h, ok := t.handler.(*Router); ok {
- ancestors = append(ancestors, t)
- err := h.walk(walkFn, ancestors)
- if err != nil {
- return err
- }
- ancestors = ancestors[:len(ancestors)-1]
- }
- }
- return nil
-}
-
-// ----------------------------------------------------------------------------
-// Context
-// ----------------------------------------------------------------------------
-
-// RouteMatch stores information about a matched route.
-type RouteMatch struct {
- Route *Route
- Handler http.Handler
- Vars map[string]string
-
- // MatchErr is set to appropriate matching error
- // It is set to ErrMethodMismatch if there is a mismatch in
- // the request method and route method
- MatchErr error
-}
-
-type contextKey int
-
-const (
- varsKey contextKey = iota
- routeKey
-)
-
-// Vars returns the route variables for the current request, if any.
-func Vars(r *http.Request) map[string]string {
- if rv := r.Context().Value(varsKey); rv != nil {
- return rv.(map[string]string)
- }
- return nil
-}
-
-// CurrentRoute returns the matched route for the current request, if any.
-// This only works when called inside the handler of the matched route
-// because the matched route is stored in the request context which is cleared
-// after the handler returns.
-func CurrentRoute(r *http.Request) *Route {
- if rv := r.Context().Value(routeKey); rv != nil {
- return rv.(*Route)
- }
- return nil
-}
-
-func requestWithVars(r *http.Request, vars map[string]string) *http.Request {
- ctx := context.WithValue(r.Context(), varsKey, vars)
- return r.WithContext(ctx)
-}
-
-func requestWithRoute(r *http.Request, route *Route) *http.Request {
- ctx := context.WithValue(r.Context(), routeKey, route)
- return r.WithContext(ctx)
-}
-
-// ----------------------------------------------------------------------------
-// Helpers
-// ----------------------------------------------------------------------------
-
-// cleanPath returns the canonical path for p, eliminating . and .. elements.
-// Borrowed from the net/http package.
-func cleanPath(p string) string {
- if p == "" {
- return "/"
- }
- if p[0] != '/' {
- p = "/" + p
- }
- np := path.Clean(p)
- // path.Clean removes trailing slash except for root;
- // put the trailing slash back if necessary.
- if p[len(p)-1] == '/' && np != "/" {
- np += "/"
- }
-
- return np
-}
-
-// uniqueVars returns an error if two slices contain duplicated strings.
-func uniqueVars(s1, s2 []string) error {
- for _, v1 := range s1 {
- for _, v2 := range s2 {
- if v1 == v2 {
- return fmt.Errorf("mux: duplicated route variable %q", v2)
- }
- }
- }
- return nil
-}
-
-// checkPairs returns the count of strings passed in, and an error if
-// the count is not an even number.
-func checkPairs(pairs ...string) (int, error) {
- length := len(pairs)
- if length%2 != 0 {
- return length, fmt.Errorf(
- "mux: number of parameters must be multiple of 2, got %v", pairs)
- }
- return length, nil
-}
-
-// mapFromPairsToString converts variadic string parameters to a
-// string to string map.
-func mapFromPairsToString(pairs ...string) (map[string]string, error) {
- length, err := checkPairs(pairs...)
- if err != nil {
- return nil, err
- }
- m := make(map[string]string, length/2)
- for i := 0; i < length; i += 2 {
- m[pairs[i]] = pairs[i+1]
- }
- return m, nil
-}
-
-// mapFromPairsToRegex converts variadic string parameters to a
-// string to regex map.
-func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
- length, err := checkPairs(pairs...)
- if err != nil {
- return nil, err
- }
- m := make(map[string]*regexp.Regexp, length/2)
- for i := 0; i < length; i += 2 {
- regex, err := regexp.Compile(pairs[i+1])
- if err != nil {
- return nil, err
- }
- m[pairs[i]] = regex
- }
- return m, nil
-}
-
-// matchInArray returns true if the given string value is in the array.
-func matchInArray(arr []string, value string) bool {
- for _, v := range arr {
- if v == value {
- return true
- }
- }
- return false
-}
-
-// matchMapWithString returns true if the given key/value pairs exist in a given map.
-func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool {
- for k, v := range toCheck {
- // Check if key exists.
- if canonicalKey {
- k = http.CanonicalHeaderKey(k)
- }
- if values := toMatch[k]; values == nil {
- return false
- } else if v != "" {
- // If value was defined as an empty string we only check that the
- // key exists. Otherwise we also check for equality.
- valueExists := false
- for _, value := range values {
- if v == value {
- valueExists = true
- break
- }
- }
- if !valueExists {
- return false
- }
- }
- }
- return true
-}
-
-// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against
-// the given regex
-func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool {
- for k, v := range toCheck {
- // Check if key exists.
- if canonicalKey {
- k = http.CanonicalHeaderKey(k)
- }
- if values := toMatch[k]; values == nil {
- return false
- } else if v != nil {
- // If value was defined as an empty string we only check that the
- // key exists. Otherwise we also check for equality.
- valueExists := false
- for _, value := range values {
- if v.MatchString(value) {
- valueExists = true
- break
- }
- }
- if !valueExists {
- return false
- }
- }
- }
- return true
-}
-
-// methodNotAllowed replies to the request with an HTTP status code 405.
-func methodNotAllowed(w http.ResponseWriter, r *http.Request) {
- w.WriteHeader(http.StatusMethodNotAllowed)
-}
-
-// methodNotAllowedHandler returns a simple request handler
-// that replies to each request with a status code 405.
-func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) }
diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go
deleted file mode 100644
index 5d05cfa0e9..0000000000
--- a/vendor/github.com/gorilla/mux/regexp.go
+++ /dev/null
@@ -1,388 +0,0 @@
-// Copyright 2012 The Gorilla Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mux
-
-import (
- "bytes"
- "fmt"
- "net/http"
- "net/url"
- "regexp"
- "strconv"
- "strings"
-)
-
-type routeRegexpOptions struct {
- strictSlash bool
- useEncodedPath bool
-}
-
-type regexpType int
-
-const (
- regexpTypePath regexpType = iota
- regexpTypeHost
- regexpTypePrefix
- regexpTypeQuery
-)
-
-// newRouteRegexp parses a route template and returns a routeRegexp,
-// used to match a host, a path or a query string.
-//
-// It will extract named variables, assemble a regexp to be matched, create
-// a "reverse" template to build URLs and compile regexps to validate variable
-// values used in URL building.
-//
-// Previously we accepted only Python-like identifiers for variable
-// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
-// name and pattern can't be empty, and names can't contain a colon.
-func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) {
- // Check if it is well-formed.
- idxs, errBraces := braceIndices(tpl)
- if errBraces != nil {
- return nil, errBraces
- }
- // Backup the original.
- template := tpl
- // Now let's parse it.
- defaultPattern := "[^/]+"
- if typ == regexpTypeQuery {
- defaultPattern = ".*"
- } else if typ == regexpTypeHost {
- defaultPattern = "[^.]+"
- }
- // Only match strict slash if not matching
- if typ != regexpTypePath {
- options.strictSlash = false
- }
- // Set a flag for strictSlash.
- endSlash := false
- if options.strictSlash && strings.HasSuffix(tpl, "/") {
- tpl = tpl[:len(tpl)-1]
- endSlash = true
- }
- varsN := make([]string, len(idxs)/2)
- varsR := make([]*regexp.Regexp, len(idxs)/2)
- pattern := bytes.NewBufferString("")
- pattern.WriteByte('^')
- reverse := bytes.NewBufferString("")
- var end int
- var err error
- for i := 0; i < len(idxs); i += 2 {
- // Set all values we are interested in.
- raw := tpl[end:idxs[i]]
- end = idxs[i+1]
- parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
- name := parts[0]
- patt := defaultPattern
- if len(parts) == 2 {
- patt = parts[1]
- }
- // Name or pattern can't be empty.
- if name == "" || patt == "" {
- return nil, fmt.Errorf("mux: missing name or pattern in %q",
- tpl[idxs[i]:end])
- }
- // Build the regexp pattern.
- fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt)
-
- // Build the reverse template.
- fmt.Fprintf(reverse, "%s%%s", raw)
-
- // Append variable name and compiled pattern.
- varsN[i/2] = name
- varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
- if err != nil {
- return nil, err
- }
- }
- // Add the remaining.
- raw := tpl[end:]
- pattern.WriteString(regexp.QuoteMeta(raw))
- if options.strictSlash {
- pattern.WriteString("[/]?")
- }
- if typ == regexpTypeQuery {
- // Add the default pattern if the query value is empty
- if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
- pattern.WriteString(defaultPattern)
- }
- }
- if typ != regexpTypePrefix {
- pattern.WriteByte('$')
- }
-
- var wildcardHostPort bool
- if typ == regexpTypeHost {
- if !strings.Contains(pattern.String(), ":") {
- wildcardHostPort = true
- }
- }
- reverse.WriteString(raw)
- if endSlash {
- reverse.WriteByte('/')
- }
- // Compile full regexp.
- reg, errCompile := regexp.Compile(pattern.String())
- if errCompile != nil {
- return nil, errCompile
- }
-
- // Check for capturing groups which used to work in older versions
- if reg.NumSubexp() != len(idxs)/2 {
- panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) +
- "Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)")
- }
-
- // Done!
- return &routeRegexp{
- template: template,
- regexpType: typ,
- options: options,
- regexp: reg,
- reverse: reverse.String(),
- varsN: varsN,
- varsR: varsR,
- wildcardHostPort: wildcardHostPort,
- }, nil
-}
-
-// routeRegexp stores a regexp to match a host or path and information to
-// collect and validate route variables.
-type routeRegexp struct {
- // The unmodified template.
- template string
- // The type of match
- regexpType regexpType
- // Options for matching
- options routeRegexpOptions
- // Expanded regexp.
- regexp *regexp.Regexp
- // Reverse template.
- reverse string
- // Variable names.
- varsN []string
- // Variable regexps (validators).
- varsR []*regexp.Regexp
- // Wildcard host-port (no strict port match in hostname)
- wildcardHostPort bool
-}
-
-// Match matches the regexp against the URL host or path.
-func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
- if r.regexpType == regexpTypeHost {
- host := getHost(req)
- if r.wildcardHostPort {
- // Don't be strict on the port match
- if i := strings.Index(host, ":"); i != -1 {
- host = host[:i]
- }
- }
- return r.regexp.MatchString(host)
- }
-
- if r.regexpType == regexpTypeQuery {
- return r.matchQueryString(req)
- }
- path := req.URL.Path
- if r.options.useEncodedPath {
- path = req.URL.EscapedPath()
- }
- return r.regexp.MatchString(path)
-}
-
-// url builds a URL part using the given values.
-func (r *routeRegexp) url(values map[string]string) (string, error) {
- urlValues := make([]interface{}, len(r.varsN))
- for k, v := range r.varsN {
- value, ok := values[v]
- if !ok {
- return "", fmt.Errorf("mux: missing route variable %q", v)
- }
- if r.regexpType == regexpTypeQuery {
- value = url.QueryEscape(value)
- }
- urlValues[k] = value
- }
- rv := fmt.Sprintf(r.reverse, urlValues...)
- if !r.regexp.MatchString(rv) {
- // The URL is checked against the full regexp, instead of checking
- // individual variables. This is faster but to provide a good error
- // message, we check individual regexps if the URL doesn't match.
- for k, v := range r.varsN {
- if !r.varsR[k].MatchString(values[v]) {
- return "", fmt.Errorf(
- "mux: variable %q doesn't match, expected %q", values[v],
- r.varsR[k].String())
- }
- }
- }
- return rv, nil
-}
-
-// getURLQuery returns a single query parameter from a request URL.
-// For a URL with foo=bar&baz=ding, we return only the relevant key
-// value pair for the routeRegexp.
-func (r *routeRegexp) getURLQuery(req *http.Request) string {
- if r.regexpType != regexpTypeQuery {
- return ""
- }
- templateKey := strings.SplitN(r.template, "=", 2)[0]
- val, ok := findFirstQueryKey(req.URL.RawQuery, templateKey)
- if ok {
- return templateKey + "=" + val
- }
- return ""
-}
-
-// findFirstQueryKey returns the same result as (*url.URL).Query()[key][0].
-// If key was not found, empty string and false is returned.
-func findFirstQueryKey(rawQuery, key string) (value string, ok bool) {
- query := []byte(rawQuery)
- for len(query) > 0 {
- foundKey := query
- if i := bytes.IndexAny(foundKey, "&;"); i >= 0 {
- foundKey, query = foundKey[:i], foundKey[i+1:]
- } else {
- query = query[:0]
- }
- if len(foundKey) == 0 {
- continue
- }
- var value []byte
- if i := bytes.IndexByte(foundKey, '='); i >= 0 {
- foundKey, value = foundKey[:i], foundKey[i+1:]
- }
- if len(foundKey) < len(key) {
- // Cannot possibly be key.
- continue
- }
- keyString, err := url.QueryUnescape(string(foundKey))
- if err != nil {
- continue
- }
- if keyString != key {
- continue
- }
- valueString, err := url.QueryUnescape(string(value))
- if err != nil {
- continue
- }
- return valueString, true
- }
- return "", false
-}
-
-func (r *routeRegexp) matchQueryString(req *http.Request) bool {
- return r.regexp.MatchString(r.getURLQuery(req))
-}
-
-// braceIndices returns the first level curly brace indices from a string.
-// It returns an error in case of unbalanced braces.
-func braceIndices(s string) ([]int, error) {
- var level, idx int
- var idxs []int
- for i := 0; i < len(s); i++ {
- switch s[i] {
- case '{':
- if level++; level == 1 {
- idx = i
- }
- case '}':
- if level--; level == 0 {
- idxs = append(idxs, idx, i+1)
- } else if level < 0 {
- return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
- }
- }
- }
- if level != 0 {
- return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
- }
- return idxs, nil
-}
-
-// varGroupName builds a capturing group name for the indexed variable.
-func varGroupName(idx int) string {
- return "v" + strconv.Itoa(idx)
-}
-
-// ----------------------------------------------------------------------------
-// routeRegexpGroup
-// ----------------------------------------------------------------------------
-
-// routeRegexpGroup groups the route matchers that carry variables.
-type routeRegexpGroup struct {
- host *routeRegexp
- path *routeRegexp
- queries []*routeRegexp
-}
-
-// setMatch extracts the variables from the URL once a route matches.
-func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
- // Store host variables.
- if v.host != nil {
- host := getHost(req)
- if v.host.wildcardHostPort {
- // Don't be strict on the port match
- if i := strings.Index(host, ":"); i != -1 {
- host = host[:i]
- }
- }
- matches := v.host.regexp.FindStringSubmatchIndex(host)
- if len(matches) > 0 {
- extractVars(host, matches, v.host.varsN, m.Vars)
- }
- }
- path := req.URL.Path
- if r.useEncodedPath {
- path = req.URL.EscapedPath()
- }
- // Store path variables.
- if v.path != nil {
- matches := v.path.regexp.FindStringSubmatchIndex(path)
- if len(matches) > 0 {
- extractVars(path, matches, v.path.varsN, m.Vars)
- // Check if we should redirect.
- if v.path.options.strictSlash {
- p1 := strings.HasSuffix(path, "/")
- p2 := strings.HasSuffix(v.path.template, "/")
- if p1 != p2 {
- u, _ := url.Parse(req.URL.String())
- if p1 {
- u.Path = u.Path[:len(u.Path)-1]
- } else {
- u.Path += "/"
- }
- m.Handler = http.RedirectHandler(u.String(), http.StatusMovedPermanently)
- }
- }
- }
- }
- // Store query string variables.
- for _, q := range v.queries {
- queryURL := q.getURLQuery(req)
- matches := q.regexp.FindStringSubmatchIndex(queryURL)
- if len(matches) > 0 {
- extractVars(queryURL, matches, q.varsN, m.Vars)
- }
- }
-}
-
-// getHost tries its best to return the request host.
-// According to section 14.23 of RFC 2616 the Host header
-// can include the port number if the default value of 80 is not used.
-func getHost(r *http.Request) string {
- if r.URL.IsAbs() {
- return r.URL.Host
- }
- return r.Host
-}
-
-func extractVars(input string, matches []int, names []string, output map[string]string) {
- for i, name := range names {
- output[name] = input[matches[2*i+2]:matches[2*i+3]]
- }
-}
diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go
deleted file mode 100644
index e8f11df221..0000000000
--- a/vendor/github.com/gorilla/mux/route.go
+++ /dev/null
@@ -1,765 +0,0 @@
-// Copyright 2012 The Gorilla Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mux
-
-import (
- "errors"
- "fmt"
- "net/http"
- "net/url"
- "regexp"
- "strings"
-)
-
-// Route stores information to match a request and build URLs.
-type Route struct {
- // Request handler for the route.
- handler http.Handler
- // If true, this route never matches: it is only used to build URLs.
- buildOnly bool
- // The name used to build URLs.
- name string
- // Error resulted from building a route.
- err error
-
- // "global" reference to all named routes
- namedRoutes map[string]*Route
-
- // config possibly passed in from `Router`
- routeConf
-}
-
-// SkipClean reports whether path cleaning is enabled for this route via
-// Router.SkipClean.
-func (r *Route) SkipClean() bool {
- return r.skipClean
-}
-
-// Match matches the route against the request.
-func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
- if r.buildOnly || r.err != nil {
- return false
- }
-
- var matchErr error
-
- // Match everything.
- for _, m := range r.matchers {
- if matched := m.Match(req, match); !matched {
- if _, ok := m.(methodMatcher); ok {
- matchErr = ErrMethodMismatch
- continue
- }
-
- // Ignore ErrNotFound errors. These errors arise from match call
- // to Subrouters.
- //
- // This prevents subsequent matching subrouters from failing to
- // run middleware. If not ignored, the middleware would see a
- // non-nil MatchErr and be skipped, even when there was a
- // matching route.
- if match.MatchErr == ErrNotFound {
- match.MatchErr = nil
- }
-
- matchErr = nil // nolint:ineffassign
- return false
- } else {
- // Multiple routes may share the same path but use different HTTP methods. For instance:
- // Route 1: POST "/users/{id}".
- // Route 2: GET "/users/{id}", parameters: "id": "[0-9]+".
- //
- // The router must handle these cases correctly. For a GET request to "/users/abc" with "id" as "-2",
- // The router should return a "Not Found" error as no route fully matches this request.
- if match.MatchErr == ErrMethodMismatch {
- match.MatchErr = nil
- }
- }
- }
-
- if matchErr != nil {
- match.MatchErr = matchErr
- return false
- }
-
- if match.MatchErr == ErrMethodMismatch && r.handler != nil {
- // We found a route which matches request method, clear MatchErr
- match.MatchErr = nil
- // Then override the mis-matched handler
- match.Handler = r.handler
- }
-
- // Yay, we have a match. Let's collect some info about it.
- if match.Route == nil {
- match.Route = r
- }
- if match.Handler == nil {
- match.Handler = r.handler
- }
- if match.Vars == nil {
- match.Vars = make(map[string]string)
- }
-
- // Set variables.
- r.regexp.setMatch(req, match, r)
- return true
-}
-
-// ----------------------------------------------------------------------------
-// Route attributes
-// ----------------------------------------------------------------------------
-
-// GetError returns an error resulted from building the route, if any.
-func (r *Route) GetError() error {
- return r.err
-}
-
-// BuildOnly sets the route to never match: it is only used to build URLs.
-func (r *Route) BuildOnly() *Route {
- r.buildOnly = true
- return r
-}
-
-// Handler --------------------------------------------------------------------
-
-// Handler sets a handler for the route.
-func (r *Route) Handler(handler http.Handler) *Route {
- if r.err == nil {
- r.handler = handler
- }
- return r
-}
-
-// HandlerFunc sets a handler function for the route.
-func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {
- return r.Handler(http.HandlerFunc(f))
-}
-
-// GetHandler returns the handler for the route, if any.
-func (r *Route) GetHandler() http.Handler {
- return r.handler
-}
-
-// Name -----------------------------------------------------------------------
-
-// Name sets the name for the route, used to build URLs.
-// It is an error to call Name more than once on a route.
-func (r *Route) Name(name string) *Route {
- if r.name != "" {
- r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
- r.name, name)
- }
- if r.err == nil {
- r.name = name
- r.namedRoutes[name] = r
- }
- return r
-}
-
-// GetName returns the name for the route, if any.
-func (r *Route) GetName() string {
- return r.name
-}
-
-// ----------------------------------------------------------------------------
-// Matchers
-// ----------------------------------------------------------------------------
-
-// matcher types try to match a request.
-type matcher interface {
- Match(*http.Request, *RouteMatch) bool
-}
-
-// addMatcher adds a matcher to the route.
-func (r *Route) addMatcher(m matcher) *Route {
- if r.err == nil {
- r.matchers = append(r.matchers, m)
- }
- return r
-}
-
-// addRegexpMatcher adds a host or path matcher and builder to a route.
-func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error {
- if r.err != nil {
- return r.err
- }
- if typ == regexpTypePath || typ == regexpTypePrefix {
- if len(tpl) > 0 && tpl[0] != '/' {
- return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
- }
- if r.regexp.path != nil {
- tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
- }
- }
- rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{
- strictSlash: r.strictSlash,
- useEncodedPath: r.useEncodedPath,
- })
- if err != nil {
- return err
- }
- for _, q := range r.regexp.queries {
- if err = uniqueVars(rr.varsN, q.varsN); err != nil {
- return err
- }
- }
- if typ == regexpTypeHost {
- if r.regexp.path != nil {
- if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
- return err
- }
- }
- r.regexp.host = rr
- } else {
- if r.regexp.host != nil {
- if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
- return err
- }
- }
- if typ == regexpTypeQuery {
- r.regexp.queries = append(r.regexp.queries, rr)
- } else {
- r.regexp.path = rr
- }
- }
- r.addMatcher(rr)
- return nil
-}
-
-// Headers --------------------------------------------------------------------
-
-// headerMatcher matches the request against header values.
-type headerMatcher map[string]string
-
-func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
- return matchMapWithString(m, r.Header, true)
-}
-
-// Headers adds a matcher for request header values.
-// It accepts a sequence of key/value pairs to be matched. For example:
-//
-// r := mux.NewRouter().NewRoute()
-// r.Headers("Content-Type", "application/json",
-// "X-Requested-With", "XMLHttpRequest")
-//
-// The above route will only match if both request header values match.
-// If the value is an empty string, it will match any value if the key is set.
-func (r *Route) Headers(pairs ...string) *Route {
- if r.err == nil {
- var headers map[string]string
- headers, r.err = mapFromPairsToString(pairs...)
- return r.addMatcher(headerMatcher(headers))
- }
- return r
-}
-
-// headerRegexMatcher matches the request against the route given a regex for the header
-type headerRegexMatcher map[string]*regexp.Regexp
-
-func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
- return matchMapWithRegex(m, r.Header, true)
-}
-
-// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex
-// support. For example:
-//
-// r := mux.NewRouter().NewRoute()
-// r.HeadersRegexp("Content-Type", "application/(text|json)",
-// "X-Requested-With", "XMLHttpRequest")
-//
-// The above route will only match if both the request header matches both regular expressions.
-// If the value is an empty string, it will match any value if the key is set.
-// Use the start and end of string anchors (^ and $) to match an exact value.
-func (r *Route) HeadersRegexp(pairs ...string) *Route {
- if r.err == nil {
- var headers map[string]*regexp.Regexp
- headers, r.err = mapFromPairsToRegex(pairs...)
- return r.addMatcher(headerRegexMatcher(headers))
- }
- return r
-}
-
-// Host -----------------------------------------------------------------------
-
-// Host adds a matcher for the URL host.
-// It accepts a template with zero or more URL variables enclosed by {}.
-// Variables can define an optional regexp pattern to be matched:
-//
-// - {name} matches anything until the next dot.
-//
-// - {name:pattern} matches the given regexp pattern.
-//
-// For example:
-//
-// r := mux.NewRouter().NewRoute()
-// r.Host("www.example.com")
-// r.Host("{subdomain}.domain.com")
-// r.Host("{subdomain:[a-z]+}.domain.com")
-//
-// Variable names must be unique in a given route. They can be retrieved
-// calling mux.Vars(request).
-func (r *Route) Host(tpl string) *Route {
- r.err = r.addRegexpMatcher(tpl, regexpTypeHost)
- return r
-}
-
-// MatcherFunc ----------------------------------------------------------------
-
-// MatcherFunc is the function signature used by custom matchers.
-type MatcherFunc func(*http.Request, *RouteMatch) bool
-
-// Match returns the match for a given request.
-func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
- return m(r, match)
-}
-
-// MatcherFunc adds a custom function to be used as request matcher.
-func (r *Route) MatcherFunc(f MatcherFunc) *Route {
- return r.addMatcher(f)
-}
-
-// Methods --------------------------------------------------------------------
-
-// methodMatcher matches the request against HTTP methods.
-type methodMatcher []string
-
-func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
- return matchInArray(m, r.Method)
-}
-
-// Methods adds a matcher for HTTP methods.
-// It accepts a sequence of one or more methods to be matched, e.g.:
-// "GET", "POST", "PUT".
-func (r *Route) Methods(methods ...string) *Route {
- for k, v := range methods {
- methods[k] = strings.ToUpper(v)
- }
- return r.addMatcher(methodMatcher(methods))
-}
-
-// Path -----------------------------------------------------------------------
-
-// Path adds a matcher for the URL path.
-// It accepts a template with zero or more URL variables enclosed by {}. The
-// template must start with a "/".
-// Variables can define an optional regexp pattern to be matched:
-//
-// - {name} matches anything until the next slash.
-//
-// - {name:pattern} matches the given regexp pattern.
-//
-// For example:
-//
-// r := mux.NewRouter().NewRoute()
-// r.Path("/products/").Handler(ProductsHandler)
-// r.Path("/products/{key}").Handler(ProductsHandler)
-// r.Path("/articles/{category}/{id:[0-9]+}").
-// Handler(ArticleHandler)
-//
-// Variable names must be unique in a given route. They can be retrieved
-// calling mux.Vars(request).
-func (r *Route) Path(tpl string) *Route {
- r.err = r.addRegexpMatcher(tpl, regexpTypePath)
- return r
-}
-
-// PathPrefix -----------------------------------------------------------------
-
-// PathPrefix adds a matcher for the URL path prefix. This matches if the given
-// template is a prefix of the full URL path. See Route.Path() for details on
-// the tpl argument.
-//
-// Note that it does not treat slashes specially ("/foobar/" will be matched by
-// the prefix "/foo") so you may want to use a trailing slash here.
-//
-// Also note that the setting of Router.StrictSlash() has no effect on routes
-// with a PathPrefix matcher.
-func (r *Route) PathPrefix(tpl string) *Route {
- r.err = r.addRegexpMatcher(tpl, regexpTypePrefix)
- return r
-}
-
-// Query ----------------------------------------------------------------------
-
-// Queries adds a matcher for URL query values.
-// It accepts a sequence of key/value pairs. Values may define variables.
-// For example:
-//
-// r := mux.NewRouter().NewRoute()
-// r.Queries("foo", "bar", "id", "{id:[0-9]+}")
-//
-// The above route will only match if the URL contains the defined queries
-// values, e.g.: ?foo=bar&id=42.
-//
-// If the value is an empty string, it will match any value if the key is set.
-//
-// Variables can define an optional regexp pattern to be matched:
-//
-// - {name} matches anything until the next slash.
-//
-// - {name:pattern} matches the given regexp pattern.
-func (r *Route) Queries(pairs ...string) *Route {
- length := len(pairs)
- if length%2 != 0 {
- r.err = fmt.Errorf(
- "mux: number of parameters must be multiple of 2, got %v", pairs)
- return nil
- }
- for i := 0; i < length; i += 2 {
- if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil {
- return r
- }
- }
-
- return r
-}
-
-// Schemes --------------------------------------------------------------------
-
-// schemeMatcher matches the request against URL schemes.
-type schemeMatcher []string
-
-func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
- scheme := r.URL.Scheme
- // https://golang.org/pkg/net/http/#Request
- // "For [most] server requests, fields other than Path and RawQuery will be
- // empty."
- // Since we're an http muxer, the scheme is either going to be http or https
- // though, so we can just set it based on the tls termination state.
- if scheme == "" {
- if r.TLS == nil {
- scheme = "http"
- } else {
- scheme = "https"
- }
- }
- return matchInArray(m, scheme)
-}
-
-// Schemes adds a matcher for URL schemes.
-// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
-// If the request's URL has a scheme set, it will be matched against.
-// Generally, the URL scheme will only be set if a previous handler set it,
-// such as the ProxyHeaders handler from gorilla/handlers.
-// If unset, the scheme will be determined based on the request's TLS
-// termination state.
-// The first argument to Schemes will be used when constructing a route URL.
-func (r *Route) Schemes(schemes ...string) *Route {
- for k, v := range schemes {
- schemes[k] = strings.ToLower(v)
- }
- if len(schemes) > 0 {
- r.buildScheme = schemes[0]
- }
- return r.addMatcher(schemeMatcher(schemes))
-}
-
-// BuildVarsFunc --------------------------------------------------------------
-
-// BuildVarsFunc is the function signature used by custom build variable
-// functions (which can modify route variables before a route's URL is built).
-type BuildVarsFunc func(map[string]string) map[string]string
-
-// BuildVarsFunc adds a custom function to be used to modify build variables
-// before a route's URL is built.
-func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
- if r.buildVarsFunc != nil {
- // compose the old and new functions
- old := r.buildVarsFunc
- r.buildVarsFunc = func(m map[string]string) map[string]string {
- return f(old(m))
- }
- } else {
- r.buildVarsFunc = f
- }
- return r
-}
-
-// Subrouter ------------------------------------------------------------------
-
-// Subrouter creates a subrouter for the route.
-//
-// It will test the inner routes only if the parent route matched. For example:
-//
-// r := mux.NewRouter().NewRoute()
-// s := r.Host("www.example.com").Subrouter()
-// s.HandleFunc("/products/", ProductsHandler)
-// s.HandleFunc("/products/{key}", ProductHandler)
-// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
-//
-// Here, the routes registered in the subrouter won't be tested if the host
-// doesn't match.
-func (r *Route) Subrouter() *Router {
- // initialize a subrouter with a copy of the parent route's configuration
- router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
- r.addMatcher(router)
- return router
-}
-
-// ----------------------------------------------------------------------------
-// URL building
-// ----------------------------------------------------------------------------
-
-// URL builds a URL for the route.
-//
-// It accepts a sequence of key/value pairs for the route variables. For
-// example, given this route:
-//
-// r := mux.NewRouter()
-// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
-// Name("article")
-//
-// ...a URL for it can be built using:
-//
-// url, err := r.Get("article").URL("category", "technology", "id", "42")
-//
-// ...which will return an url.URL with the following path:
-//
-// "/articles/technology/42"
-//
-// This also works for host variables:
-//
-// r := mux.NewRouter()
-// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
-// Host("{subdomain}.domain.com").
-// Name("article")
-//
-// // url.String() will be "http://news.domain.com/articles/technology/42"
-// url, err := r.Get("article").URL("subdomain", "news",
-// "category", "technology",
-// "id", "42")
-//
-// The scheme of the resulting url will be the first argument that was passed to Schemes:
-//
-// // url.String() will be "https://example.com"
-// r := mux.NewRouter().NewRoute()
-// url, err := r.Host("example.com")
-// .Schemes("https", "http").URL()
-//
-// All variables defined in the route are required, and their values must
-// conform to the corresponding patterns.
-func (r *Route) URL(pairs ...string) (*url.URL, error) {
- if r.err != nil {
- return nil, r.err
- }
- values, err := r.prepareVars(pairs...)
- if err != nil {
- return nil, err
- }
- var scheme, host, path string
- queries := make([]string, 0, len(r.regexp.queries))
- if r.regexp.host != nil {
- if host, err = r.regexp.host.url(values); err != nil {
- return nil, err
- }
- scheme = "http"
- if r.buildScheme != "" {
- scheme = r.buildScheme
- }
- }
- if r.regexp.path != nil {
- if path, err = r.regexp.path.url(values); err != nil {
- return nil, err
- }
- }
- for _, q := range r.regexp.queries {
- var query string
- if query, err = q.url(values); err != nil {
- return nil, err
- }
- queries = append(queries, query)
- }
- return &url.URL{
- Scheme: scheme,
- Host: host,
- Path: path,
- RawQuery: strings.Join(queries, "&"),
- }, nil
-}
-
-// URLHost builds the host part of the URL for a route. See Route.URL().
-//
-// The route must have a host defined.
-func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
- if r.err != nil {
- return nil, r.err
- }
- if r.regexp.host == nil {
- return nil, errors.New("mux: route doesn't have a host")
- }
- values, err := r.prepareVars(pairs...)
- if err != nil {
- return nil, err
- }
- host, err := r.regexp.host.url(values)
- if err != nil {
- return nil, err
- }
- u := &url.URL{
- Scheme: "http",
- Host: host,
- }
- if r.buildScheme != "" {
- u.Scheme = r.buildScheme
- }
- return u, nil
-}
-
-// URLPath builds the path part of the URL for a route. See Route.URL().
-//
-// The route must have a path defined.
-func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
- if r.err != nil {
- return nil, r.err
- }
- if r.regexp.path == nil {
- return nil, errors.New("mux: route doesn't have a path")
- }
- values, err := r.prepareVars(pairs...)
- if err != nil {
- return nil, err
- }
- path, err := r.regexp.path.url(values)
- if err != nil {
- return nil, err
- }
- return &url.URL{
- Path: path,
- }, nil
-}
-
-// GetPathTemplate returns the template used to build the
-// route match.
-// This is useful for building simple REST API documentation and for instrumentation
-// against third-party services.
-// An error will be returned if the route does not define a path.
-func (r *Route) GetPathTemplate() (string, error) {
- if r.err != nil {
- return "", r.err
- }
- if r.regexp.path == nil {
- return "", errors.New("mux: route doesn't have a path")
- }
- return r.regexp.path.template, nil
-}
-
-// GetPathRegexp returns the expanded regular expression used to match route path.
-// This is useful for building simple REST API documentation and for instrumentation
-// against third-party services.
-// An error will be returned if the route does not define a path.
-func (r *Route) GetPathRegexp() (string, error) {
- if r.err != nil {
- return "", r.err
- }
- if r.regexp.path == nil {
- return "", errors.New("mux: route does not have a path")
- }
- return r.regexp.path.regexp.String(), nil
-}
-
-// GetQueriesRegexp returns the expanded regular expressions used to match the
-// route queries.
-// This is useful for building simple REST API documentation and for instrumentation
-// against third-party services.
-// An error will be returned if the route does not have queries.
-func (r *Route) GetQueriesRegexp() ([]string, error) {
- if r.err != nil {
- return nil, r.err
- }
- if r.regexp.queries == nil {
- return nil, errors.New("mux: route doesn't have queries")
- }
- queries := make([]string, 0, len(r.regexp.queries))
- for _, query := range r.regexp.queries {
- queries = append(queries, query.regexp.String())
- }
- return queries, nil
-}
-
-// GetQueriesTemplates returns the templates used to build the
-// query matching.
-// This is useful for building simple REST API documentation and for instrumentation
-// against third-party services.
-// An error will be returned if the route does not define queries.
-func (r *Route) GetQueriesTemplates() ([]string, error) {
- if r.err != nil {
- return nil, r.err
- }
- if r.regexp.queries == nil {
- return nil, errors.New("mux: route doesn't have queries")
- }
- queries := make([]string, 0, len(r.regexp.queries))
- for _, query := range r.regexp.queries {
- queries = append(queries, query.template)
- }
- return queries, nil
-}
-
-// GetMethods returns the methods the route matches against
-// This is useful for building simple REST API documentation and for instrumentation
-// against third-party services.
-// An error will be returned if route does not have methods.
-func (r *Route) GetMethods() ([]string, error) {
- if r.err != nil {
- return nil, r.err
- }
- for _, m := range r.matchers {
- if methods, ok := m.(methodMatcher); ok {
- return []string(methods), nil
- }
- }
- return nil, errors.New("mux: route doesn't have methods")
-}
-
-// GetHostTemplate returns the template used to build the
-// route match.
-// This is useful for building simple REST API documentation and for instrumentation
-// against third-party services.
-// An error will be returned if the route does not define a host.
-func (r *Route) GetHostTemplate() (string, error) {
- if r.err != nil {
- return "", r.err
- }
- if r.regexp.host == nil {
- return "", errors.New("mux: route doesn't have a host")
- }
- return r.regexp.host.template, nil
-}
-
-// GetVarNames returns the names of all variables added by regexp matchers
-// These can be used to know which route variables should be passed into r.URL()
-func (r *Route) GetVarNames() ([]string, error) {
- if r.err != nil {
- return nil, r.err
- }
- var varNames []string
- if r.regexp.host != nil {
- varNames = append(varNames, r.regexp.host.varsN...)
- }
- if r.regexp.path != nil {
- varNames = append(varNames, r.regexp.path.varsN...)
- }
- for _, regx := range r.regexp.queries {
- varNames = append(varNames, regx.varsN...)
- }
- return varNames, nil
-}
-
-// prepareVars converts the route variable pairs into a map. If the route has a
-// BuildVarsFunc, it is invoked.
-func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
- m, err := mapFromPairsToString(pairs...)
- if err != nil {
- return nil, err
- }
- return r.buildVars(m), nil
-}
-
-func (r *Route) buildVars(m map[string]string) map[string]string {
- if r.buildVarsFunc != nil {
- m = r.buildVarsFunc(m)
- }
- return m
-}
diff --git a/vendor/github.com/gorilla/mux/test_helpers.go b/vendor/github.com/gorilla/mux/test_helpers.go
deleted file mode 100644
index 5f5c496de0..0000000000
--- a/vendor/github.com/gorilla/mux/test_helpers.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2012 The Gorilla Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mux
-
-import "net/http"
-
-// SetURLVars sets the URL variables for the given request, to be accessed via
-// mux.Vars for testing route behaviour. Arguments are not modified, a shallow
-// copy is returned.
-//
-// This API should only be used for testing purposes; it provides a way to
-// inject variables into the request context. Alternatively, URL variables
-// can be set by making a route that captures the required variables,
-// starting a server and sending the request to that server.
-func SetURLVars(r *http.Request, val map[string]string) *http.Request {
- return requestWithVars(r, val)
-}
diff --git a/vendor/github.com/lestrrat-go/blackmagic/blackmagic.go b/vendor/github.com/lestrrat-go/blackmagic/blackmagic.go
index aa5704a21a..9c98ac8483 100644
--- a/vendor/github.com/lestrrat-go/blackmagic/blackmagic.go
+++ b/vendor/github.com/lestrrat-go/blackmagic/blackmagic.go
@@ -5,6 +5,20 @@ import (
"reflect"
)
+type errInvalidValue struct{}
+
+func (*errInvalidValue) Error() string {
+ return "invalid value (probably an untyped nil)"
+}
+
+// InvalidValueError is a sentinel error that can be used to
+// indicate that a value is invalid. This can happen when the
+// source value is an untyped nil, and we have no further information
+// about the type of the value, obstructing the assignment.
+func InvalidValueError() error {
+ return &errInvalidValue{}
+}
+
// AssignField is a convenience function to assign a value to
// an optional struct field. In Go, an optional struct field is
// usually denoted by a pointer to T instead of T:
@@ -32,7 +46,7 @@ func AssignOptionalField(dst, src interface{}) error {
if !dstRV.Elem().CanSet() {
return fmt.Errorf(`dst (%T) is not assignable`, dstRV.Elem().Interface())
}
- if !reflect.PtrTo(srcRV.Type()).AssignableTo(dstRV.Elem().Type()) {
+ if !reflect.PointerTo(srcRV.Type()).AssignableTo(dstRV.Elem().Type()) {
return fmt.Errorf(`cannot assign src (%T) to dst (%T)`, src, dst)
}
@@ -50,15 +64,15 @@ func AssignIfCompatible(dst, src interface{}) error {
orv := reflect.ValueOf(src) // save this value for error reporting
result := orv
- // t can be a pointer or a slice, and the code will slightly change
+ // src can be a pointer or a slice, and the code will slightly change
// depending on this
- var isPtr bool
- var isSlice bool
+ var srcIsPtr bool
+ var srcIsSlice bool
switch result.Kind() {
case reflect.Ptr:
- isPtr = true
+ srcIsPtr = true
case reflect.Slice:
- isSlice = true
+ srcIsSlice = true
}
rv := reflect.ValueOf(dst)
@@ -66,17 +80,38 @@ func AssignIfCompatible(dst, src interface{}) error {
return fmt.Errorf(`destination argument to AssignIfCompatible() must be a pointer: %T`, dst)
}
- actualDst := rv.Elem()
+ actualDst := rv
+ for {
+ if !actualDst.IsValid() {
+ return fmt.Errorf(`could not find a valid destination for AssignIfCompatible() (%T)`, dst)
+ }
+ if actualDst.CanSet() {
+ break
+ }
+ actualDst = actualDst.Elem()
+ }
+
switch actualDst.Kind() {
case reflect.Interface:
// If it's an interface, we can just assign the pointer to the interface{}
default:
// If it's a pointer to the struct we're looking for, we need to set
// the de-referenced struct
- if !isSlice && isPtr {
+ if !srcIsSlice && srcIsPtr {
result = result.Elem()
}
}
+
+ if !result.IsValid() {
+ // At this point there's nothing we can do. return an error
+ return fmt.Errorf(`source value is invalid (%T): %w`, src, InvalidValueError())
+ }
+
+ if actualDst.Kind() == reflect.Ptr {
+ actualDst.Set(result.Addr())
+ return nil
+ }
+
if !result.Type().AssignableTo(actualDst.Type()) {
return fmt.Errorf(`argument to AssignIfCompatible() must be compatible with %T (was %T)`, orv.Interface(), dst)
}
diff --git a/vendor/github.com/lestrrat-go/dsig-secp256k1/.gitignore b/vendor/github.com/lestrrat-go/dsig-secp256k1/.gitignore
new file mode 100644
index 0000000000..aaadf736e5
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig-secp256k1/.gitignore
@@ -0,0 +1,32 @@
+# If you prefer the allow list template instead of the deny list, see community template:
+# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
+#
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Code coverage profiles and other test artifacts
+*.out
+coverage.*
+*.coverprofile
+profile.cov
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
+
+# Go workspace file
+go.work
+go.work.sum
+
+# env file
+.env
+
+# Editor/IDE
+# .idea/
+# .vscode/
diff --git a/vendor/github.com/lestrrat-go/dsig-secp256k1/Changes b/vendor/github.com/lestrrat-go/dsig-secp256k1/Changes
new file mode 100644
index 0000000000..4dd588a006
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig-secp256k1/Changes
@@ -0,0 +1,5 @@
+Changes
+=======
+
+v1.0.0 18 Aug 2025
+* Initial release
\ No newline at end of file
diff --git a/vendor/github.com/lestrrat-go/dsig-secp256k1/LICENSE b/vendor/github.com/lestrrat-go/dsig-secp256k1/LICENSE
new file mode 100644
index 0000000000..1e1f5d199e
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig-secp256k1/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 lestrrat-go
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/lestrrat-go/dsig-secp256k1/secp256k1.go b/vendor/github.com/lestrrat-go/dsig-secp256k1/secp256k1.go
new file mode 100644
index 0000000000..85650efa1c
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig-secp256k1/secp256k1.go
@@ -0,0 +1,29 @@
+package dsigsecp256k1
+
+import (
+ "crypto"
+
+ "github.com/decred/dcrd/dcrec/secp256k1/v4"
+ "github.com/lestrrat-go/dsig"
+)
+
+const ECDSAWithSecp256k1AndSHA256 = "ECDSA_WITH_SECP256K1_AND_SHA256"
+
+// init adds secp256k1 support when the dsig_secp256k1 build tag is used.
+func init() {
+ // Register ES256K (secp256k1 + SHA256) support using the new API
+ err := dsig.RegisterAlgorithm(ECDSAWithSecp256k1AndSHA256, dsig.AlgorithmInfo{
+ Family: dsig.ECDSA,
+ Meta: dsig.ECDSAFamilyMeta{
+ Hash: crypto.SHA256,
+ },
+ })
+ if err != nil {
+ panic("failed to register secp256k1 algorithm: " + err.Error())
+ }
+}
+
+// secp256k1Curve returns the secp256k1 curve.
+func Curve() *secp256k1.KoblitzCurve {
+ return secp256k1.S256()
+}
diff --git a/vendor/github.com/lestrrat-go/dsig/.gitignore b/vendor/github.com/lestrrat-go/dsig/.gitignore
new file mode 100644
index 0000000000..aaadf736e5
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig/.gitignore
@@ -0,0 +1,32 @@
+# If you prefer the allow list template instead of the deny list, see community template:
+# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
+#
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Code coverage profiles and other test artifacts
+*.out
+coverage.*
+*.coverprofile
+profile.cov
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
+
+# Go workspace file
+go.work
+go.work.sum
+
+# env file
+.env
+
+# Editor/IDE
+# .idea/
+# .vscode/
diff --git a/vendor/github.com/lestrrat-go/dsig/Changes b/vendor/github.com/lestrrat-go/dsig/Changes
new file mode 100644
index 0000000000..bccce97613
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig/Changes
@@ -0,0 +1,5 @@
+Changes
+=======
+
+v1.0.0 - 18 Aug 2025
+* Initial release
\ No newline at end of file
diff --git a/vendor/github.com/lestrrat-go/dsig/LICENSE b/vendor/github.com/lestrrat-go/dsig/LICENSE
new file mode 100644
index 0000000000..1e1f5d199e
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 lestrrat-go
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/lestrrat-go/dsig/README.md b/vendor/github.com/lestrrat-go/dsig/README.md
new file mode 100644
index 0000000000..37c194579e
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig/README.md
@@ -0,0 +1,163 @@
+# github.com/lestrrat-go/dsig [](https://github.com/lestrrat-go/dsig/actions/workflows/ci.yml) [](https://pkg.go.dev/github.com/lestrrat-go/dsig) [](https://codecov.io/github/lestrrat-go/dsig?branch=v1)
+
+Go module providing low-level digital signature operations.
+
+While there are many standards for generating and verifying digital signatures, the core operations are virtually the same. This module implements the core functionality of digital signature generation / verifications in a framework agnostic way.
+
+# Features
+
+* RSA signatures (PKCS1v15 and PSS)
+* ECDSA signatures (P-256, P-384, P-521)
+* EdDSA signatures (Ed25519, Ed448)
+* HMAC signatures (SHA-256, SHA-384, SHA-512)
+* Support for crypto.Signer interface
+* Allows for dynamic additions of algorithms in limited cases.
+
+# SYNOPSIS
+
+
+```go
+package examples_test
+
+import (
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "fmt"
+
+ "github.com/lestrrat-go/dsig"
+)
+
+func Example() {
+ payload := []byte("hello world")
+
+ // RSA signing and verification
+ {
+ privKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ fmt.Printf("failed to generate RSA key: %s\n", err)
+ return
+ }
+
+ // Sign with RSA-PSS SHA256
+ signature, err := dsig.Sign(privKey, dsig.RSAPSSWithSHA256, payload, nil)
+ if err != nil {
+ fmt.Printf("failed to sign with RSA: %s\n", err)
+ return
+ }
+
+ // Verify with RSA-PSS SHA256
+ err = dsig.Verify(&privKey.PublicKey, dsig.RSAPSSWithSHA256, payload, signature)
+ if err != nil {
+ fmt.Printf("failed to verify RSA signature: %s\n", err)
+ return
+ }
+ }
+
+ // ECDSA signing and verification
+ {
+ privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ if err != nil {
+ fmt.Printf("failed to generate ECDSA key: %s\n", err)
+ return
+ }
+
+ // Sign with ECDSA P-256 SHA256
+ signature, err := dsig.Sign(privKey, dsig.ECDSAWithP256AndSHA256, payload, nil)
+ if err != nil {
+ fmt.Printf("failed to sign with ECDSA: %s\n", err)
+ return
+ }
+
+ // Verify with ECDSA P-256 SHA256
+ err = dsig.Verify(&privKey.PublicKey, dsig.ECDSAWithP256AndSHA256, payload, signature)
+ if err != nil {
+ fmt.Printf("failed to verify ECDSA signature: %s\n", err)
+ return
+ }
+ }
+
+ // EdDSA signing and verification
+ {
+ pubKey, privKey, err := ed25519.GenerateKey(rand.Reader)
+ if err != nil {
+ fmt.Printf("failed to generate Ed25519 key: %s\n", err)
+ return
+ }
+
+ // Sign with EdDSA
+ signature, err := dsig.Sign(privKey, dsig.EdDSA, payload, nil)
+ if err != nil {
+ fmt.Printf("failed to sign with EdDSA: %s\n", err)
+ return
+ }
+
+ // Verify with EdDSA
+ err = dsig.Verify(pubKey, dsig.EdDSA, payload, signature)
+ if err != nil {
+ fmt.Printf("failed to verify EdDSA signature: %s\n", err)
+ return
+ }
+ }
+
+ // HMAC signing and verification
+ {
+ key := []byte("secret-key")
+
+ // Sign with HMAC SHA256
+ signature, err := dsig.Sign(key, dsig.HMACWithSHA256, payload, nil)
+ if err != nil {
+ fmt.Printf("failed to sign with HMAC: %s\n", err)
+ return
+ }
+
+ // Verify with HMAC SHA256
+ err = dsig.Verify(key, dsig.HMACWithSHA256, payload, signature)
+ if err != nil {
+ fmt.Printf("failed to verify HMAC signature: %s\n", err)
+ return
+ }
+ }
+ // OUTPUT:
+}
+```
+source: [examples/dsig_readme_example_test.go](https://github.com/lestrrat-go/dsig/blob/v1/examples/dsig_readme_example_test.go)
+
+
+# Supported Algorithms
+
+| Constant | Algorithm | Key Type |
+|----------|-----------|----------|
+| `HMACWithSHA256` | HMAC using SHA-256 | []byte |
+| `HMACWithSHA384` | HMAC using SHA-384 | []byte |
+| `HMACWithSHA512` | HMAC using SHA-512 | []byte |
+| `RSAPKCS1v15WithSHA256` | RSA PKCS#1 v1.5 using SHA-256 | *rsa.PrivateKey / *rsa.PublicKey |
+| `RSAPKCS1v15WithSHA384` | RSA PKCS#1 v1.5 using SHA-384 | *rsa.PrivateKey / *rsa.PublicKey |
+| `RSAPKCS1v15WithSHA512` | RSA PKCS#1 v1.5 using SHA-512 | *rsa.PrivateKey / *rsa.PublicKey |
+| `RSAPSSWithSHA256` | RSA PSS using SHA-256 | *rsa.PrivateKey / *rsa.PublicKey |
+| `RSAPSSWithSHA384` | RSA PSS using SHA-384 | *rsa.PrivateKey / *rsa.PublicKey |
+| `RSAPSSWithSHA512` | RSA PSS using SHA-512 | *rsa.PrivateKey / *rsa.PublicKey |
+| `ECDSAWithP256AndSHA256` | ECDSA using P-256 and SHA-256 | *ecdsa.PrivateKey / *ecdsa.PublicKey |
+| `ECDSAWithP384AndSHA384` | ECDSA using P-384 and SHA-384 | *ecdsa.PrivateKey / *ecdsa.PublicKey |
+| `ECDSAWithP521AndSHA512` | ECDSA using P-521 and SHA-512 | *ecdsa.PrivateKey / *ecdsa.PublicKey |
+| `EdDSA` | EdDSA using Ed25519 or Ed448 | ed25519.PrivateKey / ed25519.PublicKey |
+
+# Description
+
+This library provides low-level digital signature operations. It does minimal parameter validation for performance, uses strongly typed APIs, and has minimal dependencies.
+
+# Contributions
+
+## Issues
+
+For bug reports and feature requests, please include failing tests when possible.
+
+## Pull Requests
+
+Please include tests that exercise your changes.
+
+# Related Libraries
+
+* [github.com/lestrrat-go/jwx](https://github.com/lestrrat-go/jwx) - JOSE (JWA/JWE/JWK/JWS/JWT) implementation
\ No newline at end of file
diff --git a/vendor/github.com/lestrrat-go/dsig/algorithms.go b/vendor/github.com/lestrrat-go/dsig/algorithms.go
new file mode 100644
index 0000000000..0895c64764
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig/algorithms.go
@@ -0,0 +1,37 @@
+package dsig
+
+// This file defines verbose algorithm name constants that can be mapped to by
+// different standards (RFC7518, FIDO, etc.) for interoperability.
+//
+// The algorithm names are intentionally verbose to avoid any ambiguity about
+// the exact cryptographic operations being performed.
+
+const (
+ // HMAC signature algorithms
+ // These use Hash-based Message Authentication Code with specified hash functions
+ HMACWithSHA256 = "HMAC_WITH_SHA256"
+ HMACWithSHA384 = "HMAC_WITH_SHA384"
+ HMACWithSHA512 = "HMAC_WITH_SHA512"
+
+ // RSA signature algorithms with PKCS#1 v1.5 padding
+ // These use RSA signatures with PKCS#1 v1.5 padding and specified hash functions
+ RSAPKCS1v15WithSHA256 = "RSA_PKCS1v15_WITH_SHA256"
+ RSAPKCS1v15WithSHA384 = "RSA_PKCS1v15_WITH_SHA384"
+ RSAPKCS1v15WithSHA512 = "RSA_PKCS1v15_WITH_SHA512"
+
+ // RSA signature algorithms with PSS padding
+ // These use RSA signatures with Probabilistic Signature Scheme (PSS) padding
+ RSAPSSWithSHA256 = "RSA_PSS_WITH_SHA256"
+ RSAPSSWithSHA384 = "RSA_PSS_WITH_SHA384"
+ RSAPSSWithSHA512 = "RSA_PSS_WITH_SHA512"
+
+ // ECDSA signature algorithms
+ // These use Elliptic Curve Digital Signature Algorithm with specified curves and hash functions
+ ECDSAWithP256AndSHA256 = "ECDSA_WITH_P256_AND_SHA256"
+ ECDSAWithP384AndSHA384 = "ECDSA_WITH_P384_AND_SHA384"
+ ECDSAWithP521AndSHA512 = "ECDSA_WITH_P521_AND_SHA512"
+
+ // EdDSA signature algorithms
+ // These use Edwards-curve Digital Signature Algorithm (supports Ed25519 and Ed448)
+ EdDSA = "EDDSA"
+)
\ No newline at end of file
diff --git a/vendor/github.com/lestrrat-go/dsig/crypto_signer.go b/vendor/github.com/lestrrat-go/dsig/crypto_signer.go
new file mode 100644
index 0000000000..f81666708b
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig/crypto_signer.go
@@ -0,0 +1,45 @@
+package dsig
+
+import (
+ "crypto"
+ "crypto/rand"
+ "fmt"
+ "io"
+)
+
+// cryptosign is a low-level function that signs a payload using a crypto.Signer.
+// If hash is crypto.Hash(0), the payload is signed directly without hashing.
+// Otherwise, the payload is hashed using the specified hash function before signing.
+//
+// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader.
+func cryptosign(signer crypto.Signer, payload []byte, hash crypto.Hash, opts crypto.SignerOpts, rr io.Reader) ([]byte, error) {
+ if rr == nil {
+ rr = rand.Reader
+ }
+
+ var digest []byte
+ if hash == crypto.Hash(0) {
+ digest = payload
+ } else {
+ h := hash.New()
+ if _, err := h.Write(payload); err != nil {
+ return nil, fmt.Errorf(`failed to write payload to hash: %w`, err)
+ }
+ digest = h.Sum(nil)
+ }
+ return signer.Sign(rr, digest, opts)
+}
+
+// SignCryptoSigner generates a signature using a crypto.Signer interface.
+// This function can be used for hardware security modules, smart cards,
+// and other implementations of the crypto.Signer interface.
+//
+// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader.
+//
+// Returns the signature bytes or an error if signing fails.
+func SignCryptoSigner(signer crypto.Signer, raw []byte, h crypto.Hash, opts crypto.SignerOpts, rr io.Reader) ([]byte, error) {
+ if signer == nil {
+ return nil, fmt.Errorf("dsig.SignCryptoSigner: signer is nil")
+ }
+ return cryptosign(signer, raw, h, opts, rr)
+}
diff --git a/vendor/github.com/lestrrat-go/dsig/dsig.go b/vendor/github.com/lestrrat-go/dsig/dsig.go
new file mode 100644
index 0000000000..de6cbdec45
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig/dsig.go
@@ -0,0 +1,224 @@
+// Package dsig provides digital signature operations for Go.
+// It contains low-level signature generation and verification tools that
+// can be used by other signing libraries
+//
+// The package follows these design principles:
+// 1. Does minimal checking of input parameters (for performance); callers need to ensure that the parameters are valid.
+// 2. All exported functions are strongly typed (i.e. they do not take `any` types unless they absolutely have to).
+// 3. Does not rely on other high-level packages (standalone, except for internal packages).
+package dsig
+
+import (
+ "crypto"
+ "crypto/sha256"
+ "crypto/sha512"
+ "fmt"
+ "hash"
+ "sync"
+)
+
+// Family represents the cryptographic algorithm family
+type Family int
+
+const (
+ InvalidFamily Family = iota
+ HMAC
+ RSA
+ ECDSA
+ EdDSAFamily
+ maxFamily
+)
+
+// String returns the string representation of the Family
+func (f Family) String() string {
+ switch f {
+ case HMAC:
+ return "HMAC"
+ case RSA:
+ return "RSA"
+ case ECDSA:
+ return "ECDSA"
+ case EdDSAFamily:
+ return "EdDSA"
+ default:
+ return "InvalidFamily"
+ }
+}
+
+// AlgorithmInfo contains metadata about a digital signature algorithm
+type AlgorithmInfo struct {
+ Family Family // The cryptographic family (HMAC, RSA, ECDSA, EdDSA)
+ Meta any // Family-specific metadata
+}
+
+// HMACFamilyMeta contains metadata specific to HMAC algorithms
+type HMACFamilyMeta struct {
+ HashFunc func() hash.Hash // Hash function constructor
+}
+
+// RSAFamilyMeta contains metadata specific to RSA algorithms
+type RSAFamilyMeta struct {
+ Hash crypto.Hash // Hash algorithm
+ PSS bool // Whether to use PSS padding (false = PKCS#1 v1.5)
+}
+
+// ECDSAFamilyMeta contains metadata specific to ECDSA algorithms
+type ECDSAFamilyMeta struct {
+ Hash crypto.Hash // Hash algorithm
+}
+
+// EdDSAFamilyMeta contains metadata specific to EdDSA algorithms
+// Currently EdDSA doesn't need specific metadata, but this provides extensibility
+type EdDSAFamilyMeta struct {
+ // Reserved for future use
+}
+
+var algorithms = make(map[string]AlgorithmInfo)
+var muAlgorithms sync.RWMutex
+
+// RegisterAlgorithm registers a new digital signature algorithm with the specified family and metadata.
+//
+// info.Meta should contain extra metadata for some algorithms. Currently HMAC, RSA,
+// and ECDSA family of algorithms need their respective metadata (HMACFamilyMeta,
+// RSAFamilyMeta, and ECDSAFamilyMeta). Metadata for other families are ignored.
+func RegisterAlgorithm(name string, info AlgorithmInfo) error {
+ muAlgorithms.Lock()
+ defer muAlgorithms.Unlock()
+
+ // Validate the metadata matches the family
+ switch info.Family {
+ case HMAC:
+ if _, ok := info.Meta.(HMACFamilyMeta); !ok {
+ return fmt.Errorf("invalid HMAC metadata for algorithm %s", name)
+ }
+ case RSA:
+ if _, ok := info.Meta.(RSAFamilyMeta); !ok {
+ return fmt.Errorf("invalid RSA metadata for algorithm %s", name)
+ }
+ case ECDSA:
+ if _, ok := info.Meta.(ECDSAFamilyMeta); !ok {
+ return fmt.Errorf("invalid ECDSA metadata for algorithm %s", name)
+ }
+ case EdDSAFamily:
+ // EdDSA metadata is optional for now
+ default:
+ return fmt.Errorf("unsupported algorithm family %s for algorithm %s", info.Family, name)
+ }
+
+ algorithms[name] = info
+ return nil
+}
+
+// GetAlgorithmInfo retrieves the algorithm information for a given algorithm name.
+// Returns the info and true if found, zero value and false if not found.
+func GetAlgorithmInfo(name string) (AlgorithmInfo, bool) {
+ muAlgorithms.RLock()
+ defer muAlgorithms.RUnlock()
+
+ info, ok := algorithms[name]
+ return info, ok
+}
+
+func init() {
+ // Register all standard algorithms with their metadata
+ toRegister := map[string]AlgorithmInfo{
+ // HMAC algorithms
+ HMACWithSHA256: {
+ Family: HMAC,
+ Meta: HMACFamilyMeta{
+ HashFunc: sha256.New,
+ },
+ },
+ HMACWithSHA384: {
+ Family: HMAC,
+ Meta: HMACFamilyMeta{
+ HashFunc: sha512.New384,
+ },
+ },
+ HMACWithSHA512: {
+ Family: HMAC,
+ Meta: HMACFamilyMeta{
+ HashFunc: sha512.New,
+ },
+ },
+
+ // RSA PKCS#1 v1.5 algorithms
+ RSAPKCS1v15WithSHA256: {
+ Family: RSA,
+ Meta: RSAFamilyMeta{
+ Hash: crypto.SHA256,
+ PSS: false,
+ },
+ },
+ RSAPKCS1v15WithSHA384: {
+ Family: RSA,
+ Meta: RSAFamilyMeta{
+ Hash: crypto.SHA384,
+ PSS: false,
+ },
+ },
+ RSAPKCS1v15WithSHA512: {
+ Family: RSA,
+ Meta: RSAFamilyMeta{
+ Hash: crypto.SHA512,
+ PSS: false,
+ },
+ },
+
+ // RSA PSS algorithms
+ RSAPSSWithSHA256: {
+ Family: RSA,
+ Meta: RSAFamilyMeta{
+ Hash: crypto.SHA256,
+ PSS: true,
+ },
+ },
+ RSAPSSWithSHA384: {
+ Family: RSA,
+ Meta: RSAFamilyMeta{
+ Hash: crypto.SHA384,
+ PSS: true,
+ },
+ },
+ RSAPSSWithSHA512: {
+ Family: RSA,
+ Meta: RSAFamilyMeta{
+ Hash: crypto.SHA512,
+ PSS: true,
+ },
+ },
+
+ // ECDSA algorithms
+ ECDSAWithP256AndSHA256: {
+ Family: ECDSA,
+ Meta: ECDSAFamilyMeta{
+ Hash: crypto.SHA256,
+ },
+ },
+ ECDSAWithP384AndSHA384: {
+ Family: ECDSA,
+ Meta: ECDSAFamilyMeta{
+ Hash: crypto.SHA384,
+ },
+ },
+ ECDSAWithP521AndSHA512: {
+ Family: ECDSA,
+ Meta: ECDSAFamilyMeta{
+ Hash: crypto.SHA512,
+ },
+ },
+
+ // EdDSA algorithm
+ EdDSA: {
+ Family: EdDSAFamily,
+ Meta: EdDSAFamilyMeta{},
+ },
+ }
+
+ for name, info := range toRegister {
+ if err := RegisterAlgorithm(name, info); err != nil {
+ panic(fmt.Sprintf("failed to register algorithm %s: %v", name, err))
+ }
+ }
+}
+
diff --git a/vendor/github.com/lestrrat-go/dsig/ecdsa.go b/vendor/github.com/lestrrat-go/dsig/ecdsa.go
new file mode 100644
index 0000000000..a04a266919
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig/ecdsa.go
@@ -0,0 +1,200 @@
+package dsig
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "encoding/asn1"
+ "fmt"
+ "io"
+ "math/big"
+
+ "github.com/lestrrat-go/dsig/internal/ecutil"
+)
+
+
+func ecdsaGetSignerKey(key any) (*ecdsa.PrivateKey, crypto.Signer, bool, error) {
+ cs, isCryptoSigner := key.(crypto.Signer)
+ if isCryptoSigner {
+ if !isValidECDSAKey(key) {
+ return nil, nil, false, fmt.Errorf(`invalid key type %T for ECDSA algorithm`, key)
+ }
+
+ switch key.(type) {
+ case ecdsa.PrivateKey, *ecdsa.PrivateKey:
+ // if it's ecdsa.PrivateKey, it's more efficient to
+ // go through the non-crypto.Signer route. Set isCryptoSigner to false
+ isCryptoSigner = false
+ }
+ }
+
+ if isCryptoSigner {
+ return nil, cs, true, nil
+ }
+
+ privkey, ok := key.(*ecdsa.PrivateKey)
+ if !ok {
+ return nil, nil, false, fmt.Errorf(`invalid key type %T. *ecdsa.PrivateKey is required`, key)
+ }
+ return privkey, nil, false, nil
+}
+
+// UnpackASN1ECDSASignature unpacks an ASN.1 encoded ECDSA signature into r and s values.
+// This is typically used when working with crypto.Signer interfaces that return ASN.1 encoded signatures.
+func UnpackASN1ECDSASignature(signed []byte, r, s *big.Int) error {
+ // Okay, this is silly, but hear me out. When we use the
+ // crypto.Signer interface, the PrivateKey is hidden.
+ // But we need some information about the key (its bit size).
+ //
+ // So while silly, we're going to have to make another call
+ // here and fetch the Public key.
+ // (This probably means that this information should be cached somewhere)
+ var p struct {
+ R *big.Int // TODO: get this from a pool?
+ S *big.Int
+ }
+ if _, err := asn1.Unmarshal(signed, &p); err != nil {
+ return fmt.Errorf(`failed to unmarshal ASN1 encoded signature: %w`, err)
+ }
+
+ r.Set(p.R)
+ s.Set(p.S)
+ return nil
+}
+
+// UnpackECDSASignature unpacks a JWS-format ECDSA signature into r and s values.
+// The signature should be in the format specified by RFC 7515 (r||s as fixed-length byte arrays).
+func UnpackECDSASignature(signature []byte, pubkey *ecdsa.PublicKey, r, s *big.Int) error {
+ keySize := ecutil.CalculateKeySize(pubkey.Curve)
+ if len(signature) != keySize*2 {
+ return fmt.Errorf(`invalid signature length for curve %q`, pubkey.Curve.Params().Name)
+ }
+
+ r.SetBytes(signature[:keySize])
+ s.SetBytes(signature[keySize:])
+
+ return nil
+}
+
+// PackECDSASignature packs the r and s values from an ECDSA signature into a JWS-format byte slice.
+// The output format follows RFC 7515: r||s as fixed-length byte arrays.
+func PackECDSASignature(r *big.Int, sbig *big.Int, curveBits int) ([]byte, error) {
+ keyBytes := curveBits / 8
+ if curveBits%8 > 0 {
+ keyBytes++
+ }
+
+ // Serialize r and s into fixed-length bytes
+ rBytes := r.Bytes()
+ rBytesPadded := make([]byte, keyBytes)
+ copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+ sBytes := sbig.Bytes()
+ sBytesPadded := make([]byte, keyBytes)
+ copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+ // Output as r||s
+ return append(rBytesPadded, sBytesPadded...), nil
+}
+
+// SignECDSA generates an ECDSA signature for the given payload using the specified private key and hash.
+// The raw parameter should be the pre-computed signing input (typically header.payload).
+//
+// rr is an io.Reader that provides randomness for signing. if rr is nil, it defaults to rand.Reader.
+func SignECDSA(key *ecdsa.PrivateKey, payload []byte, h crypto.Hash, rr io.Reader) ([]byte, error) {
+ if !isValidECDSAKey(key) {
+ return nil, fmt.Errorf(`invalid key type %T for ECDSA algorithm`, key)
+ }
+ hh := h.New()
+ if _, err := hh.Write(payload); err != nil {
+ return nil, fmt.Errorf(`failed to write payload using ecdsa: %w`, err)
+ }
+ digest := hh.Sum(nil)
+
+ if rr == nil {
+ rr = rand.Reader
+ }
+
+ // Sign and get r, s values
+ r, s, err := ecdsa.Sign(rr, key, digest)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to sign payload using ecdsa: %w`, err)
+ }
+
+ return PackECDSASignature(r, s, key.Curve.Params().BitSize)
+}
+
+// SignECDSACryptoSigner generates an ECDSA signature using a crypto.Signer interface.
+// This function works with hardware security modules and other crypto.Signer implementations.
+// The signature is converted from ASN.1 format to JWS format (r||s).
+//
+// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader.
+func SignECDSACryptoSigner(signer crypto.Signer, raw []byte, h crypto.Hash, rr io.Reader) ([]byte, error) {
+ signed, err := SignCryptoSigner(signer, raw, h, h, rr)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to sign payload using crypto.Signer: %w`, err)
+ }
+
+ return signECDSACryptoSigner(signer, signed)
+}
+
+func signECDSACryptoSigner(signer crypto.Signer, signed []byte) ([]byte, error) {
+ cpub := signer.Public()
+ pubkey, ok := cpub.(*ecdsa.PublicKey)
+ if !ok {
+ return nil, fmt.Errorf(`expected *ecdsa.PublicKey, got %T`, pubkey)
+ }
+ curveBits := pubkey.Curve.Params().BitSize
+
+ var r, s big.Int
+ if err := UnpackASN1ECDSASignature(signed, &r, &s); err != nil {
+ return nil, fmt.Errorf(`failed to unpack ASN1 encoded signature: %w`, err)
+ }
+
+ return PackECDSASignature(&r, &s, curveBits)
+}
+
+func ecdsaVerify(key *ecdsa.PublicKey, buf []byte, h crypto.Hash, r, s *big.Int) error {
+ hasher := h.New()
+ hasher.Write(buf)
+ digest := hasher.Sum(nil)
+ if !ecdsa.Verify(key, digest, r, s) {
+ return NewVerificationError("invalid ECDSA signature")
+ }
+ return nil
+}
+
+// VerifyECDSA verifies an ECDSA signature for the given payload.
+// This function verifies the signature using the specified public key and hash algorithm.
+// The payload parameter should be the pre-computed signing input (typically header.payload).
+func VerifyECDSA(key *ecdsa.PublicKey, payload, signature []byte, h crypto.Hash) error {
+ var r, s big.Int
+ if err := UnpackECDSASignature(signature, key, &r, &s); err != nil {
+ return fmt.Errorf("dsig.VerifyECDSA: failed to unpack ECDSA signature: %w", err)
+ }
+
+ return ecdsaVerify(key, payload, h, &r, &s)
+}
+
+// VerifyECDSACryptoSigner verifies an ECDSA signature for crypto.Signer implementations.
+// This function is useful for verifying signatures created by hardware security modules
+// or other implementations of the crypto.Signer interface.
+// The payload parameter should be the pre-computed signing input (typically header.payload).
+func VerifyECDSACryptoSigner(signer crypto.Signer, payload, signature []byte, h crypto.Hash) error {
+ var pubkey *ecdsa.PublicKey
+ switch cpub := signer.Public(); cpub := cpub.(type) {
+ case ecdsa.PublicKey:
+ pubkey = &cpub
+ case *ecdsa.PublicKey:
+ pubkey = cpub
+ default:
+ return fmt.Errorf(`dsig.VerifyECDSACryptoSigner: expected *ecdsa.PublicKey, got %T`, cpub)
+ }
+
+ var r, s big.Int
+ if err := UnpackECDSASignature(signature, pubkey, &r, &s); err != nil {
+ return fmt.Errorf("dsig.VerifyECDSACryptoSigner: failed to unpack ASN.1 encoded ECDSA signature: %w", err)
+ }
+
+ return ecdsaVerify(pubkey, payload, h, &r, &s)
+}
diff --git a/vendor/github.com/lestrrat-go/dsig/eddsa.go b/vendor/github.com/lestrrat-go/dsig/eddsa.go
new file mode 100644
index 0000000000..6562da37b8
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig/eddsa.go
@@ -0,0 +1,44 @@
+package dsig
+
+import (
+ "crypto"
+ "crypto/ed25519"
+ "fmt"
+)
+
+func eddsaGetSigner(key any) (crypto.Signer, error) {
+ // The ed25519.PrivateKey object implements crypto.Signer, so we should
+ // simply accept a crypto.Signer here.
+ signer, ok := key.(crypto.Signer)
+ if ok {
+ if !isValidEDDSAKey(key) {
+ return nil, fmt.Errorf(`invalid key type %T for EdDSA algorithm`, key)
+ }
+ return signer, nil
+ }
+
+ // This fallback exists for cases when users give us a pointer instead of non-pointer, etc.
+ privkey, ok := key.(ed25519.PrivateKey)
+ if !ok {
+ return nil, fmt.Errorf(`failed to retrieve ed25519.PrivateKey out of %T`, key)
+ }
+ return privkey, nil
+}
+
+// SignEdDSA generates an EdDSA (Ed25519) signature for the given payload.
+// The raw parameter should be the pre-computed signing input (typically header.payload).
+// EdDSA is deterministic and doesn't require additional hashing of the input.
+func SignEdDSA(key ed25519.PrivateKey, payload []byte) ([]byte, error) {
+ return ed25519.Sign(key, payload), nil
+}
+
+// VerifyEdDSA verifies an EdDSA (Ed25519) signature for the given payload.
+// This function verifies the signature using Ed25519 verification algorithm.
+// The payload parameter should be the pre-computed signing input (typically header.payload).
+// EdDSA is deterministic and provides strong security guarantees without requiring hash function selection.
+func VerifyEdDSA(key ed25519.PublicKey, payload, signature []byte) error {
+ if !ed25519.Verify(key, payload, signature) {
+ return fmt.Errorf("invalid EdDSA signature")
+ }
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/dsig/hmac.go b/vendor/github.com/lestrrat-go/dsig/hmac.go
new file mode 100644
index 0000000000..8b2612279d
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig/hmac.go
@@ -0,0 +1,45 @@
+package dsig
+
+import (
+ "crypto/hmac"
+ "fmt"
+ "hash"
+)
+
+func toHMACKey(dst *[]byte, key any) error {
+ keyBytes, ok := key.([]byte)
+ if !ok {
+ return fmt.Errorf(`dsig.toHMACKey: invalid key type %T. []byte is required`, key)
+ }
+
+ if len(keyBytes) == 0 {
+ return fmt.Errorf(`dsig.toHMACKey: missing key while signing payload`)
+ }
+
+ *dst = keyBytes
+ return nil
+}
+
+// SignHMAC generates an HMAC signature for the given payload using the specified hash function and key.
+// The raw parameter should be the pre-computed signing input (typically header.payload).
+func SignHMAC(key, payload []byte, hfunc func() hash.Hash) ([]byte, error) {
+ h := hmac.New(hfunc, key)
+ if _, err := h.Write(payload); err != nil {
+ return nil, fmt.Errorf(`failed to write payload using hmac: %w`, err)
+ }
+ return h.Sum(nil), nil
+}
+
+// VerifyHMAC verifies an HMAC signature for the given payload.
+// This function verifies the signature using the specified key and hash function.
+// The payload parameter should be the pre-computed signing input (typically header.payload).
+func VerifyHMAC(key, payload, signature []byte, hfunc func() hash.Hash) error {
+ expected, err := SignHMAC(key, payload, hfunc)
+ if err != nil {
+ return fmt.Errorf("failed to sign payload for verification: %w", err)
+ }
+ if !hmac.Equal(signature, expected) {
+ return NewVerificationError("invalid HMAC signature")
+ }
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/dsig/internal/ecutil/ecutil.go b/vendor/github.com/lestrrat-go/dsig/internal/ecutil/ecutil.go
new file mode 100644
index 0000000000..cf0bd4ac48
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig/internal/ecutil/ecutil.go
@@ -0,0 +1,76 @@
+// Package ecutil defines tools that help with elliptic curve related
+// computation
+package ecutil
+
+import (
+ "crypto/elliptic"
+ "math/big"
+ "sync"
+)
+
+const (
+ // size of buffer that needs to be allocated for EC521 curve
+ ec521BufferSize = 66 // (521 / 8) + 1
+)
+
+var ecpointBufferPool = sync.Pool{
+ New: func() any {
+ // In most cases the curve bit size will be less than this length
+ // so allocate the maximum, and keep reusing
+ buf := make([]byte, 0, ec521BufferSize)
+ return &buf
+ },
+}
+
+func getCrvFixedBuffer(size int) []byte {
+ //nolint:forcetypeassert
+ buf := *(ecpointBufferPool.Get().(*[]byte))
+ if size > ec521BufferSize && cap(buf) < size {
+ buf = append(buf, make([]byte, size-cap(buf))...)
+ }
+ return buf[:size]
+}
+
+// ReleaseECPointBuffer releases the []byte buffer allocated.
+func ReleaseECPointBuffer(buf []byte) {
+ buf = buf[:cap(buf)]
+ buf[0] = 0x0
+ for i := 1; i < len(buf); i *= 2 {
+ copy(buf[i:], buf[:i])
+ }
+ buf = buf[:0]
+ ecpointBufferPool.Put(&buf)
+}
+
+func CalculateKeySize(crv elliptic.Curve) int {
+ // We need to create a buffer that fits the entire curve.
+ // If the curve size is 66, that fits in 9 bytes. If the curve
+ // size is 64, it fits in 8 bytes.
+ bits := crv.Params().BitSize
+
+ // For most common cases we know before hand what the byte length
+ // is going to be. optimize
+ var inBytes int
+ switch bits {
+ case 224, 256, 384: // TODO: use constant?
+ inBytes = bits / 8
+ case 521:
+ inBytes = ec521BufferSize
+ default:
+ inBytes = bits / 8
+ if (bits % 8) != 0 {
+ inBytes++
+ }
+ }
+
+ return inBytes
+}
+
+// AllocECPointBuffer allocates a buffer for the given point in the given
+// curve. This buffer should be released using the ReleaseECPointBuffer
+// function.
+func AllocECPointBuffer(v *big.Int, crv elliptic.Curve) []byte {
+ buf := getCrvFixedBuffer(CalculateKeySize(crv))
+ v.FillBytes(buf)
+ return buf
+}
diff --git a/vendor/github.com/lestrrat-go/dsig/rsa.go b/vendor/github.com/lestrrat-go/dsig/rsa.go
new file mode 100644
index 0000000000..a339fe5b78
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig/rsa.go
@@ -0,0 +1,63 @@
+package dsig
+
+import (
+ "crypto"
+ "crypto/rsa"
+ "fmt"
+ "io"
+)
+
+func rsaGetSignerCryptoSignerKey(key any) (crypto.Signer, bool, error) {
+ if !isValidRSAKey(key) {
+ return nil, false, fmt.Errorf(`invalid key type %T for RSA algorithm`, key)
+ }
+ cs, isCryptoSigner := key.(crypto.Signer)
+ if isCryptoSigner {
+ return cs, true, nil
+ }
+ return nil, false, nil
+}
+
+// rsaPSSOptions returns the PSS options for RSA-PSS signatures with the specified hash.
+// The salt length is set to equal the hash length as per RFC 7518.
+func rsaPSSOptions(h crypto.Hash) rsa.PSSOptions {
+ return rsa.PSSOptions{
+ Hash: h,
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ }
+}
+
+// SignRSA generates an RSA signature for the given payload using the specified private key and options.
+// The raw parameter should be the pre-computed signing input (typically header.payload).
+// If pss is true, RSA-PSS is used; otherwise, PKCS#1 v1.5 is used.
+//
+// The rr parameter is an optional io.Reader that can be used to provide randomness for signing.
+// If rr is nil, it defaults to rand.Reader.
+func SignRSA(key *rsa.PrivateKey, payload []byte, h crypto.Hash, pss bool, rr io.Reader) ([]byte, error) {
+ if !isValidRSAKey(key) {
+ return nil, fmt.Errorf(`invalid key type %T for RSA algorithm`, key)
+ }
+ var opts crypto.SignerOpts = h
+ if pss {
+ rsaopts := rsaPSSOptions(h)
+ opts = &rsaopts
+ }
+ return cryptosign(key, payload, h, opts, rr)
+}
+
+// VerifyRSA verifies an RSA signature for the given payload and header.
+// This function constructs the signing input by encoding the header and payload according to JWS specification,
+// then verifies the signature using the specified public key and hash algorithm.
+// If pss is true, RSA-PSS verification is used; otherwise, PKCS#1 v1.5 verification is used.
+func VerifyRSA(key *rsa.PublicKey, payload, signature []byte, h crypto.Hash, pss bool) error {
+ if !isValidRSAKey(key) {
+ return fmt.Errorf(`invalid key type %T for RSA algorithm`, key)
+ }
+ hasher := h.New()
+ hasher.Write(payload)
+ digest := hasher.Sum(nil)
+ if pss {
+ return rsa.VerifyPSS(key, h, digest, signature, &rsa.PSSOptions{Hash: h, SaltLength: rsa.PSSSaltLengthEqualsHash})
+ }
+ return rsa.VerifyPKCS1v15(key, h, digest, signature)
+}
diff --git a/vendor/github.com/lestrrat-go/dsig/sign.go b/vendor/github.com/lestrrat-go/dsig/sign.go
new file mode 100644
index 0000000000..e2a6bde290
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig/sign.go
@@ -0,0 +1,100 @@
+package dsig
+
+import (
+ "crypto"
+ "crypto/rsa"
+ "fmt"
+ "io"
+)
+
+// Sign generates a digital signature using the specified key and algorithm.
+//
+// This function loads the signer registered in the dsig package _ONLY_.
+// It does not support custom signers that the user might have registered.
+//
+// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader.
+// Not all algorithms require this parameter, but it is included for consistency.
+// 99% of the time, you can pass nil for rr, and it will work fine.
+func Sign(key any, alg string, payload []byte, rr io.Reader) ([]byte, error) {
+ info, ok := GetAlgorithmInfo(alg)
+ if !ok {
+ return nil, fmt.Errorf(`dsig.Sign: unsupported signature algorithm %q`, alg)
+ }
+
+ switch info.Family {
+ case HMAC:
+ return dispatchHMACSign(key, info, payload)
+ case RSA:
+ return dispatchRSASign(key, info, payload, rr)
+ case ECDSA:
+ return dispatchECDSASign(key, info, payload, rr)
+ case EdDSAFamily:
+ return dispatchEdDSASign(key, info, payload, rr)
+ default:
+ return nil, fmt.Errorf(`dsig.Sign: unsupported signature family %q`, info.Family)
+ }
+}
+
+func dispatchHMACSign(key any, info AlgorithmInfo, payload []byte) ([]byte, error) {
+ meta, ok := info.Meta.(HMACFamilyMeta)
+ if !ok {
+ return nil, fmt.Errorf(`dsig.Sign: invalid HMAC metadata`)
+ }
+
+ var hmackey []byte
+ if err := toHMACKey(&hmackey, key); err != nil {
+ return nil, fmt.Errorf(`dsig.Sign: %w`, err)
+ }
+ return SignHMAC(hmackey, payload, meta.HashFunc)
+}
+
+func dispatchRSASign(key any, info AlgorithmInfo, payload []byte, rr io.Reader) ([]byte, error) {
+ meta, ok := info.Meta.(RSAFamilyMeta)
+ if !ok {
+ return nil, fmt.Errorf(`dsig.Sign: invalid RSA metadata`)
+ }
+
+ cs, isCryptoSigner, err := rsaGetSignerCryptoSignerKey(key)
+ if err != nil {
+ return nil, fmt.Errorf(`dsig.Sign: %w`, err)
+ }
+ if isCryptoSigner {
+ var options crypto.SignerOpts = meta.Hash
+ if meta.PSS {
+ rsaopts := rsaPSSOptions(meta.Hash)
+ options = &rsaopts
+ }
+ return SignCryptoSigner(cs, payload, meta.Hash, options, rr)
+ }
+
+ privkey, ok := key.(*rsa.PrivateKey)
+ if !ok {
+ return nil, fmt.Errorf(`dsig.Sign: invalid key type %T. *rsa.PrivateKey is required`, key)
+ }
+ return SignRSA(privkey, payload, meta.Hash, meta.PSS, rr)
+}
+
+func dispatchEdDSASign(key any, _ AlgorithmInfo, payload []byte, rr io.Reader) ([]byte, error) {
+ signer, err := eddsaGetSigner(key)
+ if err != nil {
+ return nil, fmt.Errorf(`dsig.Sign: %w`, err)
+ }
+
+ return SignCryptoSigner(signer, payload, crypto.Hash(0), crypto.Hash(0), rr)
+}
+
+func dispatchECDSASign(key any, info AlgorithmInfo, payload []byte, rr io.Reader) ([]byte, error) {
+ meta, ok := info.Meta.(ECDSAFamilyMeta)
+ if !ok {
+ return nil, fmt.Errorf(`dsig.Sign: invalid ECDSA metadata`)
+ }
+
+ privkey, cs, isCryptoSigner, err := ecdsaGetSignerKey(key)
+ if err != nil {
+ return nil, fmt.Errorf(`dsig.Sign: %w`, err)
+ }
+ if isCryptoSigner {
+ return SignECDSACryptoSigner(cs, payload, meta.Hash, rr)
+ }
+ return SignECDSA(privkey, payload, meta.Hash, rr)
+}
diff --git a/vendor/github.com/lestrrat-go/dsig/validation.go b/vendor/github.com/lestrrat-go/dsig/validation.go
new file mode 100644
index 0000000000..17682d8538
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig/validation.go
@@ -0,0 +1,66 @@
+package dsig
+
+import (
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+)
+
+// isValidRSAKey validates that the provided key type is appropriate for RSA algorithms.
+// It returns false if the key is clearly incompatible (e.g., ECDSA or EdDSA keys).
+func isValidRSAKey(key any) bool {
+ switch key.(type) {
+ case
+ ecdsa.PrivateKey, *ecdsa.PrivateKey,
+ ed25519.PrivateKey:
+ // these are NOT ok for RSA algorithms
+ return false
+ }
+ return true
+}
+
+// isValidECDSAKey validates that the provided key type is appropriate for ECDSA algorithms.
+// It returns false if the key is clearly incompatible (e.g., RSA or EdDSA keys).
+func isValidECDSAKey(key any) bool {
+ switch key.(type) {
+ case
+ ed25519.PrivateKey,
+ rsa.PrivateKey, *rsa.PrivateKey:
+ // these are NOT ok for ECDSA algorithms
+ return false
+ }
+ return true
+}
+
+// isValidEDDSAKey validates that the provided key type is appropriate for EdDSA algorithms.
+// It returns false if the key is clearly incompatible (e.g., RSA or ECDSA keys).
+func isValidEDDSAKey(key any) bool {
+ switch key.(type) {
+ case
+ ecdsa.PrivateKey, *ecdsa.PrivateKey,
+ rsa.PrivateKey, *rsa.PrivateKey:
+ // these are NOT ok for EdDSA algorithms
+ return false
+ }
+ return true
+}
+
+// VerificationError represents an error that occurred during signature verification.
+type VerificationError struct {
+ message string
+}
+
+func (e *VerificationError) Error() string {
+ return e.message
+}
+
+// NewVerificationError creates a new verification error with the given message.
+func NewVerificationError(message string) error {
+ return &VerificationError{message: message}
+}
+
+// IsVerificationError checks if the given error is a verification error.
+func IsVerificationError(err error) bool {
+ _, ok := err.(*VerificationError)
+ return ok
+}
diff --git a/vendor/github.com/lestrrat-go/dsig/verify.go b/vendor/github.com/lestrrat-go/dsig/verify.go
new file mode 100644
index 0000000000..86085b0a37
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig/verify.go
@@ -0,0 +1,134 @@
+package dsig
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "fmt"
+)
+
+// Verify verifies a digital signature using the specified key and algorithm.
+//
+// This function loads the verifier registered in the dsig package _ONLY_.
+// It does not support custom verifiers that the user might have registered.
+func Verify(key any, alg string, payload, signature []byte) error {
+ info, ok := GetAlgorithmInfo(alg)
+ if !ok {
+ return fmt.Errorf(`dsig.Verify: unsupported signature algorithm %q`, alg)
+ }
+
+ switch info.Family {
+ case HMAC:
+ return dispatchHMACVerify(key, info, payload, signature)
+ case RSA:
+ return dispatchRSAVerify(key, info, payload, signature)
+ case ECDSA:
+ return dispatchECDSAVerify(key, info, payload, signature)
+ case EdDSAFamily:
+ return dispatchEdDSAVerify(key, info, payload, signature)
+ default:
+ return fmt.Errorf(`dsig.Verify: unsupported signature family %q`, info.Family)
+ }
+}
+
+func dispatchHMACVerify(key any, info AlgorithmInfo, payload, signature []byte) error {
+ meta, ok := info.Meta.(HMACFamilyMeta)
+ if !ok {
+ return fmt.Errorf(`dsig.Verify: invalid HMAC metadata`)
+ }
+
+ var hmackey []byte
+ if err := toHMACKey(&hmackey, key); err != nil {
+ return fmt.Errorf(`dsig.Verify: %w`, err)
+ }
+ return VerifyHMAC(hmackey, payload, signature, meta.HashFunc)
+}
+
+func dispatchRSAVerify(key any, info AlgorithmInfo, payload, signature []byte) error {
+ meta, ok := info.Meta.(RSAFamilyMeta)
+ if !ok {
+ return fmt.Errorf(`dsig.Verify: invalid RSA metadata`)
+ }
+
+ var pubkey *rsa.PublicKey
+
+ if cs, ok := key.(crypto.Signer); ok {
+ cpub := cs.Public()
+ switch cpub := cpub.(type) {
+ case rsa.PublicKey:
+ pubkey = &cpub
+ case *rsa.PublicKey:
+ pubkey = cpub
+ default:
+ return fmt.Errorf(`dsig.Verify: failed to retrieve rsa.PublicKey out of crypto.Signer %T`, key)
+ }
+ } else {
+ var ok bool
+ pubkey, ok = key.(*rsa.PublicKey)
+ if !ok {
+ return fmt.Errorf(`dsig.Verify: failed to retrieve *rsa.PublicKey out of %T`, key)
+ }
+ }
+
+ return VerifyRSA(pubkey, payload, signature, meta.Hash, meta.PSS)
+}
+
+func dispatchECDSAVerify(key any, info AlgorithmInfo, payload, signature []byte) error {
+ meta, ok := info.Meta.(ECDSAFamilyMeta)
+ if !ok {
+ return fmt.Errorf(`dsig.Verify: invalid ECDSA metadata`)
+ }
+
+ pubkey, cs, isCryptoSigner, err := ecdsaGetVerifierKey(key)
+ if err != nil {
+ return fmt.Errorf(`dsig.Verify: %w`, err)
+ }
+ if isCryptoSigner {
+ return VerifyECDSACryptoSigner(cs, payload, signature, meta.Hash)
+ }
+ return VerifyECDSA(pubkey, payload, signature, meta.Hash)
+}
+
+func dispatchEdDSAVerify(key any, _ AlgorithmInfo, payload, signature []byte) error {
+ var pubkey ed25519.PublicKey
+ signer, ok := key.(crypto.Signer)
+ if ok {
+ v := signer.Public()
+ pubkey, ok = v.(ed25519.PublicKey)
+ if !ok {
+ return fmt.Errorf(`dsig.Verify: expected crypto.Signer.Public() to return ed25519.PublicKey, but got %T`, v)
+ }
+ } else {
+ var ok bool
+ pubkey, ok = key.(ed25519.PublicKey)
+ if !ok {
+ return fmt.Errorf(`dsig.Verify: failed to retrieve ed25519.PublicKey out of %T`, key)
+ }
+ }
+
+ return VerifyEdDSA(pubkey, payload, signature)
+}
+
+func ecdsaGetVerifierKey(key any) (*ecdsa.PublicKey, crypto.Signer, bool, error) {
+ cs, isCryptoSigner := key.(crypto.Signer)
+ if isCryptoSigner {
+ switch key.(type) {
+ case ecdsa.PublicKey, *ecdsa.PublicKey:
+ // if it's ecdsa.PublicKey, it's more efficient to
+ // go through the non-crypto.Signer route. Set isCryptoSigner to false
+ isCryptoSigner = false
+ }
+ }
+
+ if isCryptoSigner {
+ return nil, cs, true, nil
+ }
+
+ pubkey, ok := key.(*ecdsa.PublicKey)
+ if !ok {
+ return nil, nil, false, fmt.Errorf(`invalid key type %T. *ecdsa.PublicKey is required`, key)
+ }
+
+ return pubkey, nil, false, nil
+}
diff --git a/vendor/github.com/lestrrat-go/httprc/v3/.gitignore b/vendor/github.com/lestrrat-go/httprc/v3/.gitignore
new file mode 100644
index 0000000000..66fd13c903
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/v3/.gitignore
@@ -0,0 +1,15 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
diff --git a/vendor/github.com/lestrrat-go/httprc/v3/.golangci.yml b/vendor/github.com/lestrrat-go/httprc/v3/.golangci.yml
new file mode 100644
index 0000000000..b3af8cfe12
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/v3/.golangci.yml
@@ -0,0 +1,95 @@
+version: "2"
+linters:
+ default: all
+ disable:
+ - cyclop
+ - depguard
+ - dupl
+ - errorlint
+ - exhaustive
+ - forbidigo
+ - funcorder
+ - funlen
+ - gochecknoglobals
+ - gochecknoinits
+ - gocognit
+ - gocritic
+ - gocyclo
+ - godot
+ - godox
+ - gosec
+ - gosmopolitan
+ - govet
+ - inamedparam
+ - ireturn
+ - lll
+ - maintidx
+ - makezero
+ - mnd
+ - nakedret
+ - nestif
+ - nlreturn
+ - noinlineerr
+ - nonamedreturns
+ - paralleltest
+ - tagliatelle
+ - testpackage
+ - thelper
+ - varnamelen
+ - wrapcheck
+ - wsl
+ - wsl_v5
+ settings:
+ govet:
+ disable:
+ - shadow
+ - fieldalignment
+ enable-all: true
+ exclusions:
+ generated: lax
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ rules:
+ - linters:
+ - staticcheck
+ path: /*.go
+ text: 'ST1003: should not use underscores in package names'
+ - linters:
+ - revive
+ path: /*.go
+ text: don't use an underscore in package name
+ - linters:
+ - contextcheck
+ - exhaustruct
+ path: /*.go
+ - linters:
+ - errcheck
+ path: /main.go
+ - linters:
+ - errcheck
+ - errchkjson
+ - forcetypeassert
+ path: /*_test.go
+ - linters:
+ - forbidigo
+ path: /*_example_test.go
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+issues:
+ max-issues-per-linter: 0
+ max-same-issues: 0
+formatters:
+ enable:
+ - gofmt
+ - goimports
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/vendor/github.com/lestrrat-go/httprc/v3/Changes b/vendor/github.com/lestrrat-go/httprc/v3/Changes
new file mode 100644
index 0000000000..4dc6f9f4b1
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/v3/Changes
@@ -0,0 +1,30 @@
+Changes
+=======
+
+v3.0.1 18 Aug 2025
+* Refresh() no longer requires the resource to be ready.
+
+v3.0.0 5 Jun 2025
+[Breaking Changes]
+ * The entire API has been re-imagined for Go versions that allow typed parameters
+
+v2.0.0 19 Feb 2024
+[Breaking Changes]
+ * `Fetcher` type is no longer available. You probably want to provide
+ a customg HTTP client instead via httprc.WithHTTPClient()).
+ *
+
+v1.0.4 19 Jul 2022
+ * Fix sloppy API breakage
+
+v1.0.3 19 Jul 2022
+ * Fix queue insertion in the middle of the queue (#7)
+
+v1.0.2 13 Jun 2022
+ * Properly release a lock when the fetch fails (#5)
+
+v1.0.1 29 Mar 2022
+ * Bump dependency for github.com/lestrrat-go/httpcc to v1.0.1
+
+v1.0.0 29 Mar 2022
+ * Initial release, refactored out of github.com/lestrrat-go/jwx
diff --git a/vendor/github.com/lestrrat-go/httprc/v3/LICENSE b/vendor/github.com/lestrrat-go/httprc/v3/LICENSE
new file mode 100644
index 0000000000..3e196892ca
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/v3/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2022 lestrrat
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/lestrrat-go/httprc/v3/README.md b/vendor/github.com/lestrrat-go/httprc/v3/README.md
new file mode 100644
index 0000000000..68239669a2
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/v3/README.md
@@ -0,0 +1,172 @@
+# github.com/lestrrat-go/httprc/v3  [](https://pkg.go.dev/github.com/lestrrat-go/httprc/v3)
+
+`httprc` is a HTTP "Refresh" Cache. Its aim is to cache a remote resource that
+can be fetched via HTTP, but keep the cached content up-to-date based on periodic
+refreshing.
+
+# Client
+
+A `httprc.Client` object is comprised of 3 parts: The user-facing controller API,
+the main controller loop, and set of workers that perform the actual fetching.
+
+The user-facing controller API is the object returned when you call `(httprc.Client).Start`.
+
+```go
+ctrl, _ := client.Start(ctx)
+```
+
+# Controller API
+
+The controller API gives you access to the controller backend that runs asynchronously.
+All methods take a `context.Context` object because they potentially block. You should
+be careful to use `context.WithTimeout` to properly set a timeout if you cannot tolerate
+a blocking operation.
+
+# Main Controller Loop
+
+The main controller loop is run asynchronously to the controller API. It is single threaded,
+and it has two reponsibilities.
+
+The first is to receive commands from the controller API,
+and appropriately modify the state of the goroutine, i.e. modify the list of resources
+it is watching, performing forced refreshes, etc.
+
+The other is to periodically wake up and go through the list of resources and re-fetch
+ones that are past their TTL (in reality, each resource carry a "next-check" time, not
+a TTL). The main controller loop itself does nothing more: it just kicks these checks periodically.
+
+The interval between fetches is changed dynamically based on either the metadata carried
+with the HTTP responses, such as `Cache-Control` and `Expires` headers, or a constant
+interval set by the user for a given resource. Between these values, the main controller loop
+will pick the shortest interval (but no less than 1 second) and checks if resources
+need updating based on that value.
+
+For example, if a resource A has an expiry of 10 minutes and if resource has an expiry of 5
+minutes, the main controller loop will attempt to wake up roughly every 5 minutes to check
+on the resources.
+
+When the controller loop detects that a resource needs to be checked for freshness,
+it will send the resource to the worker pool to be synced.
+
+# Interval calculation
+
+After the resource is synced, the next fetch is scheduled. The interval to the next
+fetch is calculated either by using constant intervals, or by heuristics using values
+from the `http.Response` object.
+
+If the constant interval is specified, no extra calculation is performed. If you specify
+a constant interval of 15 minutes, the resource will be checked every 15 minutes. This is
+predictable and reliable, but not necessarily efficient.
+
+If you do not specify a constant interval, the HTTP response is analyzed for
+values in `Cache-Control` and `Expires` headers. These values will be compared against
+a maximum and minimum interval values, which default to 30 days and 15 minutes, respectively.
+If the values obtained from the headers fall within that range, the value from the header is
+used. If the value is larger than the maximum, the maximum is used. If the value is lower
+than the minimum, the minimum is used.
+
+# SYNOPSIS
+
+
+```go
+package httprc_test
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "time"
+
+ "github.com/lestrrat-go/httprc/v3"
+)
+
+func ExampleClient() {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ type HelloWorld struct {
+ Hello string `json:"hello"`
+ }
+
+ srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ json.NewEncoder(w).Encode(map[string]string{"hello": "world"})
+ }))
+
+ options := []httprc.NewClientOption{
+ // By default the client will allow all URLs (which is what the option
+ // below is explicitly specifying). If you want to restrict what URLs
+ // are allowed, you can specify another whitelist.
+ //
+ // httprc.WithWhitelist(httprc.NewInsecureWhitelist()),
+ }
+ // If you would like to handle errors from asynchronous workers, you can specify a error sink.
+ // This is disabled in this example because the trace logs are dynamic
+ // and thus would interfere with the runnable example test.
+ // options = append(options, httprc.WithErrorSink(errsink.NewSlog(slog.New(slog.NewJSONHandler(os.Stdout, nil)))))
+
+ // If you would like to see the trace logs, you can specify a trace sink.
+ // This is disabled in this example because the trace logs are dynamic
+ // and thus would interfere with the runnable example test.
+ // options = append(options, httprc.WithTraceSink(tracesink.NewSlog(slog.New(slog.NewJSONHandler(os.Stdout, nil)))))
+
+ // Create a new client
+ cl := httprc.NewClient(options...)
+
+ // Start the client, and obtain a Controller object
+ ctrl, err := cl.Start(ctx)
+ if err != nil {
+ fmt.Println(err.Error())
+ return
+ }
+ // The following is required if you want to make sure that there are no
+ // dangling goroutines hanging around when you exit. For example, if you
+ // are running tests to check for goroutine leaks, you should call this
+ // function before the end of your test.
+ defer ctrl.Shutdown(time.Second)
+
+ // Create a new resource that is synchronized every so often
+ //
+ // By default the client will attempt to fetch the resource once
+ // as soon as it can, and then if no other metadata is provided,
+ // it will fetch the resource every 15 minutes.
+ //
+ // If the resource responds with a Cache-Control/Expires header,
+ // the client will attempt to respect that, and will try to fetch
+ // the resource again based on the values obatained from the headers.
+ r, err := httprc.NewResource[HelloWorld](srv.URL, httprc.JSONTransformer[HelloWorld]())
+ if err != nil {
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Add the resource to the controller, so that it starts fetching.
+ // By default, a call to `Add()` will block until the first fetch
+ // succeeds, via an implicit call to `r.Ready()`
+ // You can change this behavior if you specify the `WithWaitReady(false)`
+ // option.
+ ctrl.Add(ctx, r)
+
+ // if you specified `httprc.WithWaitReady(false)` option, the fetch will happen
+ // "soon", but you're not guaranteed that it will happen before the next
+ // call to `Lookup()`. If you want to make sure that the resource is ready,
+ // you can call `Ready()` like so:
+ /*
+ {
+ tctx, tcancel := context.WithTimeout(ctx, time.Second)
+ defer tcancel()
+ if err := r.Ready(tctx); err != nil {
+ fmt.Println(err.Error())
+ return
+ }
+ }
+ */
+ m := r.Resource()
+ fmt.Println(m.Hello)
+ // OUTPUT:
+ // world
+}
+```
+source: [client_example_test.go](https://github.com/lestrrat-go/httprc/blob/refs/heads/v3/client_example_test.go)
+
diff --git a/vendor/github.com/lestrrat-go/httprc/v3/backend.go b/vendor/github.com/lestrrat-go/httprc/v3/backend.go
new file mode 100644
index 0000000000..31f9fc07d3
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/v3/backend.go
@@ -0,0 +1,235 @@
+package httprc
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+)
+
+func (c *ctrlBackend) adjustInterval(ctx context.Context, req adjustIntervalRequest) {
+ interval := roundupToSeconds(time.Until(req.resource.Next()))
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: got adjust request (current tick interval=%s, next for %q=%s)", c.tickInterval, req.resource.URL(), interval))
+
+ if interval < time.Second {
+ interval = time.Second
+ }
+
+ if c.tickInterval < interval {
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: no adjusting required (time to next check %s > current tick interval %s)", interval, c.tickInterval))
+ } else {
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: adjusting tick interval to %s", interval))
+ c.tickInterval = interval
+ c.check.Reset(interval)
+ }
+}
+
+func (c *ctrlBackend) addResource(ctx context.Context, req addRequest) {
+ r := req.resource
+ if _, ok := c.items[r.URL()]; ok {
+ // Already exists
+ sendReply(ctx, req.reply, struct{}{}, errResourceAlreadyExists)
+ return
+ }
+ c.items[r.URL()] = r
+
+ if r.MaxInterval() == 0 {
+ r.SetMaxInterval(c.defaultMaxInterval)
+ }
+
+ if r.MinInterval() == 0 {
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: set minimum interval to %s", c.defaultMinInterval))
+ r.SetMinInterval(c.defaultMinInterval)
+ }
+
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: added resource %q", r.URL()))
+ sendReply(ctx, req.reply, struct{}{}, nil)
+ c.SetTickInterval(time.Nanosecond)
+}
+
+func (c *ctrlBackend) rmResource(ctx context.Context, req rmRequest) {
+ u := req.u
+ if _, ok := c.items[u]; !ok {
+ sendReply(ctx, req.reply, struct{}{}, errResourceNotFound)
+ return
+ }
+
+ delete(c.items, u)
+
+ minInterval := oneDay
+ for _, item := range c.items {
+ if d := item.MinInterval(); d < minInterval {
+ minInterval = d
+ }
+ }
+
+ close(req.reply)
+ c.check.Reset(minInterval)
+}
+
+func (c *ctrlBackend) refreshResource(ctx context.Context, req refreshRequest) {
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: [refresh] START %q", req.u))
+ defer c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: [refresh] END %q", req.u))
+ u := req.u
+
+ r, ok := c.items[u]
+ if !ok {
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: [refresh] %s is not registered", req.u))
+ sendReply(ctx, req.reply, struct{}{}, errResourceNotFound)
+ return
+ }
+
+ // Note: We don't wait for r.Ready() here because refresh should work
+ // regardless of whether the resource has been fetched before. This allows
+ // refresh to work with resources registered using WithWaitReady(false).
+
+ r.SetNext(time.Unix(0, 0))
+ sendWorkerSynchronous(ctx, c.syncoutgoing, synchronousRequest{
+ resource: r,
+ reply: req.reply,
+ })
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: [refresh] sync request for %s sent to worker pool", req.u))
+}
+
+func (c *ctrlBackend) lookupResource(ctx context.Context, req lookupRequest) {
+ u := req.u
+ r, ok := c.items[u]
+ if !ok {
+ sendReply(ctx, req.reply, nil, errResourceNotFound)
+ return
+ }
+ sendReply(ctx, req.reply, r, nil)
+}
+
+func (c *ctrlBackend) handleRequest(ctx context.Context, req any) {
+ switch req := req.(type) {
+ case adjustIntervalRequest:
+ c.adjustInterval(ctx, req)
+ case addRequest:
+ c.addResource(ctx, req)
+ case rmRequest:
+ c.rmResource(ctx, req)
+ case refreshRequest:
+ c.refreshResource(ctx, req)
+ case lookupRequest:
+ c.lookupResource(ctx, req)
+ default:
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: unknown request type %T", req))
+ }
+}
+
+func sendWorker(ctx context.Context, ch chan Resource, r Resource) {
+ r.SetBusy(true)
+ select {
+ case <-ctx.Done():
+ case ch <- r:
+ }
+}
+
+func sendWorkerSynchronous(ctx context.Context, ch chan synchronousRequest, r synchronousRequest) {
+ r.resource.SetBusy(true)
+ select {
+ case <-ctx.Done():
+ case ch <- r:
+ }
+}
+
+func sendReply[T any](ctx context.Context, ch chan backendResponse[T], v T, err error) {
+ defer close(ch)
+ select {
+ case <-ctx.Done():
+ case ch <- backendResponse[T]{payload: v, err: err}:
+ }
+}
+
+type ctrlBackend struct {
+ items map[string]Resource
+ outgoing chan Resource
+ syncoutgoing chan synchronousRequest
+ incoming chan any // incoming requests to the controller
+ traceSink TraceSink
+ tickInterval time.Duration
+ check *time.Ticker
+ defaultMaxInterval time.Duration
+ defaultMinInterval time.Duration
+}
+
+func (c *ctrlBackend) loop(ctx context.Context, readywg, donewg *sync.WaitGroup) {
+ c.traceSink.Put(ctx, "httprc controller: starting main controller loop")
+ readywg.Done()
+ defer c.traceSink.Put(ctx, "httprc controller: stopping main controller loop")
+ defer donewg.Done()
+ for {
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: waiting for request or tick (tick interval=%s)", c.tickInterval))
+ select {
+ case req := <-c.incoming:
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: got request %T", req))
+ c.handleRequest(ctx, req)
+ case t := <-c.check.C:
+ c.periodicCheck(ctx, t)
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+func (c *ctrlBackend) periodicCheck(ctx context.Context, t time.Time) {
+ c.traceSink.Put(ctx, "httprc controller: START periodic check")
+ defer c.traceSink.Put(ctx, "httprc controller: END periodic check")
+ var minNext time.Time
+ var dispatched int
+ minInterval := -1 * time.Second
+ for _, item := range c.items {
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: checking resource %q", item.URL()))
+
+ next := item.Next()
+ if minNext.IsZero() || next.Before(minNext) {
+ minNext = next
+ }
+
+ if interval := item.MinInterval(); minInterval < 0 || interval < minInterval {
+ minInterval = interval
+ }
+
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: resource %q isBusy=%t, next(%s).After(%s)=%t", item.URL(), item.IsBusy(), next, t, next.After(t)))
+ if item.IsBusy() || next.After(t) {
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: resource %q is busy or not ready yet, skipping", item.URL()))
+ continue
+ }
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: resource %q is ready, dispatching to worker pool", item.URL()))
+
+ dispatched++
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: dispatching resource %q to worker pool", item.URL()))
+ sendWorker(ctx, c.outgoing, item)
+ }
+
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: dispatched %d resources", dispatched))
+
+ // Next check is always at the earliest next check + 1 second.
+ // The extra second makes sure that we are _past_ the actual next check time
+ // so we can send the resource to the worker pool
+ if interval := time.Until(minNext); interval > 0 {
+ c.SetTickInterval(roundupToSeconds(interval) + time.Second)
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: resetting check intervanl to %s", c.tickInterval))
+ } else {
+ // if we got here, either we have no resources, or all resources are busy.
+ // In this state, it's possible that the interval is less than 1 second,
+ // because we previously set it to a small value for an immediate refresh.
+ // in this case, we want to reset it to a sane value
+ if c.tickInterval < time.Second {
+ c.SetTickInterval(minInterval)
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: resetting check intervanl to %s after forced refresh", c.tickInterval))
+ }
+ }
+
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: next check in %s", c.tickInterval))
+}
+
+func (c *ctrlBackend) SetTickInterval(d time.Duration) {
+ // TODO synchronize
+ if d <= 0 {
+ d = time.Second // ensure positive interval
+ }
+ c.tickInterval = d
+ c.check.Reset(d)
+}
diff --git a/vendor/github.com/lestrrat-go/httprc/v3/client.go b/vendor/github.com/lestrrat-go/httprc/v3/client.go
new file mode 100644
index 0000000000..75ac3fc188
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/v3/client.go
@@ -0,0 +1,183 @@
+package httprc
+
+import (
+ "context"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/lestrrat-go/httprc/v3/errsink"
+ "github.com/lestrrat-go/httprc/v3/proxysink"
+ "github.com/lestrrat-go/httprc/v3/tracesink"
+)
+
+// setupSink creates and starts a proxy for the given sink if it's not a Nop sink
+// Returns the sink to use and a cancel function that should be chained with the original cancel
+func setupSink[T any, S proxysink.Backend[T], NopType any](ctx context.Context, sink S, wg *sync.WaitGroup) (S, context.CancelFunc) {
+ if _, ok := any(sink).(NopType); ok {
+ return sink, func() {}
+ }
+
+ proxy := proxysink.New[T](sink)
+ wg.Add(1)
+ go func(ctx context.Context, wg *sync.WaitGroup, proxy *proxysink.Proxy[T]) {
+ defer wg.Done()
+ proxy.Run(ctx)
+ }(ctx, wg, proxy)
+
+ // proxy can be converted to one of the sink subtypes
+ s, ok := any(proxy).(S)
+ if !ok {
+ panic("type assertion failed: proxy cannot be converted to type S")
+ }
+ return s, proxy.Close
+}
+
+// Client is the main entry point for the httprc package.
+type Client struct {
+ mu sync.Mutex
+ httpcl HTTPClient
+ numWorkers int
+ running bool
+ errSink ErrorSink
+ traceSink TraceSink
+ wl Whitelist
+ defaultMaxInterval time.Duration
+ defaultMinInterval time.Duration
+}
+
+// NewClient creates a new `httprc.Client` object.
+//
+// By default ALL urls are allowed. This may not be suitable for you if
+// are using this in a production environment. You are encouraged to specify
+// a whitelist using the `WithWhitelist` option.
+func NewClient(options ...NewClientOption) *Client {
+ //nolint:staticcheck
+ var errSink ErrorSink = errsink.NewNop()
+ //nolint:staticcheck
+ var traceSink TraceSink = tracesink.NewNop()
+ var wl Whitelist = InsecureWhitelist{}
+ var httpcl HTTPClient = http.DefaultClient
+
+ defaultMinInterval := DefaultMinInterval
+ defaultMaxInterval := DefaultMaxInterval
+
+ numWorkers := DefaultWorkers
+ //nolint:forcetypeassert
+ for _, option := range options {
+ switch option.Ident() {
+ case identHTTPClient{}:
+ httpcl = option.Value().(HTTPClient)
+ case identWorkers{}:
+ numWorkers = option.Value().(int)
+ case identErrorSink{}:
+ errSink = option.Value().(ErrorSink)
+ case identTraceSink{}:
+ traceSink = option.Value().(TraceSink)
+ case identWhitelist{}:
+ wl = option.Value().(Whitelist)
+ }
+ }
+
+ if numWorkers <= 0 {
+ numWorkers = 1
+ }
+ return &Client{
+ httpcl: httpcl,
+ numWorkers: numWorkers,
+ errSink: errSink,
+ traceSink: traceSink,
+ wl: wl,
+
+ defaultMinInterval: defaultMinInterval,
+ defaultMaxInterval: defaultMaxInterval,
+ }
+}
+
+// Start sets the client into motion. It will start a number of worker goroutines,
+// and return a Controller object that you can use to control the execution of
+// the client.
+//
+// If you attempt to call Start more than once, it will return an error.
+func (c *Client) Start(octx context.Context) (Controller, error) {
+ c.mu.Lock()
+ if c.running {
+ c.mu.Unlock()
+ return nil, errAlreadyRunning
+ }
+ c.running = true
+ c.mu.Unlock()
+
+ // DON'T CANCEL THIS IN THIS METHOD! It's the responsibility of the
+ // controller to cancel this context.
+ ctx, cancel := context.WithCancel(octx)
+
+ var donewg sync.WaitGroup
+
+ // start proxy goroutines that will accept sink requests
+ // and forward them to the appropriate sink
+ errSink, errCancel := setupSink[error, ErrorSink, errsink.Nop](ctx, c.errSink, &donewg)
+ traceSink, traceCancel := setupSink[string, TraceSink, tracesink.Nop](ctx, c.traceSink, &donewg)
+
+ // Chain the cancel functions
+ ocancel := cancel
+ cancel = func() {
+ ocancel()
+ errCancel()
+ traceCancel()
+ }
+
+ chbuf := c.numWorkers + 1
+ incoming := make(chan any, chbuf)
+ outgoing := make(chan Resource, chbuf)
+ syncoutgoing := make(chan synchronousRequest, chbuf)
+
+ var readywg sync.WaitGroup
+ readywg.Add(c.numWorkers)
+ donewg.Add(c.numWorkers)
+ for range c.numWorkers {
+ wrk := worker{
+ incoming: incoming,
+ next: outgoing,
+ nextsync: syncoutgoing,
+ errSink: errSink,
+ traceSink: traceSink,
+ httpcl: c.httpcl,
+ }
+ go wrk.Run(ctx, &readywg, &donewg)
+ }
+
+ tickInterval := oneDay
+ ctrl := &controller{
+ cancel: cancel,
+ incoming: incoming,
+ shutdown: make(chan struct{}),
+ traceSink: traceSink,
+ wl: c.wl,
+ }
+
+ backend := &ctrlBackend{
+ items: make(map[string]Resource),
+ outgoing: outgoing,
+ syncoutgoing: syncoutgoing,
+ incoming: incoming,
+ traceSink: traceSink,
+ tickInterval: tickInterval,
+ check: time.NewTicker(tickInterval),
+
+ defaultMinInterval: c.defaultMinInterval,
+ defaultMaxInterval: c.defaultMaxInterval,
+ }
+ donewg.Add(1)
+ readywg.Add(1)
+ go backend.loop(ctx, &readywg, &donewg)
+
+ go func(wg *sync.WaitGroup, ch chan struct{}) {
+ wg.Wait()
+ close(ch)
+ }(&donewg, ctrl.shutdown)
+
+ readywg.Wait()
+
+ return ctrl, nil
+}
diff --git a/vendor/github.com/lestrrat-go/httprc/v3/controller.go b/vendor/github.com/lestrrat-go/httprc/v3/controller.go
new file mode 100644
index 0000000000..ae2eb218e4
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/v3/controller.go
@@ -0,0 +1,186 @@
+package httprc
+
+import (
+ "context"
+ "fmt"
+ "time"
+)
+
+type Controller interface {
+ // Add adds a new `http.Resource` to the controller. If the resource already exists,
+ // it will return an error.
+ Add(context.Context, Resource, ...AddOption) error
+
+ // Lookup a `httprc.Resource` by its URL. If the resource does not exist, it
+ // will return an error.
+ Lookup(context.Context, string) (Resource, error)
+
+ // Remove a `httprc.Resource` from the controller by its URL. If the resource does
+ // not exist, it will return an error.
+ Remove(context.Context, string) error
+
+ // Refresh forces a resource to be refreshed immediately. If the resource does
+ // not exist, or if the refresh fails, it will return an error.
+ Refresh(context.Context, string) error
+
+ ShutdownContext(context.Context) error
+ Shutdown(time.Duration) error
+}
+
+type controller struct {
+ cancel context.CancelFunc
+ incoming chan any // incoming requests to the controller
+ shutdown chan struct{}
+ traceSink TraceSink
+ wl Whitelist
+}
+
+// Shutdown is a convenience function that calls ShutdownContext with a
+// context that has a timeout of `timeout`.
+func (c *controller) Shutdown(timeout time.Duration) error {
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+ return c.ShutdownContext(ctx)
+}
+
+// ShutdownContext stops the client and all associated goroutines, and waits for them
+// to finish. If the context is canceled, the function will return immediately:
+// there fore you should not use the context you used to start the client (because
+// presumably it's already canceled).
+//
+// Waiting for the client shutdown will also ensure that all sinks are properly
+// flushed.
+func (c *controller) ShutdownContext(ctx context.Context) error {
+ c.cancel()
+
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-c.shutdown:
+ return nil
+ }
+}
+
+type ctrlRequest[T any] struct {
+ reply chan T
+ resource Resource
+ u string
+}
+type addRequest ctrlRequest[backendResponse[struct{}]]
+type rmRequest ctrlRequest[backendResponse[struct{}]]
+type refreshRequest ctrlRequest[backendResponse[struct{}]]
+type lookupRequest ctrlRequest[backendResponse[Resource]]
+type synchronousRequest ctrlRequest[backendResponse[struct{}]]
+type adjustIntervalRequest struct {
+ resource Resource
+}
+
+type backendResponse[T any] struct {
+ payload T
+ err error
+}
+
+func sendBackend[TReq any, TB any](ctx context.Context, backendCh chan any, v TReq, replyCh chan backendResponse[TB]) (TB, error) {
+ select {
+ case <-ctx.Done():
+ case backendCh <- v:
+ }
+
+ select {
+ case <-ctx.Done():
+ var zero TB
+ return zero, ctx.Err()
+ case res := <-replyCh:
+ return res.payload, res.err
+ }
+}
+
+// Lookup returns a resource by its URL. If the resource does not exist, it
+// will return an error.
+//
+// Unfortunately, due to the way typed parameters are handled in Go, we can only
+// return a Resource object (and not a ResourceBase[T] object). This means that
+// you will either need to use the `Resource.Get()` method or use a type
+// assertion to obtain a `ResourceBase[T]` to get to the actual object you are
+// looking for
+func (c *controller) Lookup(ctx context.Context, u string) (Resource, error) {
+ reply := make(chan backendResponse[Resource], 1)
+ req := lookupRequest{
+ reply: reply,
+ u: u,
+ }
+ return sendBackend[lookupRequest, Resource](ctx, c.incoming, req, reply)
+}
+
+// Add adds a new resource to the controller. If the resource already
+// exists, it will return an error.
+//
+// By default this function will automatically wait for the resource to be
+// fetched once (by calling `r.Ready()`). Note that the `r.Ready()` call will NOT
+// timeout unless you configure your context object with `context.WithTimeout`.
+// To disable waiting, you can specify the `WithWaitReady(false)` option.
+func (c *controller) Add(ctx context.Context, r Resource, options ...AddOption) error {
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: START Add(%q)", r.URL()))
+ defer c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: END Add(%q)", r.URL()))
+ waitReady := true
+ //nolint:forcetypeassert
+ for _, option := range options {
+ switch option.Ident() {
+ case identWaitReady{}:
+ waitReady = option.(addOption).Value().(bool)
+ }
+ }
+
+ if !c.wl.IsAllowed(r.URL()) {
+ return fmt.Errorf(`httprc.Controller.AddResource: cannot add %q: %w`, r.URL(), errBlockedByWhitelist)
+ }
+
+ reply := make(chan backendResponse[struct{}], 1)
+ req := addRequest{
+ reply: reply,
+ resource: r,
+ }
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: sending add request for %q to backend", r.URL()))
+ if _, err := sendBackend[addRequest, struct{}](ctx, c.incoming, req, reply); err != nil {
+ return err
+ }
+
+ if waitReady {
+ c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: waiting for resource %q to be ready", r.URL()))
+ if err := r.Ready(ctx); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Remove removes a resource from the controller. If the resource does
+// not exist, it will return an error.
+func (c *controller) Remove(ctx context.Context, u string) error {
+ reply := make(chan backendResponse[struct{}], 1)
+ req := rmRequest{
+ reply: reply,
+ u: u,
+ }
+ if _, err := sendBackend[rmRequest, struct{}](ctx, c.incoming, req, reply); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Refresh forces a resource to be refreshed immediately. If the resource does
+// not exist, or if the refresh fails, it will return an error.
+//
+// This function is synchronous, and will block until the resource has been refreshed.
+func (c *controller) Refresh(ctx context.Context, u string) error {
+ reply := make(chan backendResponse[struct{}], 1)
+ req := refreshRequest{
+ reply: reply,
+ u: u,
+ }
+
+ if _, err := sendBackend[refreshRequest, struct{}](ctx, c.incoming, req, reply); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/httprc/v3/errors.go b/vendor/github.com/lestrrat-go/httprc/v3/errors.go
new file mode 100644
index 0000000000..1152ba947f
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/v3/errors.go
@@ -0,0 +1,57 @@
+package httprc
+
+import "errors"
+
+var errResourceAlreadyExists = errors.New(`resource already exists`)
+
+func ErrResourceAlreadyExists() error {
+ return errResourceAlreadyExists
+}
+
+var errAlreadyRunning = errors.New(`client is already running`)
+
+func ErrAlreadyRunning() error {
+ return errAlreadyRunning
+}
+
+var errResourceNotFound = errors.New(`resource not found`)
+
+func ErrResourceNotFound() error {
+ return errResourceNotFound
+}
+
+var errTransformerRequired = errors.New(`transformer is required`)
+
+func ErrTransformerRequired() error {
+ return errTransformerRequired
+}
+
+var errURLCannotBeEmpty = errors.New(`URL cannot be empty`)
+
+func ErrURLCannotBeEmpty() error {
+ return errURLCannotBeEmpty
+}
+
+var errUnexpectedStatusCode = errors.New(`unexpected status code`)
+
+func ErrUnexpectedStatusCode() error {
+ return errUnexpectedStatusCode
+}
+
+var errTransformerFailed = errors.New(`failed to transform response body`)
+
+func ErrTransformerFailed() error {
+ return errTransformerFailed
+}
+
+var errRecoveredFromPanic = errors.New(`recovered from panic`)
+
+func ErrRecoveredFromPanic() error {
+ return errRecoveredFromPanic
+}
+
+var errBlockedByWhitelist = errors.New(`blocked by whitelist`)
+
+func ErrBlockedByWhitelist() error {
+ return errBlockedByWhitelist
+}
diff --git a/vendor/github.com/lestrrat-go/httprc/v3/errsink/errsink.go b/vendor/github.com/lestrrat-go/httprc/v3/errsink/errsink.go
new file mode 100644
index 0000000000..03d128ffcd
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/v3/errsink/errsink.go
@@ -0,0 +1,59 @@
+package errsink
+
+import (
+ "context"
+ "log/slog"
+)
+
+type Interface interface {
+ Put(context.Context, error)
+}
+
+// Nop is an ErrorSink that does nothing. It does not require
+// any initialization, so the zero value can be used.
+type Nop struct{}
+
+// NewNop returns a new NopErrorSink object. The constructor
+// is provided for consistency.
+func NewNop() Interface {
+ return Nop{}
+}
+
+// Put for NopErrorSink does nothing.
+func (Nop) Put(context.Context, error) {}
+
+type SlogLogger interface {
+ Log(context.Context, slog.Level, string, ...any)
+}
+
+type slogSink struct {
+ logger SlogLogger
+}
+
+// NewSlog returns a new ErrorSink that logs errors using the provided slog.Logger
+func NewSlog(l SlogLogger) Interface {
+ return &slogSink{
+ logger: l,
+ }
+}
+
+func (s *slogSink) Put(ctx context.Context, v error) {
+ s.logger.Log(ctx, slog.LevelError, v.Error())
+}
+
+// FuncSink is an ErrorSink that calls a function with the error.
+type FuncSink struct {
+ fn func(context.Context, error)
+}
+
+// NewFunc returns a new FuncSink that calls the provided function with errors.
+func NewFunc(fn func(context.Context, error)) Interface {
+ return &FuncSink{fn: fn}
+}
+
+// Put calls the function with the error.
+func (f *FuncSink) Put(ctx context.Context, err error) {
+ if f.fn != nil {
+ f.fn(ctx, err)
+ }
+}
diff --git a/vendor/github.com/lestrrat-go/httprc/v3/httprc.go b/vendor/github.com/lestrrat-go/httprc/v3/httprc.go
new file mode 100644
index 0000000000..fc324b771f
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/v3/httprc.go
@@ -0,0 +1,90 @@
+package httprc
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/lestrrat-go/httprc/v3/errsink"
+ "github.com/lestrrat-go/httprc/v3/tracesink"
+)
+
+// Buffer size constants
+const (
+ // ReadBufferSize is the default buffer size for reading HTTP responses (10MB)
+ ReadBufferSize = 1024 * 1024 * 10
+ // MaxBufferSize is the maximum allowed buffer size (1GB)
+ MaxBufferSize = 1024 * 1024 * 1000
+)
+
+// Client worker constants
+const (
+ // DefaultWorkers is the default number of worker goroutines
+ DefaultWorkers = 5
+)
+
+// Interval constants
+const (
+ // DefaultMaxInterval is the default maximum interval between fetches (30 days)
+ DefaultMaxInterval = 24 * time.Hour * 30
+ // DefaultMinInterval is the default minimum interval between fetches (15 minutes)
+ DefaultMinInterval = 15 * time.Minute
+ // oneDay is used internally for time calculations
+ oneDay = 24 * time.Hour
+)
+
+// utility to round up intervals to the nearest second
+func roundupToSeconds(d time.Duration) time.Duration {
+ if diff := d % time.Second; diff > 0 {
+ return d + time.Second - diff
+ }
+ return d
+}
+
+// ErrorSink is an interface that abstracts a sink for errors.
+type ErrorSink = errsink.Interface
+
+type TraceSink = tracesink.Interface
+
+// HTTPClient is an interface that abstracts a "net/http".Client, so that
+// users can provide their own implementation of the HTTP client, if need be.
+type HTTPClient interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+// Transformer is used to convert the body of an HTTP response into an appropriate
+// object of type T.
+type Transformer[T any] interface {
+ Transform(context.Context, *http.Response) (T, error)
+}
+
+// TransformFunc is a function type that implements the Transformer interface.
+type TransformFunc[T any] func(context.Context, *http.Response) (T, error)
+
+func (f TransformFunc[T]) Transform(ctx context.Context, res *http.Response) (T, error) {
+ return f(ctx, res)
+}
+
+// Resource is a single resource that can be retrieved via HTTP, and (possibly) transformed
+// into an arbitrary object type.
+//
+// Realistically, there is no need for third-parties to implement this interface. This exists
+// to provide a way to aggregate `httprc.ResourceBase` objects with different specialized types
+// into a single collection.
+//
+// See ResourceBase for details
+type Resource interface { //nolint:interfacebloat
+ Get(any) error
+ Next() time.Time
+ SetNext(time.Time)
+ URL() string
+ Sync(context.Context) error
+ ConstantInterval() time.Duration
+ MaxInterval() time.Duration
+ SetMaxInterval(time.Duration)
+ MinInterval() time.Duration
+ SetMinInterval(time.Duration)
+ IsBusy() bool
+ SetBusy(bool)
+ Ready(context.Context) error
+}
diff --git a/vendor/github.com/lestrrat-go/httprc/v3/options.go b/vendor/github.com/lestrrat-go/httprc/v3/options.go
new file mode 100644
index 0000000000..3f07b5671c
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/v3/options.go
@@ -0,0 +1,144 @@
+package httprc
+
+import (
+ "time"
+
+ "github.com/lestrrat-go/option"
+)
+
+type NewClientOption interface {
+ option.Interface
+ newClientOption()
+}
+
+type newClientOption struct {
+ option.Interface
+}
+
+func (newClientOption) newClientOption() {}
+
+type identWorkers struct{}
+
+// WithWorkers specifies the number of concurrent workers to use for the client.
+// If n is less than or equal to 0, the client will use a single worker.
+func WithWorkers(n int) NewClientOption {
+ return newClientOption{option.New(identWorkers{}, n)}
+}
+
+type identErrorSink struct{}
+
+// WithErrorSink specifies the error sink to use for the client.
+// If not specified, the client will use a NopErrorSink.
+func WithErrorSink(sink ErrorSink) NewClientOption {
+ return newClientOption{option.New(identErrorSink{}, sink)}
+}
+
+type identTraceSink struct{}
+
+// WithTraceSink specifies the trace sink to use for the client.
+// If not specified, the client will use a NopTraceSink.
+func WithTraceSink(sink TraceSink) NewClientOption {
+ return newClientOption{option.New(identTraceSink{}, sink)}
+}
+
+type identWhitelist struct{}
+
+// WithWhitelist specifies the whitelist to use for the client.
+// If not specified, the client will use a BlockAllWhitelist.
+func WithWhitelist(wl Whitelist) NewClientOption {
+ return newClientOption{option.New(identWhitelist{}, wl)}
+}
+
+type NewResourceOption interface {
+ option.Interface
+ newResourceOption()
+}
+
+type newResourceOption struct {
+ option.Interface
+}
+
+func (newResourceOption) newResourceOption() {}
+
+type NewClientResourceOption interface {
+ option.Interface
+ newResourceOption()
+ newClientOption()
+}
+
+type newClientResourceOption struct {
+ option.Interface
+}
+
+func (newClientResourceOption) newResourceOption() {}
+func (newClientResourceOption) newClientOption() {}
+
+type identHTTPClient struct{}
+
+// WithHTTPClient specifies the HTTP client to use for the client.
+// If not specified, the client will use http.DefaultClient.
+//
+// This option can be passed to NewClient or NewResource.
+func WithHTTPClient(cl HTTPClient) NewClientResourceOption {
+ return newClientResourceOption{option.New(identHTTPClient{}, cl)}
+}
+
+type identMinimumInterval struct{}
+
+// WithMinInterval specifies the minimum interval between fetches.
+//
+// This option affects the dynamic calculation of the interval between fetches.
+// If the value calculated from the http.Response is less than the this value,
+// the client will use this value instead.
+func WithMinInterval(d time.Duration) NewResourceOption {
+ return newResourceOption{option.New(identMinimumInterval{}, d)}
+}
+
+type identMaximumInterval struct{}
+
+// WithMaxInterval specifies the maximum interval between fetches.
+//
+// This option affects the dynamic calculation of the interval between fetches.
+// If the value calculated from the http.Response is greater than the this value,
+// the client will use this value instead.
+func WithMaxInterval(d time.Duration) NewResourceOption {
+ return newResourceOption{option.New(identMaximumInterval{}, d)}
+}
+
+type identConstantInterval struct{}
+
+// WithConstantInterval specifies the interval between fetches. When you
+// specify this option, the client will fetch the resource at the specified
+// intervals, regardless of the response's Cache-Control or Expires headers.
+//
+// By default this option is disabled.
+func WithConstantInterval(d time.Duration) NewResourceOption {
+ return newResourceOption{option.New(identConstantInterval{}, d)}
+}
+
+type AddOption interface {
+ option.Interface
+ newAddOption()
+}
+
+type addOption struct {
+ option.Interface
+}
+
+func (addOption) newAddOption() {}
+
+type identWaitReady struct{}
+
+// WithWaitReady specifies whether the client should wait for the resource to be
+// ready before returning from the Add method.
+//
+// By default, the client will wait for the resource to be ready before returning.
+// If you specify this option with a value of false, the client will not wait for
+// the resource to be fully registered, which is usually not what you want.
+// This option exists to accommodate for cases where you for some reason want to
+// add a resource to the controller, but want to do something else before
+// you wait for it. Make sure to call `r.Ready()` later on to ensure that
+// the resource is ready before you try to access it.
+func WithWaitReady(b bool) AddOption {
+ return addOption{option.New(identWaitReady{}, b)}
+}
diff --git a/vendor/github.com/lestrrat-go/httprc/v3/proxysink/proxysink.go b/vendor/github.com/lestrrat-go/httprc/v3/proxysink/proxysink.go
new file mode 100644
index 0000000000..f290422d6c
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/v3/proxysink/proxysink.go
@@ -0,0 +1,135 @@
+package proxysink
+
+import (
+ "context"
+ "sync"
+)
+
+type Backend[T any] interface {
+ Put(context.Context, T)
+}
+
+// Proxy is used to send values through a channel. This is used to
+// serialize calls to underlying sinks.
+type Proxy[T any] struct {
+ mu *sync.Mutex
+ cancel context.CancelFunc
+ ch chan T
+ cond *sync.Cond
+ pending []T
+ backend Backend[T]
+ closed bool
+}
+
+func New[T any](b Backend[T]) *Proxy[T] {
+ mu := &sync.Mutex{}
+ return &Proxy[T]{
+ ch: make(chan T, 1),
+ mu: mu,
+ cond: sync.NewCond(mu),
+ backend: b,
+ cancel: func() {},
+ }
+}
+
+func (p *Proxy[T]) Run(ctx context.Context) {
+ defer p.cond.Broadcast()
+
+ p.mu.Lock()
+ ctx, cancel := context.WithCancel(ctx)
+ p.cancel = cancel
+ p.mu.Unlock()
+
+ go p.controlloop(ctx)
+ go p.flushloop(ctx)
+
+ <-ctx.Done()
+}
+
+func (p *Proxy[T]) controlloop(ctx context.Context) {
+ defer p.cond.Broadcast()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case r := <-p.ch:
+ p.mu.Lock()
+ p.pending = append(p.pending, r)
+ p.mu.Unlock()
+ }
+ p.cond.Broadcast()
+ }
+}
+
+func (p *Proxy[T]) flushloop(ctx context.Context) {
+ const defaultPendingSize = 10
+ pending := make([]T, defaultPendingSize)
+ for {
+ select {
+ case <-ctx.Done():
+ p.mu.Lock()
+ if len(p.pending) <= 0 {
+ p.mu.Unlock()
+ return
+ }
+ default:
+ }
+
+ p.mu.Lock()
+ for len(p.pending) <= 0 {
+ select {
+ case <-ctx.Done():
+ p.mu.Unlock()
+ return
+ default:
+ p.cond.Wait()
+ }
+ }
+
+ // extract all pending values, and clear the shared slice
+ if cap(pending) < len(p.pending) {
+ pending = make([]T, len(p.pending))
+ } else {
+ pending = pending[:len(p.pending)]
+ }
+ copy(pending, p.pending)
+ if cap(p.pending) > defaultPendingSize {
+ p.pending = make([]T, 0, defaultPendingSize)
+ } else {
+ p.pending = p.pending[:0]
+ }
+ p.mu.Unlock()
+
+ for _, v := range pending {
+ // send to sink serially
+ p.backend.Put(ctx, v)
+ }
+ }
+}
+
+func (p *Proxy[T]) Put(ctx context.Context, v T) {
+ p.mu.Lock()
+ if p.closed {
+ p.mu.Unlock()
+ return
+ }
+ p.mu.Unlock()
+
+ select {
+ case <-ctx.Done():
+ return
+ case p.ch <- v:
+ return
+ }
+}
+
+func (p *Proxy[T]) Close() {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ if !p.closed {
+ p.closed = true
+ }
+ p.cancel()
+ p.cond.Broadcast()
+}
diff --git a/vendor/github.com/lestrrat-go/httprc/v3/resource.go b/vendor/github.com/lestrrat-go/httprc/v3/resource.go
new file mode 100644
index 0000000000..e637f791fc
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/v3/resource.go
@@ -0,0 +1,359 @@
+package httprc
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/lestrrat-go/blackmagic"
+ "github.com/lestrrat-go/httpcc"
+ "github.com/lestrrat-go/httprc/v3/tracesink"
+)
+
+// ResourceBase is a generic Resource type
+type ResourceBase[T any] struct {
+ u string
+ ready chan struct{} // closed when the resource is ready (i.e. after first successful fetch)
+ once sync.Once
+ httpcl HTTPClient
+ t Transformer[T]
+ r atomic.Value
+ next atomic.Value
+ interval time.Duration
+ minInterval atomic.Int64
+ maxInterval atomic.Int64
+ busy atomic.Bool
+}
+
+// NewResource creates a new Resource object which after fetching the
+// resource from the URL, will transform the response body using the
+// provided Transformer to an object of type T.
+//
+// This function will return an error if the URL is not a valid URL
+// (i.e. it cannot be parsed by url.Parse), or if the transformer is nil.
+func NewResource[T any](s string, transformer Transformer[T], options ...NewResourceOption) (*ResourceBase[T], error) {
+ var httpcl HTTPClient
+ var interval time.Duration
+ minInterval := DefaultMinInterval
+ maxInterval := DefaultMaxInterval
+ //nolint:forcetypeassert
+ for _, option := range options {
+ switch option.Ident() {
+ case identHTTPClient{}:
+ httpcl = option.Value().(HTTPClient)
+ case identMinimumInterval{}:
+ minInterval = option.Value().(time.Duration)
+ case identMaximumInterval{}:
+ maxInterval = option.Value().(time.Duration)
+ case identConstantInterval{}:
+ interval = option.Value().(time.Duration)
+ }
+ }
+ if transformer == nil {
+ return nil, fmt.Errorf(`httprc.NewResource: %w`, errTransformerRequired)
+ }
+
+ if s == "" {
+ return nil, fmt.Errorf(`httprc.NewResource: %w`, errURLCannotBeEmpty)
+ }
+
+ if _, err := url.Parse(s); err != nil {
+ return nil, fmt.Errorf(`httprc.NewResource: %w`, err)
+ }
+ r := &ResourceBase[T]{
+ u: s,
+ httpcl: httpcl,
+ t: transformer,
+ interval: interval,
+ ready: make(chan struct{}),
+ }
+ if httpcl != nil {
+ r.httpcl = httpcl
+ }
+ r.minInterval.Store(int64(minInterval))
+ r.maxInterval.Store(int64(maxInterval))
+ r.SetNext(time.Unix(0, 0)) // initially, it should be fetched immediately
+ return r, nil
+}
+
+// URL returns the URL of the resource.
+func (r *ResourceBase[T]) URL() string {
+ return r.u
+}
+
+// Ready returns an empty error when the resource is ready. If the context
+// is canceled before the resource is ready, it will return the error from
+// the context.
+func (r *ResourceBase[T]) Ready(ctx context.Context) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-r.ready:
+ return nil
+ }
+}
+
+// Get assigns the value of the resource to the provided pointer.
+// If using the `httprc.ResourceBase[T]` type directly, you can use the `Resource()`
+// method to get the resource directly.
+//
+// This method exists because parametric types cannot be assigned to a single object type
+// that return different return values of the specialized type. i.e. for resources
+// `ResourceBase[A]` and `ResourceBase[B]`, we cannot have a single interface that can
+// be assigned to the same interface type `X` that expects a `Resource()` method that
+// returns `A` or `B` depending on the type of the resource. When accessing the
+// resource through the `httprc.Resource` interface, use this method to obtain the
+// stored value.
+func (r *ResourceBase[T]) Get(dst interface{}) error {
+ return blackmagic.AssignIfCompatible(dst, r.Resource())
+}
+
+// Resource returns the last fetched resource. If the resource has not been
+// fetched yet, this will return the zero value of type T.
+//
+// If you would rather wait until the resource is fetched, you can use the
+// `Ready()` method to wait until the resource is ready (i.e. fetched at least once).
+func (r *ResourceBase[T]) Resource() T {
+ v := r.r.Load()
+ switch v := v.(type) {
+ case T:
+ return v
+ default:
+ var zero T
+ return zero
+ }
+}
+
+func (r *ResourceBase[T]) Next() time.Time {
+ //nolint:forcetypeassert
+ return r.next.Load().(time.Time)
+}
+
+func (r *ResourceBase[T]) SetNext(v time.Time) {
+ r.next.Store(v)
+}
+
+func (r *ResourceBase[T]) ConstantInterval() time.Duration {
+ return r.interval
+}
+
+func (r *ResourceBase[T]) MaxInterval() time.Duration {
+ return time.Duration(r.maxInterval.Load())
+}
+
+func (r *ResourceBase[T]) MinInterval() time.Duration {
+ return time.Duration(r.minInterval.Load())
+}
+
+func (r *ResourceBase[T]) SetMaxInterval(v time.Duration) {
+ r.maxInterval.Store(int64(v))
+}
+
+func (r *ResourceBase[T]) SetMinInterval(v time.Duration) {
+ r.minInterval.Store(int64(v))
+}
+
+func (r *ResourceBase[T]) SetBusy(v bool) {
+ r.busy.Store(v)
+}
+
+func (r *ResourceBase[T]) IsBusy() bool {
+ return r.busy.Load()
+}
+
+// limitedBody is a wrapper around an io.Reader that will only read up to
+// MaxBufferSize bytes. This is provided to prevent the user from accidentally
+// reading a huge response body into memory
+type limitedBody struct {
+ rdr io.Reader
+ close func() error
+}
+
+func (l *limitedBody) Read(p []byte) (n int, err error) {
+ return l.rdr.Read(p)
+}
+
+func (l *limitedBody) Close() error {
+ return l.close()
+}
+
+type traceSinkKey struct{}
+
+func withTraceSink(ctx context.Context, sink TraceSink) context.Context {
+ return context.WithValue(ctx, traceSinkKey{}, sink)
+}
+
+func traceSinkFromContext(ctx context.Context) TraceSink {
+ if v := ctx.Value(traceSinkKey{}); v != nil {
+ //nolint:forcetypeassert
+ return v.(TraceSink)
+ }
+ return tracesink.Nop{}
+}
+
+type httpClientKey struct{}
+
+func withHTTPClient(ctx context.Context, cl HTTPClient) context.Context {
+ return context.WithValue(ctx, httpClientKey{}, cl)
+}
+
+func httpClientFromContext(ctx context.Context) HTTPClient {
+ if v := ctx.Value(httpClientKey{}); v != nil {
+ //nolint:forcetypeassert
+ return v.(HTTPClient)
+ }
+ return http.DefaultClient
+}
+
+func (r *ResourceBase[T]) Sync(ctx context.Context) error {
+ traceSink := traceSinkFromContext(ctx)
+ httpcl := r.httpcl
+ if httpcl == nil {
+ httpcl = httpClientFromContext(ctx)
+ }
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, r.u, nil)
+ if err != nil {
+ return fmt.Errorf(`httprc.Resource.Sync: failed to create request: %w`, err)
+ }
+
+ traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: fetching %q", r.u))
+ res, err := httpcl.Do(req)
+ if err != nil {
+ return fmt.Errorf(`httprc.Resource.Sync: failed to execute HTTP request: %w`, err)
+ }
+ defer res.Body.Close()
+
+ next := r.calculateNextRefreshTime(ctx, res)
+ traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: next refresh time for %q is %v", r.u, next))
+ r.SetNext(next)
+
+ if res.StatusCode != http.StatusOK {
+ return fmt.Errorf(`httprc.Resource.Sync: %w (status code=%d, url=%q)`, errUnexpectedStatusCode, res.StatusCode, r.u)
+ }
+
+ // replace the body of the response with a limited reader that
+ // will only read up to MaxBufferSize bytes
+ res.Body = &limitedBody{
+ rdr: &io.LimitedReader{R: res.Body, N: MaxBufferSize},
+ close: res.Body.Close,
+ }
+ traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: transforming %q", r.u))
+ v, err := r.transform(ctx, res)
+ if err != nil {
+ return fmt.Errorf(`httprc.Resource.Sync: %w: %w`, errTransformerFailed, err)
+ }
+
+ traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: storing new value for %q", r.u))
+ r.r.Store(v)
+ r.once.Do(func() { close(r.ready) })
+ traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: stored value for %q", r.u))
+ return nil
+}
+
+func (r *ResourceBase[T]) transform(ctx context.Context, res *http.Response) (ret T, gerr error) {
+ // Protect the call to Transform with a defer/recover block, so that even
+ // if the Transform method panics, we can recover from it and return an error
+ defer func() {
+ if recovered := recover(); recovered != nil {
+ gerr = fmt.Errorf(`httprc.Resource.transform: %w: %v`, errRecoveredFromPanic, recovered)
+ }
+ }()
+ return r.t.Transform(ctx, res)
+}
+
+func (r *ResourceBase[T]) determineNextFetchInterval(ctx context.Context, name string, fromHeader, minValue, maxValue time.Duration) time.Duration {
+ traceSink := traceSinkFromContext(ctx)
+
+ if fromHeader > maxValue {
+ traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: %s %s > maximum interval, using maximum interval %s", r.URL(), name, maxValue))
+ return maxValue
+ }
+
+ if fromHeader < minValue {
+ traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: %s %s < minimum interval, using minimum interval %s", r.URL(), name, minValue))
+ return minValue
+ }
+
+ traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: %s Using %s (%s)", r.URL(), name, fromHeader))
+ return fromHeader
+}
+
+func (r *ResourceBase[T]) calculateNextRefreshTime(ctx context.Context, res *http.Response) time.Time {
+ traceSink := traceSinkFromContext(ctx)
+ now := time.Now()
+
+ // If constant interval is set, use that regardless of what the
+ // response headers say.
+ if interval := r.ConstantInterval(); interval > 0 {
+ traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: %s Explicit interval set, using value %s", r.URL(), interval))
+ return now.Add(interval)
+ }
+
+ if interval := r.extractCacheControlMaxAge(ctx, res); interval > 0 {
+ return now.Add(interval)
+ }
+
+ if interval := r.extractExpiresInterval(ctx, res); interval > 0 {
+ return now.Add(interval)
+ }
+
+ traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: %s No cache-control/expires headers found, using minimum interval", r.URL()))
+ return now.Add(r.MinInterval())
+}
+
+func (r *ResourceBase[T]) extractCacheControlMaxAge(ctx context.Context, res *http.Response) time.Duration {
+ traceSink := traceSinkFromContext(ctx)
+
+ v := res.Header.Get(`Cache-Control`)
+ if v == "" {
+ return 0
+ }
+
+ dir, err := httpcc.ParseResponse(v)
+ if err != nil {
+ return 0
+ }
+
+ maxAge, ok := dir.MaxAge()
+ if !ok {
+ return 0
+ }
+
+ traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: %s Cache-Control=max-age directive set (%d)", r.URL(), maxAge))
+ return r.determineNextFetchInterval(
+ ctx,
+ "max-age",
+ time.Duration(maxAge)*time.Second,
+ r.MinInterval(),
+ r.MaxInterval(),
+ )
+}
+
+func (r *ResourceBase[T]) extractExpiresInterval(ctx context.Context, res *http.Response) time.Duration {
+ traceSink := traceSinkFromContext(ctx)
+
+ v := res.Header.Get(`Expires`)
+ if v == "" {
+ return 0
+ }
+
+ expires, err := http.ParseTime(v)
+ if err != nil {
+ return 0
+ }
+
+ traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: %s Expires header set (%s)", r.URL(), expires))
+ return r.determineNextFetchInterval(
+ ctx,
+ "expires",
+ time.Until(expires),
+ r.MinInterval(),
+ r.MaxInterval(),
+ )
+}
diff --git a/vendor/github.com/lestrrat-go/httprc/v3/tracesink/tracesink.go b/vendor/github.com/lestrrat-go/httprc/v3/tracesink/tracesink.go
new file mode 100644
index 0000000000..b8400a94ae
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/v3/tracesink/tracesink.go
@@ -0,0 +1,52 @@
+package tracesink
+
+import (
+ "context"
+ "log/slog"
+)
+
+type Interface interface {
+ Put(context.Context, string)
+}
+
+// Nop is an ErrorSink that does nothing. It does not require
+// any initialization, so the zero value can be used.
+type Nop struct{}
+
+// NewNop returns a new NopTraceSink object. The constructor
+// is provided for consistency.
+func NewNop() Interface {
+ return Nop{}
+}
+
+// Put for NopTraceSink does nothing.
+func (Nop) Put(context.Context, string) {}
+
+type slogSink struct {
+ level slog.Level
+ logger SlogLogger
+}
+
+type SlogLogger interface {
+ Log(context.Context, slog.Level, string, ...any)
+}
+
+// NewSlog returns a new ErrorSink that logs errors using the provided slog.Logger
+func NewSlog(l SlogLogger) Interface {
+ return &slogSink{
+ level: slog.LevelInfo,
+ logger: l,
+ }
+}
+
+func (s *slogSink) Put(ctx context.Context, v string) {
+ s.logger.Log(ctx, s.level, v)
+}
+
+// Func is a TraceSink that calls a function with the trace message.
+type Func func(context.Context, string)
+
+// Put calls the function with the trace message.
+func (f Func) Put(ctx context.Context, msg string) {
+ f(ctx, msg)
+}
diff --git a/vendor/github.com/lestrrat-go/httprc/v3/transformer.go b/vendor/github.com/lestrrat-go/httprc/v3/transformer.go
new file mode 100644
index 0000000000..2bd0635a2c
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/v3/transformer.go
@@ -0,0 +1,37 @@
+package httprc
+
+import (
+ "context"
+ "encoding/json"
+ "io"
+ "net/http"
+)
+
+type bytesTransformer struct{}
+
+// BytesTransformer returns a Transformer that reads the entire response body
+// as a byte slice. This is the default Transformer used by httprc.Client
+func BytesTransformer() Transformer[[]byte] {
+ return bytesTransformer{}
+}
+
+func (bytesTransformer) Transform(_ context.Context, res *http.Response) ([]byte, error) {
+ return io.ReadAll(res.Body)
+}
+
+type jsonTransformer[T any] struct{}
+
+// JSONTransformer returns a Transformer that decodes the response body as JSON
+// into the provided type T.
+func JSONTransformer[T any]() Transformer[T] {
+ return jsonTransformer[T]{}
+}
+
+func (jsonTransformer[T]) Transform(_ context.Context, res *http.Response) (T, error) {
+ var v T
+ if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
+ var zero T
+ return zero, err
+ }
+ return v, nil
+}
diff --git a/vendor/github.com/lestrrat-go/httprc/v3/whitelist.go b/vendor/github.com/lestrrat-go/httprc/v3/whitelist.go
new file mode 100644
index 0000000000..74ef2a1be6
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/v3/whitelist.go
@@ -0,0 +1,113 @@
+package httprc
+
+import (
+ "regexp"
+ "sync"
+)
+
+// Whitelist is an interface that allows you to determine if a given URL is allowed
+// or not. Implementations of this interface can be used to restrict the URLs that
+// the client can access.
+//
+// By default all URLs are allowed, but this may not be ideal in production environments
+// for security reasons.
+//
+// This exists because you might use this module to store resources provided by
+// user of your application, in which case you cannot necessarily trust that the
+// URLs are safe.
+//
+// You will HAVE to provide some sort of whitelist.
+type Whitelist interface {
+ IsAllowed(string) bool
+}
+
+// WhitelistFunc is a function type that implements the Whitelist interface.
+type WhitelistFunc func(string) bool
+
+func (f WhitelistFunc) IsAllowed(u string) bool { return f(u) }
+
+// BlockAllWhitelist is a Whitelist implementation that blocks all URLs.
+type BlockAllWhitelist struct{}
+
+// NewBlockAllWhitelist creates a new BlockAllWhitelist instance. It is safe to
+// use the zero value of this type; this constructor is provided for consistency.
+func NewBlockAllWhitelist() BlockAllWhitelist { return BlockAllWhitelist{} }
+
+func (BlockAllWhitelist) IsAllowed(_ string) bool { return false }
+
+// InsecureWhitelist is a Whitelist implementation that allows all URLs. Be careful
+// when using this in your production code: make sure you do not blindly register
+// URLs from untrusted sources.
+type InsecureWhitelist struct{}
+
+// NewInsecureWhitelist creates a new InsecureWhitelist instance. It is safe to
+// use the zero value of this type; this constructor is provided for consistency.
+func NewInsecureWhitelist() InsecureWhitelist { return InsecureWhitelist{} }
+
+func (InsecureWhitelist) IsAllowed(_ string) bool { return true }
+
+// RegexpWhitelist is a jwk.Whitelist object comprised of a list of *regexp.Regexp
+// objects. All entries in the list are tried until one matches. If none of the
+// *regexp.Regexp objects match, then the URL is deemed unallowed.
+type RegexpWhitelist struct {
+ mu sync.RWMutex
+ patterns []*regexp.Regexp
+}
+
+// NewRegexpWhitelist creates a new RegexpWhitelist instance. It is safe to use the
+// zero value of this type; this constructor is provided for consistency.
+func NewRegexpWhitelist() *RegexpWhitelist {
+ return &RegexpWhitelist{}
+}
+
+// Add adds a new regular expression to the list of expressions to match against.
+func (w *RegexpWhitelist) Add(pat *regexp.Regexp) *RegexpWhitelist {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ w.patterns = append(w.patterns, pat)
+ return w
+}
+
+// IsAllowed returns true if any of the patterns in the whitelist
+// returns true.
+func (w *RegexpWhitelist) IsAllowed(u string) bool {
+ w.mu.RLock()
+ patterns := w.patterns
+ w.mu.RUnlock()
+ for _, pat := range patterns {
+ if pat.MatchString(u) {
+ return true
+ }
+ }
+ return false
+}
+
+// MapWhitelist is a jwk.Whitelist object comprised of a map of strings.
+// If the URL exists in the map, then the URL is allowed to be fetched.
+type MapWhitelist interface {
+ Whitelist
+ Add(string) MapWhitelist
+}
+
+type mapWhitelist struct {
+ mu sync.RWMutex
+ store map[string]struct{}
+}
+
+func NewMapWhitelist() MapWhitelist {
+ return &mapWhitelist{store: make(map[string]struct{})}
+}
+
+func (w *mapWhitelist) Add(pat string) MapWhitelist {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ w.store[pat] = struct{}{}
+ return w
+}
+
+func (w *mapWhitelist) IsAllowed(u string) bool {
+ w.mu.RLock()
+ _, b := w.store[u]
+ w.mu.RUnlock()
+ return b
+}
diff --git a/vendor/github.com/lestrrat-go/httprc/v3/worker.go b/vendor/github.com/lestrrat-go/httprc/v3/worker.go
new file mode 100644
index 0000000000..d11477dadc
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/v3/worker.go
@@ -0,0 +1,62 @@
+package httprc
+
+import (
+ "context"
+ "fmt"
+ "sync"
+)
+
+type worker struct {
+ httpcl HTTPClient
+ incoming chan any
+ next <-chan Resource
+ nextsync <-chan synchronousRequest
+ errSink ErrorSink
+ traceSink TraceSink
+}
+
+func (w worker) Run(ctx context.Context, readywg *sync.WaitGroup, donewg *sync.WaitGroup) {
+ w.traceSink.Put(ctx, "httprc worker: START worker loop")
+ defer w.traceSink.Put(ctx, "httprc worker: END worker loop")
+ defer donewg.Done()
+ ctx = withTraceSink(ctx, w.traceSink)
+ ctx = withHTTPClient(ctx, w.httpcl)
+
+ readywg.Done()
+ for {
+ select {
+ case <-ctx.Done():
+ w.traceSink.Put(ctx, "httprc worker: stopping worker loop")
+ return
+ case r := <-w.next:
+ w.traceSink.Put(ctx, fmt.Sprintf("httprc worker: syncing %q (async)", r.URL()))
+ if err := r.Sync(ctx); err != nil {
+ w.errSink.Put(ctx, err)
+ }
+ r.SetBusy(false)
+
+ w.sendAdjustIntervalRequest(ctx, r)
+ case sr := <-w.nextsync:
+ w.traceSink.Put(ctx, fmt.Sprintf("httprc worker: syncing %q (synchronous)", sr.resource.URL()))
+ if err := sr.resource.Sync(ctx); err != nil {
+ w.traceSink.Put(ctx, fmt.Sprintf("httprc worker: FAILED to sync %q (synchronous): %s", sr.resource.URL(), err))
+ sendReply(ctx, sr.reply, struct{}{}, err)
+ sr.resource.SetBusy(false)
+ return
+ }
+ w.traceSink.Put(ctx, fmt.Sprintf("httprc worker: SUCCESS syncing %q (synchronous)", sr.resource.URL()))
+ sr.resource.SetBusy(false)
+ sendReply(ctx, sr.reply, struct{}{}, nil)
+ w.sendAdjustIntervalRequest(ctx, sr.resource)
+ }
+ }
+}
+
+func (w worker) sendAdjustIntervalRequest(ctx context.Context, r Resource) {
+ w.traceSink.Put(ctx, "httprc worker: Sending interval adjustment request for "+r.URL())
+ select {
+ case <-ctx.Done():
+ case w.incoming <- adjustIntervalRequest{resource: r}:
+ }
+ w.traceSink.Put(ctx, "httprc worker: Sent interval adjustment request for "+r.URL())
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/.bazelignore b/vendor/github.com/lestrrat-go/jwx/v3/.bazelignore
new file mode 100644
index 0000000000..50347e8777
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/.bazelignore
@@ -0,0 +1,4 @@
+cmd
+bench
+examples
+tools
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/.bazelrc b/vendor/github.com/lestrrat-go/jwx/v3/.bazelrc
new file mode 100644
index 0000000000..b47648db7b
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/.bazelrc
@@ -0,0 +1 @@
+import %workspace%/.aspect/bazelrc/bazel7.bazelrc
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/.bazelversion b/vendor/github.com/lestrrat-go/jwx/v3/.bazelversion
new file mode 100644
index 0000000000..56b6be4ebb
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/.bazelversion
@@ -0,0 +1 @@
+8.3.1
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/.gitignore b/vendor/github.com/lestrrat-go/jwx/v3/.gitignore
new file mode 100644
index 0000000000..c4c0ebff32
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/.gitignore
@@ -0,0 +1,39 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+# IDE
+.idea
+.vscode
+.DS_Store
+*~
+
+coverage.out
+
+# I redirect my test output to files named "out" way too often
+out
+
+cmd/jwx/jwx
+
+bazel-*
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/.golangci.yml b/vendor/github.com/lestrrat-go/jwx/v3/.golangci.yml
new file mode 100644
index 0000000000..214a9edaa8
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/.golangci.yml
@@ -0,0 +1,125 @@
+version: "2"
+linters:
+ default: all
+ disable:
+ - cyclop
+ - depguard
+ - dupl
+ - err113
+ - errorlint
+ - exhaustive
+ - funcorder
+ - funlen
+ - gochecknoglobals
+ - gochecknoinits
+ - gocognit
+ - gocritic
+ - gocyclo
+ - godot
+ - godox
+ - gosec
+ - gosmopolitan
+ - govet
+ - inamedparam
+ - ireturn
+ - lll
+ - maintidx
+ - makezero
+ - mnd
+ - nakedret
+ - nestif
+ - nlreturn
+ - noinlineerr
+ - nonamedreturns
+ - paralleltest
+ - perfsprint
+ - staticcheck
+ - recvcheck
+ - tagliatelle
+ - testifylint
+ - testpackage
+ - thelper
+ - varnamelen
+ - wrapcheck
+ - wsl
+ - wsl_v5
+ settings:
+ govet:
+ disable:
+ - shadow
+ - fieldalignment
+ enable-all: true
+ exclusions:
+ generated: lax
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ rules:
+ - linters:
+ - staticcheck
+ path: /*.go
+ text: 'ST1003: should not use underscores in package names'
+ - linters:
+ - revive
+ path: /*.go
+ text: don't use an underscore in package name
+ - linters:
+ - staticcheck
+ text: SA1019
+ - linters:
+ - contextcheck
+ - exhaustruct
+ path: /*.go
+ - linters:
+ - errcheck
+ path: /main.go
+ - linters:
+ - errcheck
+ path: internal/codegen/codegen.go
+ - linters:
+ - errcheck
+ - errchkjson
+ - forcetypeassert
+ path: internal/jwxtest/jwxtest.go
+ - linters:
+ - errcheck
+ - errchkjson
+ - forcetypeassert
+ path: /*_test.go
+ - linters:
+ - forbidigo
+ path: /*_example_test.go
+ - linters:
+ - forbidigo
+ path: cmd/jwx/jwx.go
+ - linters:
+ - revive
+ path: /*_test.go
+ text: 'var-naming: '
+ - linters:
+ - revive
+ path: internal/tokens/jwe_tokens.go
+ text: "don't use ALL_CAPS in Go names"
+ - linters:
+ - revive
+ path: jwt/internal/types/
+ text: "var-naming: avoid meaningless package names"
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+issues:
+ max-issues-per-linter: 0
+ max-same-issues: 0
+formatters:
+ enable:
+ - gofmt
+ - goimports
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/BUILD b/vendor/github.com/lestrrat-go/jwx/v3/BUILD
new file mode 100644
index 0000000000..2759408882
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/BUILD
@@ -0,0 +1,47 @@
+load("@rules_go//go:def.bzl", "go_library", "go_test")
+load("@gazelle//:def.bzl", "gazelle")
+
+# gazelle:prefix github.com/lestrrat-go/jwx/v3
+# gazelle:go_naming_convention import_alias
+
+gazelle(name = "gazelle")
+
+go_library(
+ name = "jwx",
+ srcs = [
+ "format.go",
+ "formatkind_string_gen.go",
+ "jwx.go",
+ "options.go",
+ ],
+ importpath = "github.com/lestrrat-go/jwx/v3",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//internal/json",
+ "//internal/tokens",
+ "@com_github_lestrrat_go_option_v2//:option",
+ ],
+)
+
+go_test(
+ name = "jwx_test",
+ srcs = ["jwx_test.go"],
+ deps = [
+ ":jwx",
+ "//internal/jose",
+ "//internal/json",
+ "//internal/jwxtest",
+ "//jwa",
+ "//jwe",
+ "//jwk",
+ "//jwk/ecdsa",
+ "//jws",
+ "@com_github_stretchr_testify//require",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":jwx",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/Changes b/vendor/github.com/lestrrat-go/jwx/v3/Changes
new file mode 100644
index 0000000000..29910bf35c
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/Changes
@@ -0,0 +1,222 @@
+Changes
+=======
+
+v3 has many incompatibilities with v2. To see the full list of differences between
+v2 and v3, please read the Changes-v3.md file (https://github.com/lestrrat-go/jwx/blob/develop/v3/Changes-v3.md)
+
+v3.0.11 14 Sep 2025
+ * [jwk] Add `(jwk.Cache).Shutdown()` method that delegates to the httprc controller
+ object, to shutdown the cache.
+ * [jwk] Change timing of `res.Body.Close()` call
+ * [jwe] Previously, ecdh.PrivateKey/ecdh.PublicKey were not properly handled
+ when used for encryption, which has been fixed.
+ * [jws/jwsbb] (EXPERIMENTAL/BREAKS COMPATIBILITY) Convert most functions into
+ thin wrappers around functions from github.com/lestrrat-go/dsig package.
+ As a related change, HAMCHashFuncFor/RSAHashFuncFor/ECDSAHashFuncFor/RSAPSSOptions
+ have been removed or unexported.
+ Users of this module should be using jwsbb.Sign() and jwsbb.Verify() instead of
+ algorithm specific jwsbb.SignRSA()/jwsbb.VerifyRSA() and such. If you feel the
+ need to use these functions, you should use github.com/lestrrat-go/dsig directly.
+
+v3.0.10 04 Aug 2025
+ * [jws/jwsbb] Add `jwsbb.ErrHeaderNotFound()` to return the same error type as when
+ a non-existent header is requested. via `HeaderGetXXX()` functions. Previously, this
+ function was called `jwsbb.ErrFieldNotFound()`, but it was a misnomer.
+ * [jws/jwsbb] Fix a bug where error return values from `HeaderGetXXX()` functions
+ could not be matched against `jwsbb.ErrHeaderNotFound()` using `errors.Is()`.
+
+v3.0.9 31 Jul 2025
+ * [jws/jwsbb] `HeaderGetXXX()` functions now return errors when
+ the requested header is not found, or if the value cannot be
+ converted to the requested type.
+
+ * [jwt] `(jwt.Token).Get` methods now return specific types of errors depending
+ on if a) the specified claim was not present, or b) the specified claim could
+ not be assigned to the destination variable.
+
+ You can distinguish these by using `errors.Is` against `jwt.ClaimNotFoundError()`
+ or `jwt.ClaimAssignmentFailedError()`
+
+v3.0.8 27 Jun 2025
+ * [jwe/jwebb] (EXPERIMENTAL) Add low-level functions for JWE operations.
+ * [jws/jwsbb] (EXPERIMENTAL/BREAKS COMPATIBILITY) Add io.Reader parameter
+ so your choice of source of randomness can be passed. Defaults to crypto/rand.Reader.
+ Function signatures around jwsbb.Sign() now accept an addition `rr io.Reader`,
+ which can be nil for 99% of use cases.
+ * [jws/jwsbb] Add HeaderParse([]byte), where it is expected that the header
+ is already in its base64 decoded format.
+ * misc: replace `interface{}` with `any`
+
+v3.0.7 16 Jun 2025
+ * [jws/jwsbb] (EXPERIMENTAL) Add low-level fast access to JWS headers in compact
+ serialization form.
+ * [jws] Fix error reporting when no key matched for a signature.
+ * [jws] Refactor jws signer setup.
+ * Known algorithms are now implemented completely in the jws/jwsbb package.
+ * VerifierFor and SignerFor now always succeed, and will also return a Signer2
+ or Verifier2 that wraps the legacy Signer or Verifier if one is registered.
+
+v3.0.6 13 Jun 2025
+ * This release contains various performance improvements all over the code.
+ No, this time for real. In particular, the most common case for signing
+ a JWT with a key is approx 70% more efficient based on the number of allocations.
+
+ Please read the entry for the (retracted) v3.0.4 for what else I have to
+ say about performance improvements
+
+ * [jwt] Added fast-path for token signing and verification. The fast path
+ is triggered if you only pass `jwt.Sign()` and `jwt.Parse()` one options each
+ (`jwt.WithKey()`), with no suboptions.
+
+ * [jws] Major refactoring around basic operations:
+
+ * How to work with Signer/Verifier have completely changed. Please take
+ a look at examples/jws_custom_signer_verifier_example_test.go for how
+ to do it the new way. The old way still works, but it WILL be removed
+ when v4 arrives.
+ * Related to the above, old code has been moved to `jws/legacy`.
+
+ * A new package `jws/jwsbb` has been added. `bb` stands for building blocks.
+ This package separates out the low-level JWS operations into its own
+ package. So if you are looking for just the signing of a payload with
+ a key, this is it.
+
+ `jws/jwsbb` is currently considered to be EXPERIMENTAL.
+
+v3.0.5 11 Jun 2025
+ * Retract v3.0.4
+ * Code for v3.0.3 is the same as v3.0.3
+
+v3.0.4 09 Jun 2025
+ * This release contains various performance improvements all over the code.
+
+ Because of the direction that this library is taking, we have always been
+ more focused on correctness and usability/flexibility over performance.
+
+ It just so happens that I had a moment of inspiration and decided to see
+ just how good our AI-based coding agents are in this sort of analysis-heavy tasks.
+
+ Long story short, the AI was fairly good at identifying suspicious code with
+ an okay accuracy, but completely failed to make any meaningful changes to the
+ code in a way that both did not break the code _and_ improved performance.
+ I am sure that they will get better in the near future, but for now,
+ I had to do the changes myself. I should clarify to their defence that
+ the AI was very helpful in writing cumbersome benchmark code for me.
+
+ The end result is that we have anywhere from 10 to 30% performance improvements
+ in various parts of the code that we touched, based on number of allocations.
+ We believe that this would be a significant improvement for many users.
+
+ For further improvements, we can see that there would be a clear benefit to
+ writing optimized code path that is designed to serve the most common cases.
+ For example, for the case of signing JWTs with a single key, we could provide
+ a path that skips a lot of extra processing (we kind of did that in this change,
+ but we _could_ go ever harder in this direction). However, it is a trade-off between
+ maintainability and performance, and as I am currently the sole maintainer of
+ this library for the time being, I only plan to pursue such a route where it
+ requires minimal effort on my part.
+
+ If you are interested in helping out in this area, I hereby thank you in advance.
+ However, please be perfectly clear that unlike other types of changes, for performance
+ related changes, the balance between the performance gains and maintainability is
+ top priority. If you have good ideas and code, they will always be welcome, but
+ please be prepared to justify your changes.
+
+ Finally, thank you for using this library!
+
+v3.0.3 06 Jun 2025
+ * Update some dependencies
+ * [jwe] Change some error messages to contain more context information
+
+v3.0.2 03 Jun 2025
+ * [transform] (EXPERIMENTAL) Add utility function `transform.AsMap` to convert a
+ Mappable object to a map[string]interface{}. This is useful for converting
+ objects such as `jws.Header`, `jwk.Key`, `jwt.Token`, etc. to a map that can
+ be used with other libraries that expect a map.
+ * [jwt] (EXPERIMENTAL) Added token filtering functionality through the TokenFilter interface.
+ * [jwt/openid] (EXPERIMENTAL) Added StandardClaimsFilter() for filtering standard OpenID claims.
+ * [jws] (EXPERIMENTAL) Added header filtering functionality through the HeaderFilter interface.
+ * [jwe] (EXPERIMENTAL) Added header filtering functionality through the HeaderFilter interface.
+ * [jwk] (EXPERIMENTAL) Added key filtering functionality through the KeyFilter interface.
+ * [jwk] `jwk.Export` previously did not recognize third-party objects that implemented `jwk.Key`,
+ as it was detecting what to do by checking if the object was one of our own unexported
+ types. This caused some problems for consumers of this library that wanted to extend the
+ features of the keys.
+
+ Now `jwk.Export` checks types against interface types such as `jwk.RSAPrivateKey`, `jwk.ECDSAPrivateKey`, etc.
+ It also uses some reflect blackmagic to detect if the given object implements the `jwk.Key` interface
+ via embedding, so you should be able to embed a `jwk.Key` to another object to act as if it
+ is a legitimate `jwk.Key`, as far as `jwk.Export` is concerned.
+
+v3.0.1 29 Apr 2025
+ * [jwe] Fixed a long standing bug that could lead to degraded encryption or failure to
+ decrypt JWE messages when a very specific combination of inputs were used for
+ JWE operations.
+
+ This problem only manifested itself when the following conditions in content encryption or decryption
+ were met:
+ - Content encryption was specified to use DIRECT mode.
+ - Contentn encryption algorithm is specified as A256CBC_HS512
+ - The key was erronously constructed with a 32-byte content encryption key (CEK)
+
+ In this case, the user would be passing a mis-constructed key of 32-bytes instead
+ of the intended 64-bytes. In all other cases, this construction would cause
+ an error because `crypto/aes.NewCipher` would return an error when a key with length
+ not matching 16, 24, and 32 bytes is used. However, due to use using a the provided
+ 32-bytes as half CEK and half the hash, the `crypto/aes.NewCipher` was passed
+ a 16-byte key, which is fine for AES-128. So internally `crypto/aes.NewCipher` would
+ choose to use AES-128 instead of AES-256, and happily continue. Note that no other
+ key lengths such as 48 and 128 would have worked. It had to be exactly 32.
+
+ This does indeed result in a downgraded encryption, but we believe it is unlikely that this would cause a problem in the real world,
+ as you would have to very specifically choose to use DIRECT mode, choose
+ the specific content encryption algorithm, AND also use the wrong key size of
+ exactly 32 bytes.
+
+ However, in abandunce of caution, we recommend that you upgrade to v3.0.1 or later,
+ or v2.1.6 or later if you are still on v2 series.
+
+ * [jws] Improve performance of jws.SplitCompact and jws.SplitCompactString
+ * [jwe] Improve performance of jwe.Parse
+
+v3.0.0 1 Apr 2025
+ * Release initial v3.0.0 series. Code is identical to v3.0.0-beta2, except
+ for minor documentation changes.
+
+ Please note that v1 will no longer be maintained.
+
+ Going forward v2 will receive security updates but will no longer receive
+ feature updates. Users are encouraged to migrate to v3. There is no hard-set
+ guarantee as to how long v2 will be supported, but if/when v4 comes out,
+ v2 support will be terminated then.
+
+v3.0.0-beta2 30 Mar 2025
+ * [jwk] Fix a bug where `jwk.Set`'s `Keys()` method did not return the proper
+ non-standard fields. (#1322)
+ * [jws][jwt] Implement `WithBase64Encoder()` options to pass base64 encoders
+ to use during signing/verifying signatures. This useful when the token
+ provider generates JWTs that don't follow the specification and uses base64
+ encoding other than raw url encoding (no padding), such as, apparently,
+ AWS ALB. (#1324, #1328)
+
+v3.0.0-beta1 15 Mar 2025
+ * [jwt] Token validation no longer truncates time based fields by default.
+ To restore old behavior, you can either change the global settings by
+ calling `jwt.Settings(jwt.WithTruncation(time.Second))`, or you can
+ change it by each invocation by using `jwt.Validate(..., jwt.WithTruncation(time.Second))`
+
+v3.0.0-alpha3 13 Mar 2025
+ * [jwk] Importing/Exporting from jwk.Key with P256/P386/P521 curves to
+ ecdh.PrivateKey/ecdh.PublicKey should now work. Previously these keys were not properly
+ recognized by the exporter/importer. Note that keys that use X25519 and P256/P384/P521
+ behave differently: X25519 keys can only be exported to/imported from OKP keys,
+ while P256/P384/P521 can be exported to either ecdsa or ecdh keys.
+
+v3.0.0-alpha2 25 Feb 2025
+ * Update to work with go1.24
+ * Update tests to work with latest latchset/jose
+ * Fix build pipeline to work with latest golangci-lint
+ * Require go1.23
+
+v3.0.0-alpha1 01 Nov 2024
+ * Initial release of v3 line.
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/Changes-v2.md b/vendor/github.com/lestrrat-go/jwx/v3/Changes-v2.md
new file mode 100644
index 0000000000..af146ed33a
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/Changes-v2.md
@@ -0,0 +1,390 @@
+# Incompatible Changes from v1 to v2
+
+These are changes that are incompatible with the v1.x.x version.
+
+* [tl;dr](#tldr) - If you don't feel like reading the details -- but you will read the details, right?
+* [Detailed List of Changes](#detailed-list-of-changes) - A comprehensive list of changes from v1 to v2
+
+# tl;dr
+
+## JWT
+
+```go
+// most basic
+jwt.Parse(serialized, jwt.WithKey(alg, key)) // NOTE: verification and validation are ENABLED by default!
+jwt.Sign(token, jwt.WithKey(alg,key))
+
+// with a jwk.Set
+jwt.Parse(serialized, jwt.WithKeySet(set))
+
+// UseDefault/InferAlgorithm with JWKS
+jwt.Parse(serialized, jwt.WithKeySet(set,
+ jws.WithUseDefault(true), jws.WithInferAlgorithm(true))
+
+// Use `jku`
+jwt.Parse(serialized, jwt.WithVerifyAuto(...))
+
+// Any other custom key provisioning (using functions in this
+// example, but can be anything that fulfills jws.KeyProvider)
+jwt.Parse(serialized, jwt.WithKeyProvider(jws.KeyProviderFunc(...)))
+```
+
+## JWK
+
+```go
+// jwk.New() was confusing. Renamed to fit the actual implementation
+key, err := jwk.FromRaw(rawKey)
+
+// Algorithm() now returns jwa.KeyAlgorithm type. `jws.Sign()`
+// and other function that receive JWK algorithm names accept
+// this new type, so you can use the same key and do the following
+// (previously you needed to type assert)
+jws.Sign(payload, jws.WithKey(key.Algorithm(), key))
+
+// If you need the specific type, type assert
+key.Algorithm().(jwa.SignatureAlgorithm)
+
+// jwk.AutoRefresh is no more. Use jwk.Cache
+cache := jwk.NewCache(ctx, options...)
+
+// Certificate chains are no longer jwk.CertificateChain type, but
+// *(github.com/lestrrat-go/jwx/cert).Chain
+cc := key.X509CertChain() // this is *cert.Chain now
+```
+
+## JWS
+
+```go
+// basic
+jws.Sign(payload, jws.WithKey(alg, key))
+jws.Sign(payload, jws.WithKey(alg, key), jws.WithKey(alg, key), jws.WithJSON(true))
+jws.Verify(signed, jws.WithKey(alg, key))
+
+// other ways to pass the key
+jws.Sign(payload, jws.WithKeySet(jwks))
+jws.Sign(payload, jws.WithKeyProvider(kp))
+
+// retrieve the key that succeeded in verifying
+var keyUsed interface{}
+jws.Verify(signed, jws.WithKeySet(jwks), jws.WithKeyUsed(&keyUsed))
+```
+
+## JWE
+
+```go
+// basic
+jwe.Encrypt(payload, jwe.WithKey(alg, key)) // other defaults are inferred
+jwe.Encrypt(payload, jwe.WithKey(alg, key), jwe.WithKey(alg, key), jwe.WithJSON(true))
+jwe.Decrypt(encrypted, jwe.WithKey(alg, key))
+
+// other ways to pass the key
+jwe.Encrypt(payload, jwe.WithKeySet(jwks))
+jwe.Encrypt(payload, jwe.WithKeyProvider(kp))
+
+// retrieve the key that succeeded in decrypting
+var keyUsed interface{}
+jwe.Verify(signed, jwe.WithKeySet(jwks), jwe.WithKeyUsed(&keyUsed))
+```
+
+# Detailed List of Changes
+
+## Module
+
+* Module now requires go 1.16
+
+* Use of github.com/pkg/errors is no more. If you were relying on behavior
+ that depends on the errors being an instance of github.com/pkg/errors
+ then you need to change your code
+
+* File-generation tools have been moved out of internal/ directories.
+ These files pre-dates Go modules, and they were in internal/ in order
+ to avoid being listed in the `go doc` -- however, now that we can
+ make them separate modules this is no longer necessary.
+
+* New package `cert` has been added to handle `x5c` certificate
+ chains, and to work with certificates
+ * cert.Chain to store base64 encoded ASN.1 DER format certificates
+ * cert.EncodeBase64 to encode ASN.1 DER format certificate using base64
+ * cert.Create to create a base64 encoded ASN.1 DER format certificates
+ * cert.Parse to parse base64 encoded ASN.1 DER format certificates
+
+## JWE
+
+* `jwe.Compact()`'s signature has changed to
+ `jwe.Compact(*jwe.Message, ...jwe.CompactOption)`
+
+* `jwe.JSON()` has been removed. You can generate JSON serialization
+ using `jwe.Encrypt(jwe.WitJSON())` or `json.Marshal(jwe.Message)`
+
+* `(jwe.Message).Decrypt()` has been removed. Since formatting of the
+ original serialized message matters (including whitespace), using a parsed
+ object was inherently confusing.
+
+* `jwe.Encrypt()` can now generate JWE messages in either compact or JSON
+ forms. By default, the compact form is used. JSON format can be
+ enabled by using the `jwe.WithJSON` option.
+
+* `jwe.Encrypt()` can now accept multiple keys by passing multiple
+ `jwe.WithKey()` options. This can be used with `jwe.WithJSON` to
+ create JWE messages with multiple recipients.
+
+* `jwe.DecryptEncryptOption()` has been renamed to `jwe.EncryptDecryptOption()`.
+ This is so that it is more uniform with `jws` equivalent of `jws.SignVerifyOption()`
+ where the producer (`Sign`) comes before the consumer (`Verify`) in the naming
+
+* `jwe.WithCompact` and `jwe.WithJSON` options have been added
+ to control the serialization format.
+
+* jwe.Decrypt()'s method signature has been changed to `jwt.Decrypt([]byte, ...jwe.DecryptOption) ([]byte, error)`.
+ These options can be stacked. Therefore, you could configure the
+ verification process to attempt a static key pair, a JWKS, and only
+ try other forms if the first two fails, for example.
+
+ - For static key pair, use `jwe.WithKey()`
+ - For static JWKS, use `jwe.WithKeySet()` (NOTE: InferAlgorithmFromKey like in `jws` package is NOT supported)
+ - For custom, possibly dynamic key provisioning, use `jwe.WithKeyProvider()`
+
+* jwe.Decrypter has been unexported. Users did not need this.
+
+* jwe.WithKeyProvider() has been added to specify arbitrary
+ code to specify which keys to try.
+
+* jwe.KeyProvider interface has been added
+
+* jwe.KeyProviderFunc has been added
+
+* `WithPostParser()` has been removed. You can achieve the same effect
+ by using `jwe.WithKeyProvider()`. Because this was the only consumer for
+ `jwe.DecryptCtx`, this type has been removed as well.
+
+* `x5c` field type has been changed to `*cert.Chain` instead of `[]string`
+
+* Method signature for `jwe.Parse()` has been changed to include options,
+ but options are currently not used
+
+* `jwe.ReadFile` now supports the option `jwe.WithFS` which allows you to
+ read data from arbitrary `fs.FS` objects
+
+* jwe.WithKeyUsed has been added to allow users to retrieve
+ the key used for decryption. This is useful in cases you provided
+ multiple keys and you want to know which one was successful
+
+## JWK
+
+* `jwk.New()` has been renamed to `jwk.FromRaw()`, which hopefully will
+ make it easier for the users what the input should be.
+
+* `jwk.Set` has many interface changes:
+ * Changed methods to match jwk.Key and its semantics:
+ * Field is now Get() (returns values for arbitrary fields other than keys). Fetching a key is done via Key()
+ * Remove() now removes arbitrary fields, not keys. to remove keys, use RemoveKey()
+ * Iterate has been added to iterate through all non-key fields.
+ * Add is now AddKey(Key) string, and returns an error when the same key is added
+ * Get is now Key(int) (Key, bool)
+ * Remove is now RemoveKey(Key) error
+ * Iterate is now Keys(context.Context) KeyIterator
+ * Clear is now Clear() error
+
+* `jwk.CachedSet` has been added. You can create a `jwk.Set` that is backed by
+ `jwk.Cache` so you can do this:
+
+```go
+cache := jkw.NewCache(ctx)
+cachedSet := jwk.NewCachedSet(cache, jwksURI)
+
+// cachedSet is always the refreshed, cached version from jwk.Cache
+jws.Verify(signed, jws.WithKeySet(cachedSet))
+```
+
+* `jwk.NewRSAPRivateKey()`, `jwk.NewECDSAPrivateKey()`, etc have been removed.
+ There is no longer any way to create concrete types of `jwk.Key`
+
+* `jwk.Key` type no longer supports direct unmarshaling via `json.Unmarshal()`,
+ because you can no longer instantiate concrete `jwk.Key` types. You will need to
+ use `jwk.ParseKey()`. See the documentation for ways to parse JWKs.
+
+* `(jwk.Key).Algorithm()` is now of `jwk.KeyAlgorithm` type. This field used
+ to be `string` and therefore could not be passed directly to `jwt.Sign()`
+ `jws.Sign()`, `jwe.Encrypt()`, et al. This is no longer the case, and
+ now you can pass it directly. See
+ https://github.com/lestrrat-go/jwx/blob/v2/docs/99-faq.md#why-is-jwkkeyalgorithm-and-jwakeyalgorithm-so-confusing
+ for more details
+
+* `jwk.Fetcher` and `jwk.FetchFunc` has been added.
+ They represent something that can fetch a `jwk.Set`
+
+* `jwk.CertificateChain` has been removed, use `*cert.Chain`
+* `x5c` field type has been changed to `*cert.Chain` instead of `[]*x509.Certificate`
+
+* `jwk.ReadFile` now supports the option `jwk.WithFS` which allows you to
+ read data from arbitrary `fs.FS` objects
+
+* Added `jwk.PostFetcher`, `jwk.PostFetchFunc`, and `jwk.WithPostFetch` to
+ allow users to get at the `jwk.Set` that was fetched in `jwk.Cache`.
+ This will make it possible for users to supply extra information and edit
+ `jwk.Set` after it has been fetched and parsed, but before it is cached.
+ You could, for example, modify the `alg` field so that it's easier to
+ work with when you use it in `jws.Verify` later.
+
+* Reworked `jwk.AutoRefresh` in terms of `github.com/lestrrat-go/httprc`
+ and renamed it `jwk.Cache`.
+
+ Major difference between `jwk.AutoRefresh` and `jwk.Cache` is that while
+ former used one `time.Timer` per resource, the latter uses a static timer
+ (based on `jwk.WithRefreshWindow()` value, default 15 minutes) that periodically
+ refreshes all resources that were due to be refreshed within that time frame.
+
+ This method may cause your updates to happen slightly later, but uses significantly
+ less resources and is less prone to clogging.
+
+* Reimplemented `jwk.Fetch` in terms of `github.com/lestrrat-go/httprc`.
+
+* Previously `jwk.Fetch` and `jwk.AutoRefresh` respected backoff options,
+ but this has been removed. This is to avoid unwanted clogging of the fetch workers
+ which is the default processing mode in `github.com/lestrrat-go/httprc`.
+
+ If you are using backoffs, you need to control your inputs more carefully so as
+ not to clog your fetch queue, and therefore you should be writing custom code that
+ suits your needs
+
+## JWS
+
+* `jws.Sign()` can now generate JWS messages in either compact or JSON
+ forms. By default, the compact form is used. JSON format can be
+ enabled by using the `jws.WithJSON` option.
+
+* `jws.Sign()` can now accept multiple keys by passing multiple
+ `jws.WithKey()` options. This can be used with `jws.WithJSON` to
+ create JWS messages with multiple signatures.
+
+* `jws.WithCompact` and `jws.WithJSON` options have been added
+ to control the serialization format.
+
+* jws.Verify()'s method signature has been changed to `jwt.Verify([]byte, ...jws.VerifyOption) ([]byte, error)`.
+ These options can be stacked. Therefore, you could configure the
+ verification process to attempt a static key pair, a JWKS, and only
+ try other forms if the first two fails, for example.
+
+ - For static key pair, use `jws.WithKey()`
+ - For static JWKS, use `jws.WithKeySet()`
+ - For enabling verification using `jku`, use `jws.WithVerifyAuto()`
+ - For custom, possibly dynamic key provisioning, use `jws.WithKeyProvider()`
+
+* jws.WithVerify() has been removed.
+
+* jws.WithKey() has been added to specify an algorithm + key to
+ verify the payload with.
+
+* jws.WithKeySet() has been added to specify a JWKS to be used for
+ verification. By default `kid` AND `alg` must match between the signature
+ and the key.
+
+ The option can take further suboptions:
+
+```go
+jws.Parse(serialized,
+ jws.WithKeySet(set,
+ // by default `kid` is required. set false to disable.
+ jws.WithRequireKid(false),
+ // optionally skip matching kid if there's exactly one key in set
+ jws.WithUseDefault(true),
+ // infer algorithm name from key type
+ jws.WithInferAlgorithm(true),
+ ),
+)
+```
+
+* `jws.VerifuAuto` has been removed in favor of using
+ `jws.WithVerifyAuto` option with `jws.Verify()`
+
+* `jws.WithVerifyAuto` has been added to enable verification
+ using `jku`.
+
+ The first argument must be a jwk.Fetcher object, but can be
+ set to `nil` to use the default implementation which is `jwk.Fetch`
+
+ The rest of the arguments are treated as options passed to the
+ `(jwk.Fetcher).Fetch()` function.
+
+* Remove `jws.WithPayloadSigner()`. This should be completely replaceable
+ using `jws.WithKey()`
+
+* jws.WithKeyProvider() has been added to specify arbitrary
+ code to specify which keys to try.
+
+* jws.KeyProvider interface has been added
+
+* jws.KeyProviderFunc has been added
+
+* jws.WithKeyUsed has been added to allow users to retrieve
+ the key used for verification. This is useful in cases you provided
+ multiple keys and you want to know which one was successful
+
+* `x5c` field type has been changed to `*cert.Chain` instead of `[]string`
+
+* `jws.ReadFile` now supports the option `jws.WithFS` which allows you to
+ read data from arbitrary `fs.FS` objects
+
+## JWT
+
+* `jwt.Parse` now verifies the signature and validates the token
+ by default. You must disable it explicitly using `jwt.WithValidate(false)`
+ and/or `jwt.WithVerify(false)` if you only want to parse the JWT message.
+
+ If you don't want either, a convenience function `jwt.ParseInsecure`
+ has been added.
+
+* `jwt.Parse` can only parse raw JWT (JSON) or JWS (JSON or Compact).
+ It no longer accepts JWE messages.
+
+* `jwt.WithDecrypt` has been removed
+
+* `jwt.WithJweHeaders` has been removed
+
+* `jwt.WithVerify()` has been renamed to `jwt.WithKey()`. The option can
+ be used for signing, encryption, and parsing.
+
+* `jwt.Validator` has been changed to return `jwt.ValidationError`.
+ If you provide a custom validator, you should wrap the error with
+ `jwt.NewValidationError()`
+
+* `jwt.UseDefault()` has been removed. You should use `jws.WithUseDefault()`
+ as a suboption in the `jwt.WithKeySet()` option.
+
+```go
+jwt.Parse(serialized, jwt.WithKeySet(set, jws.WithUseDefault(true)))
+```
+
+* `jwt.InferAlgorithmFromKey()` has been removed. You should use
+ `jws.WithInferAlgorithmFromKey()` as a suboption in the `jwt.WithKeySet()` option.
+
+```go
+jwt.Parse(serialized, jwt.WithKeySet(set, jws.WithInferAlgorithmFromKey(true)))
+```
+
+* jwt.WithKeySetProvider has been removed. Use `jwt.WithKeyProvider()`
+ instead. If jwt.WithKeyProvider seems a bit complicated, use a combination of
+ JWS parse, no-verify/validate JWT parse, and an extra JWS verify:
+
+```go
+msg, _ := jws.Parse(signed)
+token, _ := jwt.Parse(msg.Payload(), jwt.WithVerify(false), jwt.WithValidate(false))
+// Get information out of token, for example, `iss`
+switch token.Issuer() {
+case ...:
+ jws.Verify(signed, jwt.WithKey(...))
+}
+```
+
+* `jwt.WithHeaders` and `jwt.WithJwsHeaders` have been removed.
+ You should be able to use the new `jwt.WithKey` option to pass headers
+
+* `jwt.WithSignOption` and `jwt.WithEncryptOption` have been added as
+ escape hatches for options that are declared in `jws` and `jwe` packages
+ but not in `jwt`
+
+* `jwt.ReadFile` now supports the option `jwt.WithFS` which allows you to
+ read data from arbitrary `fs.FS` objects
+
+* `jwt.Sign()` has been changed so that it works more like the new `jws.Sign()`
+
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/Changes-v3.md b/vendor/github.com/lestrrat-go/jwx/v3/Changes-v3.md
new file mode 100644
index 0000000000..c2fa8747b9
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/Changes-v3.md
@@ -0,0 +1,140 @@
+# Incompatible Changes from v2 to v3
+
+These are changes that are incompatible with the v2.x.x version.
+
+# Detailed list of changes
+
+## Module
+
+* This module now requires Go 1.23
+
+* All `xxx.Get()` methods have been changed from `Get(string) (interface{}, error)` to
+ `Get(string, interface{}) error`, where the second argument should be a pointer
+ to the storage destination of the field.
+
+* All convenience accessors (e.g. `(jwt.Token).Subject`) now return `(T, bool)` instead of
+ `T`. If you want an accessor that returns a single value, consider using `Get()`
+
+* Most major errors can now be differentiated using `errors.Is`
+
+## JWA
+
+* All string constants have been renamed to equivalent functions that return a struct.
+ You should rewrite `jwa.RS256` as `jwa.RS256()` and so forth.
+
+* By default, only known algorithm names are accepted. For example, in our JWK tests,
+ there are tests that deal with "ECMR" algorithm, but this will now fail by default.
+ If you want this algorithm to succeed parsing, you need to call `jwa.RegisterXXXX`
+ functions before using them.
+
+* Previously, unmarshaling unquoted strings used to work (e.g. `var s = "RS256"`),
+ but now they must conform to the JSON standard and be quoted (e.g. `var s = strconv.Quote("RS256")`)
+
+## JWT
+
+* All convenience accessors (e.g. `Subject`) now return `(T, bool)` instead of
+ just `T`. If you want a single return value accessor, use `Get(dst) error` instead.
+
+* Validation used to work for `iat`, `nbf`, `exp` fields where these fields were
+ set to the explicit time.Time{} zero value, but now the _presence_ of these fields matter.
+
+* Validation of fields related to time used to be truncated to one second accuracy,
+ but no longer does so. To restore old behavior, you can either change the global settings by
+ calling `jwt.Settings(jwt.WithTruncation(time.Second))`, or you can
+ change it by each invocation by using `jwt.Validate(..., jwt.WithTruncation(time.Second))`
+
+* Error names have been renamed. For example `jwt.ErrInvalidJWT` has been renamed to
+ `jwt.UnknownPayloadTypeError` to better reflect what the error means. For other errors,
+ `func ErrXXXX()` have generally been renamed to `func XXXError()`
+
+* Validation errors are now wrapped. While `Validate()` returns a `ValidateError()` type,
+ it can also be matched against more specific error types such as `TokenExpierdError()`
+ using `errors.Is`
+
+* `jwt.ErrMissingRequiredClaim` has been removed
+
+## JWS
+
+* Iterators have been completely removed.
+* As a side effect of removing iterators, some methods such as `Copy()` lost the
+ `context.Context` argument
+
+* All convenience accessors (e.g. `Algorithm`) now return `(T, bool)` instead of
+ just `T`. If you want a single return value accessor, use `Get(dst) error` instead.
+
+* Errors from `jws.Sign` and `jws.Verify`, as well as `jws.Parse` (and friends)
+ can now be differentiated by using `errors.Is`. All `jws.IsXXXXError` functions
+ have been removed.
+
+## JWE
+
+* Iterators have been completely removed.
+* As a side effect of removing iterators, some methods such as `Copy()` lost the
+ `context.Context` argument
+
+* All convenience accessors (e.g. `Algorithm`) now return `(T, bool)` instead of
+ just `T`. If you want a single return value accessor, use `Get(dst) error` instead.
+
+* Errors from `jwe.Decrypt` and `jwe.Encrypt`, as well as `jwe.Parse` (and friends)
+ can now be differentiated by using `errors.Is`. All `jwe.IsXXXXrror` functions
+ have been removed.
+
+## JWK
+
+* All convenience accessors (e.g. `Algorithm`, `Crv`) now return `(T, bool)` instead
+ of just `T`, except `KeyType`, which _always_ returns a valid value. If you want a
+ single return value accessor, use `Get(dst) error` instead.
+
+* `jwk.KeyUsageType` can now be configured so that it's possible to assign values
+ other than "sig" and "enc" via `jwk.RegisterKeyUsage()`. Furthermore, strict
+ checks can be turned on/off against these registered values
+
+* `jwk.Cache` has been completely re-worked based on github.com/lestrrat-go/httprc/v3.
+ In particular, the default whitelist mode has changed from "block everything" to
+ "allow everything".
+
+* Experimental secp256k1 encoding/decoding for PEM encoded ASN.1 DER Format
+ has been removed. Instead, `jwk.PEMDecoder` and `jwk.PEMEncoder` have been
+ added to support those who want to perform non-standard PEM encoding/decoding
+
+* Iterators have been completely removed.
+
+* `jwk/x25519` has been removed. To use X25519 keys, use `(crypto/ecdh).PrivateKey` and
+ `(crypto/ecdh).PublicKey`. Similarly, internals have been reworked to use `crypto/ecdh`
+
+* Parsing has completely been reworked. It is now possible to add your own `jwk.KeyParser`
+ to generate a custom `jwk.Key` that this library may not natively support. Also see
+ `jwk.RegisterKeyParser()`
+
+* `jwk.KeyProbe` has been added to aid probing the JSON message. This is used to
+ guess the type of key described in the JSON message before deciding which concrete
+ type to instantiate, and aids implementing your own `jwk.KeyParser`. Also see
+ `jwk.RegisterKeyProbe()`
+
+* Conversion between raw keys and `jwk.Key` can be customized using `jwk.KeyImporter` and `jwk.KeyExporter`.
+ Also see `jwk.RegisterKeyImporter()` and `jwk.RegisterKeyExporter()`
+
+* Added `jwk/ecdsa` to keep track of which curves are available for ECDSA keys.
+
+* `(jwk.Key).Raw()` has been deprecated. Use `jwk.Export()` instead to convert `jwk.Key`
+ objects into their "raw" versions (e.g. `*rsa.PrivateKey`, `*ecdsa.PrivateKey`, etc).
+ This is to allow third parties to register custom key types that this library does not
+ natively support: Whereas a method must be bound to an object, and thus does not necessarily
+ have a way to hook into a global settings (i.e. custom exporter/importer) for arbitrary
+ key types, if the entrypoint is a function it's much easier and cleaner to for third-parties
+ to take advantage and hook into the mechanisms.
+
+* `jwk.FromRaw()` has been derepcated. Use `jwk.Import()` instead to convert "raw"
+ keys (e.g. `*rsa.PrivateKEy`, `*Ecdsa.PrivateKey`, etc) int `jwk.Key`s.
+
+* `(jwk.Key).FromRaw()` has been deprecated. The method `(jwk.Key).Import()` still exist for
+ built-in types, but it is no longer part of any public API (`interface{}`).
+
+* `jwk.Fetch` is marked as a simple wrapper around `net/http` and `jwk.Parse`.
+
+* `jwk.SetGlobalFetcher` has been deprecated.
+
+* `jwk.Fetcher` has been clearly marked as something that has limited
+ usage for `jws.WithVerifyAuto`
+
+* `jwk.Key` with P256/P386/P521 curves can be exporrted to `ecdh.PrivateKey`/`ecdh.PublicKey`
\ No newline at end of file
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/LICENSE b/vendor/github.com/lestrrat-go/jwx/v3/LICENSE
similarity index 99%
rename from vendor/github.com/open-policy-agent/opa/internal/jwx/LICENSE
rename to vendor/github.com/lestrrat-go/jwx/v3/LICENSE
index 6369f4fcc4..205e33a7f1 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/LICENSE
+++ b/vendor/github.com/lestrrat-go/jwx/v3/LICENSE
@@ -19,3 +19,4 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
+
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel b/vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel
new file mode 100644
index 0000000000..167e9b5c89
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel
@@ -0,0 +1,34 @@
+module(
+ name = "com_github_lestrrat_go_jwx_v3",
+ version = "3.0.0",
+ repo_name = "com_github_lestrrat_go_jwx_v2",
+)
+
+bazel_dep(name = "bazel_skylib", version = "1.7.1")
+bazel_dep(name = "rules_go", version = "0.55.1")
+bazel_dep(name = "gazelle", version = "0.44.0")
+bazel_dep(name = "aspect_bazel_lib", version = "2.11.0")
+
+# Go SDK setup - using Go 1.24.4 to match the toolchain in go.mod
+go_sdk = use_extension("@rules_go//go:extensions.bzl", "go_sdk")
+go_sdk.download(version = "1.24.4")
+
+# Go dependencies from go.mod
+go_deps = use_extension("@gazelle//:extensions.bzl", "go_deps")
+go_deps.from_file(go_mod = "//:go.mod")
+
+# Use repositories for external Go dependencies
+use_repo(
+ go_deps,
+ "com_github_decred_dcrd_dcrec_secp256k1_v4",
+ "com_github_goccy_go_json",
+ "com_github_lestrrat_go_blackmagic",
+ "com_github_lestrrat_go_dsig",
+ "com_github_lestrrat_go_dsig_secp256k1",
+ "com_github_lestrrat_go_httprc_v3",
+ "com_github_lestrrat_go_option_v2",
+ "com_github_segmentio_asm",
+ "com_github_stretchr_testify",
+ "com_github_valyala_fastjson",
+ "org_golang_x_crypto",
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel.lock b/vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel.lock
new file mode 100644
index 0000000000..2848e8716d
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel.lock
@@ -0,0 +1,230 @@
+{
+ "lockFileVersion": 18,
+ "registryFileHashes": {
+ "https://bcr.bazel.build/bazel_registry.json": "8a28e4aff06ee60aed2a8c281907fb8bcbf3b753c91fb5a5c57da3215d5b3497",
+ "https://bcr.bazel.build/modules/abseil-cpp/20210324.2/MODULE.bazel": "7cd0312e064fde87c8d1cd79ba06c876bd23630c83466e9500321be55c96ace2",
+ "https://bcr.bazel.build/modules/abseil-cpp/20211102.0/MODULE.bazel": "70390338f7a5106231d20620712f7cccb659cd0e9d073d1991c038eb9fc57589",
+ "https://bcr.bazel.build/modules/abseil-cpp/20230125.1/MODULE.bazel": "89047429cb0207707b2dface14ba7f8df85273d484c2572755be4bab7ce9c3a0",
+ "https://bcr.bazel.build/modules/abseil-cpp/20230802.0.bcr.1/MODULE.bazel": "1c8cec495288dccd14fdae6e3f95f772c1c91857047a098fad772034264cc8cb",
+ "https://bcr.bazel.build/modules/abseil-cpp/20230802.0/MODULE.bazel": "d253ae36a8bd9ee3c5955384096ccb6baf16a1b1e93e858370da0a3b94f77c16",
+ "https://bcr.bazel.build/modules/abseil-cpp/20230802.1/MODULE.bazel": "fa92e2eb41a04df73cdabeec37107316f7e5272650f81d6cc096418fe647b915",
+ "https://bcr.bazel.build/modules/abseil-cpp/20240116.1/MODULE.bazel": "37bcdb4440fbb61df6a1c296ae01b327f19e9bb521f9b8e26ec854b6f97309ed",
+ "https://bcr.bazel.build/modules/abseil-cpp/20240116.1/source.json": "9be551b8d4e3ef76875c0d744b5d6a504a27e3ae67bc6b28f46415fd2d2957da",
+ "https://bcr.bazel.build/modules/aspect_bazel_lib/2.11.0/MODULE.bazel": "cb1ba9f9999ed0bc08600c221f532c1ddd8d217686b32ba7d45b0713b5131452",
+ "https://bcr.bazel.build/modules/aspect_bazel_lib/2.11.0/source.json": "92494d5aa43b96665397dd13ee16023097470fa85e276b93674d62a244de47ee",
+ "https://bcr.bazel.build/modules/bazel_features/1.1.0/MODULE.bazel": "cfd42ff3b815a5f39554d97182657f8c4b9719568eb7fded2b9135f084bf760b",
+ "https://bcr.bazel.build/modules/bazel_features/1.1.1/MODULE.bazel": "27b8c79ef57efe08efccbd9dd6ef70d61b4798320b8d3c134fd571f78963dbcd",
+ "https://bcr.bazel.build/modules/bazel_features/1.11.0/MODULE.bazel": "f9382337dd5a474c3b7d334c2f83e50b6eaedc284253334cf823044a26de03e8",
+ "https://bcr.bazel.build/modules/bazel_features/1.15.0/MODULE.bazel": "d38ff6e517149dc509406aca0db3ad1efdd890a85e049585b7234d04238e2a4d",
+ "https://bcr.bazel.build/modules/bazel_features/1.17.0/MODULE.bazel": "039de32d21b816b47bd42c778e0454217e9c9caac4a3cf8e15c7231ee3ddee4d",
+ "https://bcr.bazel.build/modules/bazel_features/1.18.0/MODULE.bazel": "1be0ae2557ab3a72a57aeb31b29be347bcdc5d2b1eb1e70f39e3851a7e97041a",
+ "https://bcr.bazel.build/modules/bazel_features/1.19.0/MODULE.bazel": "59adcdf28230d220f0067b1f435b8537dd033bfff8db21335ef9217919c7fb58",
+ "https://bcr.bazel.build/modules/bazel_features/1.30.0/MODULE.bazel": "a14b62d05969a293b80257e72e597c2da7f717e1e69fa8b339703ed6731bec87",
+ "https://bcr.bazel.build/modules/bazel_features/1.30.0/source.json": "b07e17f067fe4f69f90b03b36ef1e08fe0d1f3cac254c1241a1818773e3423bc",
+ "https://bcr.bazel.build/modules/bazel_features/1.4.1/MODULE.bazel": "e45b6bb2350aff3e442ae1111c555e27eac1d915e77775f6fdc4b351b758b5d7",
+ "https://bcr.bazel.build/modules/bazel_features/1.9.0/MODULE.bazel": "885151d58d90d8d9c811eb75e3288c11f850e1d6b481a8c9f766adee4712358b",
+ "https://bcr.bazel.build/modules/bazel_features/1.9.1/MODULE.bazel": "8f679097876a9b609ad1f60249c49d68bfab783dd9be012faf9d82547b14815a",
+ "https://bcr.bazel.build/modules/bazel_skylib/1.0.3/MODULE.bazel": "bcb0fd896384802d1ad283b4e4eb4d718eebd8cb820b0a2c3a347fb971afd9d8",
+ "https://bcr.bazel.build/modules/bazel_skylib/1.1.1/MODULE.bazel": "1add3e7d93ff2e6998f9e118022c84d163917d912f5afafb3058e3d2f1545b5e",
+ "https://bcr.bazel.build/modules/bazel_skylib/1.2.0/MODULE.bazel": "44fe84260e454ed94ad326352a698422dbe372b21a1ac9f3eab76eb531223686",
+ "https://bcr.bazel.build/modules/bazel_skylib/1.2.1/MODULE.bazel": "f35baf9da0efe45fa3da1696ae906eea3d615ad41e2e3def4aeb4e8bc0ef9a7a",
+ "https://bcr.bazel.build/modules/bazel_skylib/1.3.0/MODULE.bazel": "20228b92868bf5cfc41bda7afc8a8ba2a543201851de39d990ec957b513579c5",
+ "https://bcr.bazel.build/modules/bazel_skylib/1.4.1/MODULE.bazel": "a0dcb779424be33100dcae821e9e27e4f2901d9dfd5333efe5ac6a8d7ab75e1d",
+ "https://bcr.bazel.build/modules/bazel_skylib/1.4.2/MODULE.bazel": "3bd40978e7a1fac911d5989e6b09d8f64921865a45822d8b09e815eaa726a651",
+ "https://bcr.bazel.build/modules/bazel_skylib/1.5.0/MODULE.bazel": "32880f5e2945ce6a03d1fbd588e9198c0a959bb42297b2cfaf1685b7bc32e138",
+ "https://bcr.bazel.build/modules/bazel_skylib/1.6.1/MODULE.bazel": "8fdee2dbaace6c252131c00e1de4b165dc65af02ea278476187765e1a617b917",
+ "https://bcr.bazel.build/modules/bazel_skylib/1.7.0/MODULE.bazel": "0db596f4563de7938de764cc8deeabec291f55e8ec15299718b93c4423e9796d",
+ "https://bcr.bazel.build/modules/bazel_skylib/1.7.1/MODULE.bazel": "3120d80c5861aa616222ec015332e5f8d3171e062e3e804a2a0253e1be26e59b",
+ "https://bcr.bazel.build/modules/bazel_skylib/1.7.1/source.json": "f121b43eeefc7c29efbd51b83d08631e2347297c95aac9764a701f2a6a2bb953",
+ "https://bcr.bazel.build/modules/buildozer/7.1.2/MODULE.bazel": "2e8dd40ede9c454042645fd8d8d0cd1527966aa5c919de86661e62953cd73d84",
+ "https://bcr.bazel.build/modules/buildozer/7.1.2/source.json": "c9028a501d2db85793a6996205c8de120944f50a0d570438fcae0457a5f9d1f8",
+ "https://bcr.bazel.build/modules/gazelle/0.32.0/MODULE.bazel": "b499f58a5d0d3537f3cf5b76d8ada18242f64ec474d8391247438bf04f58c7b8",
+ "https://bcr.bazel.build/modules/gazelle/0.33.0/MODULE.bazel": "a13a0f279b462b784fb8dd52a4074526c4a2afe70e114c7d09066097a46b3350",
+ "https://bcr.bazel.build/modules/gazelle/0.34.0/MODULE.bazel": "abdd8ce4d70978933209db92e436deb3a8b737859e9354fb5fd11fb5c2004c8a",
+ "https://bcr.bazel.build/modules/gazelle/0.36.0/MODULE.bazel": "e375d5d6e9a6ca59b0cb38b0540bc9a05b6aa926d322f2de268ad267a2ee74c0",
+ "https://bcr.bazel.build/modules/gazelle/0.44.0/MODULE.bazel": "fd3177ca0938da57a1e416cad3f39b9c4334defbc717e89aba9d9ddbbb0341da",
+ "https://bcr.bazel.build/modules/gazelle/0.44.0/source.json": "7fb65ef9c1ce470d099ca27fd478673d9d64c844af28d0d472b0874c7d590cb6",
+ "https://bcr.bazel.build/modules/google_benchmark/1.8.2/MODULE.bazel": "a70cf1bba851000ba93b58ae2f6d76490a9feb74192e57ab8e8ff13c34ec50cb",
+ "https://bcr.bazel.build/modules/googletest/1.11.0/MODULE.bazel": "3a83f095183f66345ca86aa13c58b59f9f94a2f81999c093d4eeaa2d262d12f4",
+ "https://bcr.bazel.build/modules/googletest/1.14.0.bcr.1/MODULE.bazel": "22c31a561553727960057361aa33bf20fb2e98584bc4fec007906e27053f80c6",
+ "https://bcr.bazel.build/modules/googletest/1.14.0.bcr.1/source.json": "41e9e129f80d8c8bf103a7acc337b76e54fad1214ac0a7084bf24f4cd924b8b4",
+ "https://bcr.bazel.build/modules/googletest/1.14.0/MODULE.bazel": "cfbcbf3e6eac06ef9d85900f64424708cc08687d1b527f0ef65aa7517af8118f",
+ "https://bcr.bazel.build/modules/jsoncpp/1.9.5/MODULE.bazel": "31271aedc59e815656f5736f282bb7509a97c7ecb43e927ac1a37966e0578075",
+ "https://bcr.bazel.build/modules/jsoncpp/1.9.5/source.json": "4108ee5085dd2885a341c7fab149429db457b3169b86eb081fa245eadf69169d",
+ "https://bcr.bazel.build/modules/libpfm/4.11.0/MODULE.bazel": "45061ff025b301940f1e30d2c16bea596c25b176c8b6b3087e92615adbd52902",
+ "https://bcr.bazel.build/modules/package_metadata/0.0.2/MODULE.bazel": "fb8d25550742674d63d7b250063d4580ca530499f045d70748b1b142081ebb92",
+ "https://bcr.bazel.build/modules/package_metadata/0.0.2/source.json": "e53a759a72488d2c0576f57491ef2da0cf4aab05ac0997314012495935531b73",
+ "https://bcr.bazel.build/modules/platforms/0.0.10/MODULE.bazel": "8cb8efaf200bdeb2150d93e162c40f388529a25852b332cec879373771e48ed5",
+ "https://bcr.bazel.build/modules/platforms/0.0.11/MODULE.bazel": "0daefc49732e227caa8bfa834d65dc52e8cc18a2faf80df25e8caea151a9413f",
+ "https://bcr.bazel.build/modules/platforms/0.0.11/source.json": "f7e188b79ebedebfe75e9e1d098b8845226c7992b307e28e1496f23112e8fc29",
+ "https://bcr.bazel.build/modules/platforms/0.0.4/MODULE.bazel": "9b328e31ee156f53f3c416a64f8491f7eb731742655a47c9eec4703a71644aee",
+ "https://bcr.bazel.build/modules/platforms/0.0.5/MODULE.bazel": "5733b54ea419d5eaf7997054bb55f6a1d0b5ff8aedf0176fef9eea44f3acda37",
+ "https://bcr.bazel.build/modules/platforms/0.0.6/MODULE.bazel": "ad6eeef431dc52aefd2d77ed20a4b353f8ebf0f4ecdd26a807d2da5aa8cd0615",
+ "https://bcr.bazel.build/modules/platforms/0.0.7/MODULE.bazel": "72fd4a0ede9ee5c021f6a8dd92b503e089f46c227ba2813ff183b71616034814",
+ "https://bcr.bazel.build/modules/platforms/0.0.8/MODULE.bazel": "9f142c03e348f6d263719f5074b21ef3adf0b139ee4c5133e2aa35664da9eb2d",
+ "https://bcr.bazel.build/modules/protobuf/21.7/MODULE.bazel": "a5a29bb89544f9b97edce05642fac225a808b5b7be74038ea3640fae2f8e66a7",
+ "https://bcr.bazel.build/modules/protobuf/27.0/MODULE.bazel": "7873b60be88844a0a1d8f80b9d5d20cfbd8495a689b8763e76c6372998d3f64c",
+ "https://bcr.bazel.build/modules/protobuf/27.1/MODULE.bazel": "703a7b614728bb06647f965264967a8ef1c39e09e8f167b3ca0bb1fd80449c0d",
+ "https://bcr.bazel.build/modules/protobuf/29.0-rc2.bcr.1/MODULE.bazel": "52f4126f63a2f0bbf36b99c2a87648f08467a4eaf92ba726bc7d6a500bbf770c",
+ "https://bcr.bazel.build/modules/protobuf/29.0-rc2/MODULE.bazel": "6241d35983510143049943fc0d57937937122baf1b287862f9dc8590fc4c37df",
+ "https://bcr.bazel.build/modules/protobuf/29.0/MODULE.bazel": "319dc8bf4c679ff87e71b1ccfb5a6e90a6dbc4693501d471f48662ac46d04e4e",
+ "https://bcr.bazel.build/modules/protobuf/29.0/source.json": "b857f93c796750eef95f0d61ee378f3420d00ee1dd38627b27193aa482f4f981",
+ "https://bcr.bazel.build/modules/protobuf/3.19.0/MODULE.bazel": "6b5fbb433f760a99a22b18b6850ed5784ef0e9928a72668b66e4d7ccd47db9b0",
+ "https://bcr.bazel.build/modules/protobuf/3.19.2/MODULE.bazel": "532ffe5f2186b69fdde039efe6df13ba726ff338c6bc82275ad433013fa10573",
+ "https://bcr.bazel.build/modules/protobuf/3.19.6/MODULE.bazel": "9233edc5e1f2ee276a60de3eaa47ac4132302ef9643238f23128fea53ea12858",
+ "https://bcr.bazel.build/modules/pybind11_bazel/2.11.1/MODULE.bazel": "88af1c246226d87e65be78ed49ecd1e6f5e98648558c14ce99176da041dc378e",
+ "https://bcr.bazel.build/modules/pybind11_bazel/2.11.1/source.json": "be4789e951dd5301282729fe3d4938995dc4c1a81c2ff150afc9f1b0504c6022",
+ "https://bcr.bazel.build/modules/re2/2023-09-01/MODULE.bazel": "cb3d511531b16cfc78a225a9e2136007a48cf8a677e4264baeab57fe78a80206",
+ "https://bcr.bazel.build/modules/re2/2023-09-01/source.json": "e044ce89c2883cd957a2969a43e79f7752f9656f6b20050b62f90ede21ec6eb4",
+ "https://bcr.bazel.build/modules/rules_android/0.1.1/MODULE.bazel": "48809ab0091b07ad0182defb787c4c5328bd3a278938415c00a7b69b50c4d3a8",
+ "https://bcr.bazel.build/modules/rules_android/0.1.1/source.json": "e6986b41626ee10bdc864937ffb6d6bf275bb5b9c65120e6137d56e6331f089e",
+ "https://bcr.bazel.build/modules/rules_cc/0.0.1/MODULE.bazel": "cb2aa0747f84c6c3a78dad4e2049c154f08ab9d166b1273835a8174940365647",
+ "https://bcr.bazel.build/modules/rules_cc/0.0.10/MODULE.bazel": "ec1705118f7eaedd6e118508d3d26deba2a4e76476ada7e0e3965211be012002",
+ "https://bcr.bazel.build/modules/rules_cc/0.0.13/MODULE.bazel": "0e8529ed7b323dad0775ff924d2ae5af7640b23553dfcd4d34344c7e7a867191",
+ "https://bcr.bazel.build/modules/rules_cc/0.0.14/MODULE.bazel": "5e343a3aac88b8d7af3b1b6d2093b55c347b8eefc2e7d1442f7a02dc8fea48ac",
+ "https://bcr.bazel.build/modules/rules_cc/0.0.15/MODULE.bazel": "6704c35f7b4a72502ee81f61bf88706b54f06b3cbe5558ac17e2e14666cd5dcc",
+ "https://bcr.bazel.build/modules/rules_cc/0.0.16/MODULE.bazel": "7661303b8fc1b4d7f532e54e9d6565771fea666fbdf839e0a86affcd02defe87",
+ "https://bcr.bazel.build/modules/rules_cc/0.0.2/MODULE.bazel": "6915987c90970493ab97393024c156ea8fb9f3bea953b2f3ec05c34f19b5695c",
+ "https://bcr.bazel.build/modules/rules_cc/0.0.6/MODULE.bazel": "abf360251023dfe3efcef65ab9d56beefa8394d4176dd29529750e1c57eaa33f",
+ "https://bcr.bazel.build/modules/rules_cc/0.0.8/MODULE.bazel": "964c85c82cfeb6f3855e6a07054fdb159aced38e99a5eecf7bce9d53990afa3e",
+ "https://bcr.bazel.build/modules/rules_cc/0.0.9/MODULE.bazel": "836e76439f354b89afe6a911a7adf59a6b2518fafb174483ad78a2a2fde7b1c5",
+ "https://bcr.bazel.build/modules/rules_cc/0.1.1/MODULE.bazel": "2f0222a6f229f0bf44cd711dc13c858dad98c62d52bd51d8fc3a764a83125513",
+ "https://bcr.bazel.build/modules/rules_cc/0.1.1/source.json": "d61627377bd7dd1da4652063e368d9366fc9a73920bfa396798ad92172cf645c",
+ "https://bcr.bazel.build/modules/rules_foreign_cc/0.9.0/MODULE.bazel": "c9e8c682bf75b0e7c704166d79b599f93b72cfca5ad7477df596947891feeef6",
+ "https://bcr.bazel.build/modules/rules_fuzzing/0.5.2/MODULE.bazel": "40c97d1144356f52905566c55811f13b299453a14ac7769dfba2ac38192337a8",
+ "https://bcr.bazel.build/modules/rules_fuzzing/0.5.2/source.json": "c8b1e2c717646f1702290959a3302a178fb639d987ab61d548105019f11e527e",
+ "https://bcr.bazel.build/modules/rules_go/0.41.0/MODULE.bazel": "55861d8e8bb0e62cbd2896f60ff303f62ffcb0eddb74ecb0e5c0cbe36fc292c8",
+ "https://bcr.bazel.build/modules/rules_go/0.42.0/MODULE.bazel": "8cfa875b9aa8c6fce2b2e5925e73c1388173ea3c32a0db4d2b4804b453c14270",
+ "https://bcr.bazel.build/modules/rules_go/0.46.0/MODULE.bazel": "3477df8bdcc49e698b9d25f734c4f3a9f5931ff34ee48a2c662be168f5f2d3fd",
+ "https://bcr.bazel.build/modules/rules_go/0.51.0/MODULE.bazel": "b6920f505935bfd69381651c942496d99b16e2a12f3dd5263b90ded16f3b4d0f",
+ "https://bcr.bazel.build/modules/rules_go/0.55.1/MODULE.bazel": "a57a6fc59a74326c0b440d07cca209edf13c7d1a641e48cfbeab56e79f873609",
+ "https://bcr.bazel.build/modules/rules_go/0.55.1/source.json": "827a740c8959c9d20616889e7746cde4dcc6ee80d25146943627ccea0736328f",
+ "https://bcr.bazel.build/modules/rules_java/4.0.0/MODULE.bazel": "5a78a7ae82cd1a33cef56dc578c7d2a46ed0dca12643ee45edbb8417899e6f74",
+ "https://bcr.bazel.build/modules/rules_java/5.3.5/MODULE.bazel": "a4ec4f2db570171e3e5eb753276ee4b389bae16b96207e9d3230895c99644b86",
+ "https://bcr.bazel.build/modules/rules_java/6.0.0/MODULE.bazel": "8a43b7df601a7ec1af61d79345c17b31ea1fedc6711fd4abfd013ea612978e39",
+ "https://bcr.bazel.build/modules/rules_java/6.3.0/MODULE.bazel": "a97c7678c19f236a956ad260d59c86e10a463badb7eb2eda787490f4c969b963",
+ "https://bcr.bazel.build/modules/rules_java/6.4.0/MODULE.bazel": "e986a9fe25aeaa84ac17ca093ef13a4637f6107375f64667a15999f77db6c8f6",
+ "https://bcr.bazel.build/modules/rules_java/6.5.2/MODULE.bazel": "1d440d262d0e08453fa0c4d8f699ba81609ed0e9a9a0f02cd10b3e7942e61e31",
+ "https://bcr.bazel.build/modules/rules_java/7.10.0/MODULE.bazel": "530c3beb3067e870561739f1144329a21c851ff771cd752a49e06e3dc9c2e71a",
+ "https://bcr.bazel.build/modules/rules_java/7.12.2/MODULE.bazel": "579c505165ee757a4280ef83cda0150eea193eed3bef50b1004ba88b99da6de6",
+ "https://bcr.bazel.build/modules/rules_java/7.2.0/MODULE.bazel": "06c0334c9be61e6cef2c8c84a7800cef502063269a5af25ceb100b192453d4ab",
+ "https://bcr.bazel.build/modules/rules_java/7.3.2/MODULE.bazel": "50dece891cfdf1741ea230d001aa9c14398062f2b7c066470accace78e412bc2",
+ "https://bcr.bazel.build/modules/rules_java/7.6.1/MODULE.bazel": "2f14b7e8a1aa2f67ae92bc69d1ec0fa8d9f827c4e17ff5e5f02e91caa3b2d0fe",
+ "https://bcr.bazel.build/modules/rules_java/8.12.0/MODULE.bazel": "8e6590b961f2defdfc2811c089c75716cb2f06c8a4edeb9a8d85eaa64ee2a761",
+ "https://bcr.bazel.build/modules/rules_java/8.12.0/source.json": "cbd5d55d9d38d4008a7d00bee5b5a5a4b6031fcd4a56515c9accbcd42c7be2ba",
+ "https://bcr.bazel.build/modules/rules_jvm_external/4.4.2/MODULE.bazel": "a56b85e418c83eb1839819f0b515c431010160383306d13ec21959ac412d2fe7",
+ "https://bcr.bazel.build/modules/rules_jvm_external/5.1/MODULE.bazel": "33f6f999e03183f7d088c9be518a63467dfd0be94a11d0055fe2d210f89aa909",
+ "https://bcr.bazel.build/modules/rules_jvm_external/5.2/MODULE.bazel": "d9351ba35217ad0de03816ef3ed63f89d411349353077348a45348b096615036",
+ "https://bcr.bazel.build/modules/rules_jvm_external/5.3/MODULE.bazel": "bf93870767689637164657731849fb887ad086739bd5d360d90007a581d5527d",
+ "https://bcr.bazel.build/modules/rules_jvm_external/6.1/MODULE.bazel": "75b5fec090dbd46cf9b7d8ea08cf84a0472d92ba3585b476f44c326eda8059c4",
+ "https://bcr.bazel.build/modules/rules_jvm_external/6.3/MODULE.bazel": "c998e060b85f71e00de5ec552019347c8bca255062c990ac02d051bb80a38df0",
+ "https://bcr.bazel.build/modules/rules_jvm_external/6.3/source.json": "6f5f5a5a4419ae4e37c35a5bb0a6ae657ed40b7abc5a5189111b47fcebe43197",
+ "https://bcr.bazel.build/modules/rules_kotlin/1.9.0/MODULE.bazel": "ef85697305025e5a61f395d4eaede272a5393cee479ace6686dba707de804d59",
+ "https://bcr.bazel.build/modules/rules_kotlin/1.9.6/MODULE.bazel": "d269a01a18ee74d0335450b10f62c9ed81f2321d7958a2934e44272fe82dcef3",
+ "https://bcr.bazel.build/modules/rules_kotlin/1.9.6/source.json": "2faa4794364282db7c06600b7e5e34867a564ae91bda7cae7c29c64e9466b7d5",
+ "https://bcr.bazel.build/modules/rules_license/0.0.3/MODULE.bazel": "627e9ab0247f7d1e05736b59dbb1b6871373de5ad31c3011880b4133cafd4bd0",
+ "https://bcr.bazel.build/modules/rules_license/0.0.7/MODULE.bazel": "088fbeb0b6a419005b89cf93fe62d9517c0a2b8bb56af3244af65ecfe37e7d5d",
+ "https://bcr.bazel.build/modules/rules_license/1.0.0/MODULE.bazel": "a7fda60eefdf3d8c827262ba499957e4df06f659330bbe6cdbdb975b768bb65c",
+ "https://bcr.bazel.build/modules/rules_license/1.0.0/source.json": "a52c89e54cc311196e478f8382df91c15f7a2bfdf4c6cd0e2675cc2ff0b56efb",
+ "https://bcr.bazel.build/modules/rules_pkg/0.7.0/MODULE.bazel": "df99f03fc7934a4737122518bb87e667e62d780b610910f0447665a7e2be62dc",
+ "https://bcr.bazel.build/modules/rules_pkg/1.0.1/MODULE.bazel": "5b1df97dbc29623bccdf2b0dcd0f5cb08e2f2c9050aab1092fd39a41e82686ff",
+ "https://bcr.bazel.build/modules/rules_pkg/1.0.1/source.json": "bd82e5d7b9ce2d31e380dd9f50c111d678c3bdaca190cb76b0e1c71b05e1ba8a",
+ "https://bcr.bazel.build/modules/rules_proto/4.0.0/MODULE.bazel": "a7a7b6ce9bee418c1a760b3d84f83a299ad6952f9903c67f19e4edd964894e06",
+ "https://bcr.bazel.build/modules/rules_proto/5.3.0-21.7/MODULE.bazel": "e8dff86b0971688790ae75528fe1813f71809b5afd57facb44dad9e8eca631b7",
+ "https://bcr.bazel.build/modules/rules_proto/6.0.0/MODULE.bazel": "b531d7f09f58dce456cd61b4579ce8c86b38544da75184eadaf0a7cb7966453f",
+ "https://bcr.bazel.build/modules/rules_proto/6.0.2/MODULE.bazel": "ce916b775a62b90b61888052a416ccdda405212b6aaeb39522f7dc53431a5e73",
+ "https://bcr.bazel.build/modules/rules_proto/7.0.2/MODULE.bazel": "bf81793bd6d2ad89a37a40693e56c61b0ee30f7a7fdbaf3eabbf5f39de47dea2",
+ "https://bcr.bazel.build/modules/rules_proto/7.0.2/source.json": "1e5e7260ae32ef4f2b52fd1d0de8d03b606a44c91b694d2f1afb1d3b28a48ce1",
+ "https://bcr.bazel.build/modules/rules_python/0.10.2/MODULE.bazel": "cc82bc96f2997baa545ab3ce73f196d040ffb8756fd2d66125a530031cd90e5f",
+ "https://bcr.bazel.build/modules/rules_python/0.23.1/MODULE.bazel": "49ffccf0511cb8414de28321f5fcf2a31312b47c40cc21577144b7447f2bf300",
+ "https://bcr.bazel.build/modules/rules_python/0.25.0/MODULE.bazel": "72f1506841c920a1afec76975b35312410eea3aa7b63267436bfb1dd91d2d382",
+ "https://bcr.bazel.build/modules/rules_python/0.28.0/MODULE.bazel": "cba2573d870babc976664a912539b320cbaa7114cd3e8f053c720171cde331ed",
+ "https://bcr.bazel.build/modules/rules_python/0.31.0/MODULE.bazel": "93a43dc47ee570e6ec9f5779b2e64c1476a6ce921c48cc9a1678a91dd5f8fd58",
+ "https://bcr.bazel.build/modules/rules_python/0.4.0/MODULE.bazel": "9208ee05fd48bf09ac60ed269791cf17fb343db56c8226a720fbb1cdf467166c",
+ "https://bcr.bazel.build/modules/rules_python/0.40.0/MODULE.bazel": "9d1a3cd88ed7d8e39583d9ffe56ae8a244f67783ae89b60caafc9f5cf318ada7",
+ "https://bcr.bazel.build/modules/rules_python/0.40.0/source.json": "939d4bd2e3110f27bfb360292986bb79fd8dcefb874358ccd6cdaa7bda029320",
+ "https://bcr.bazel.build/modules/rules_shell/0.2.0/MODULE.bazel": "fda8a652ab3c7d8fee214de05e7a9916d8b28082234e8d2c0094505c5268ed3c",
+ "https://bcr.bazel.build/modules/rules_shell/0.3.0/MODULE.bazel": "de4402cd12f4cc8fda2354fce179fdb068c0b9ca1ec2d2b17b3e21b24c1a937b",
+ "https://bcr.bazel.build/modules/rules_shell/0.3.0/source.json": "c55ed591aa5009401ddf80ded9762ac32c358d2517ee7820be981e2de9756cf3",
+ "https://bcr.bazel.build/modules/stardoc/0.5.1/MODULE.bazel": "1a05d92974d0c122f5ccf09291442580317cdd859f07a8655f1db9a60374f9f8",
+ "https://bcr.bazel.build/modules/stardoc/0.5.3/MODULE.bazel": "c7f6948dae6999bf0db32c1858ae345f112cacf98f174c7a8bb707e41b974f1c",
+ "https://bcr.bazel.build/modules/stardoc/0.5.6/MODULE.bazel": "c43dabc564990eeab55e25ed61c07a1aadafe9ece96a4efabb3f8bf9063b71ef",
+ "https://bcr.bazel.build/modules/stardoc/0.6.2/MODULE.bazel": "7060193196395f5dd668eda046ccbeacebfd98efc77fed418dbe2b82ffaa39fd",
+ "https://bcr.bazel.build/modules/stardoc/0.7.0/MODULE.bazel": "05e3d6d30c099b6770e97da986c53bd31844d7f13d41412480ea265ac9e8079c",
+ "https://bcr.bazel.build/modules/stardoc/0.7.1/MODULE.bazel": "3548faea4ee5dda5580f9af150e79d0f6aea934fc60c1cc50f4efdd9420759e7",
+ "https://bcr.bazel.build/modules/stardoc/0.7.1/source.json": "b6500ffcd7b48cd72c29bb67bcac781e12701cc0d6d55d266a652583cfcdab01",
+ "https://bcr.bazel.build/modules/upb/0.0.0-20220923-a547704/MODULE.bazel": "7298990c00040a0e2f121f6c32544bab27d4452f80d9ce51349b1a28f3005c43",
+ "https://bcr.bazel.build/modules/zlib/1.2.11/MODULE.bazel": "07b389abc85fdbca459b69e2ec656ae5622873af3f845e1c9d80fe179f3effa0",
+ "https://bcr.bazel.build/modules/zlib/1.2.12/MODULE.bazel": "3b1a8834ada2a883674be8cbd36ede1b6ec481477ada359cd2d3ddc562340b27",
+ "https://bcr.bazel.build/modules/zlib/1.3.1.bcr.5/MODULE.bazel": "eec517b5bbe5492629466e11dae908d043364302283de25581e3eb944326c4ca",
+ "https://bcr.bazel.build/modules/zlib/1.3.1.bcr.5/source.json": "22bc55c47af97246cfc093d0acf683a7869377de362b5d1c552c2c2e16b7a806",
+ "https://bcr.bazel.build/modules/zlib/1.3.1/MODULE.bazel": "751c9940dcfe869f5f7274e1295422a34623555916eb98c174c1e945594bf198"
+ },
+ "selectedYankedVersions": {},
+ "moduleExtensions": {
+ "@@rules_kotlin+//src/main/starlark/core/repositories:bzlmod_setup.bzl%rules_kotlin_extensions": {
+ "general": {
+ "bzlTransitiveDigest": "hUTp2w+RUVdL7ma5esCXZJAFnX7vLbVfLd7FwnQI6bU=",
+ "usagesDigest": "QI2z8ZUR+mqtbwsf2fLqYdJAkPOHdOV+tF2yVAUgRzw=",
+ "recordedFileInputs": {},
+ "recordedDirentsInputs": {},
+ "envVariables": {},
+ "generatedRepoSpecs": {
+ "com_github_jetbrains_kotlin_git": {
+ "repoRuleId": "@@rules_kotlin+//src/main/starlark/core/repositories:compiler.bzl%kotlin_compiler_git_repository",
+ "attributes": {
+ "urls": [
+ "https://github.com/JetBrains/kotlin/releases/download/v1.9.23/kotlin-compiler-1.9.23.zip"
+ ],
+ "sha256": "93137d3aab9afa9b27cb06a824c2324195c6b6f6179d8a8653f440f5bd58be88"
+ }
+ },
+ "com_github_jetbrains_kotlin": {
+ "repoRuleId": "@@rules_kotlin+//src/main/starlark/core/repositories:compiler.bzl%kotlin_capabilities_repository",
+ "attributes": {
+ "git_repository_name": "com_github_jetbrains_kotlin_git",
+ "compiler_version": "1.9.23"
+ }
+ },
+ "com_github_google_ksp": {
+ "repoRuleId": "@@rules_kotlin+//src/main/starlark/core/repositories:ksp.bzl%ksp_compiler_plugin_repository",
+ "attributes": {
+ "urls": [
+ "https://github.com/google/ksp/releases/download/1.9.23-1.0.20/artifacts.zip"
+ ],
+ "sha256": "ee0618755913ef7fd6511288a232e8fad24838b9af6ea73972a76e81053c8c2d",
+ "strip_version": "1.9.23-1.0.20"
+ }
+ },
+ "com_github_pinterest_ktlint": {
+ "repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_file",
+ "attributes": {
+ "sha256": "01b2e0ef893383a50dbeb13970fe7fa3be36ca3e83259e01649945b09d736985",
+ "urls": [
+ "https://github.com/pinterest/ktlint/releases/download/1.3.0/ktlint"
+ ],
+ "executable": true
+ }
+ },
+ "rules_android": {
+ "repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_archive",
+ "attributes": {
+ "sha256": "cd06d15dd8bb59926e4d65f9003bfc20f9da4b2519985c27e190cddc8b7a7806",
+ "strip_prefix": "rules_android-0.1.1",
+ "urls": [
+ "https://github.com/bazelbuild/rules_android/archive/v0.1.1.zip"
+ ]
+ }
+ }
+ },
+ "recordedRepoMappingEntries": [
+ [
+ "rules_kotlin+",
+ "bazel_tools",
+ "bazel_tools"
+ ]
+ ]
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/Makefile b/vendor/github.com/lestrrat-go/jwx/v3/Makefile
new file mode 100644
index 0000000000..672c007b29
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/Makefile
@@ -0,0 +1,98 @@
+.PHONY: generate realclean cover viewcover test lint check_diffs imports tidy jwx
+generate:
+ @go generate
+ @$(MAKE) generate-jwa generate-jwe generate-jwk generate-jws generate-jwt
+ @./tools/cmd/gofmt.sh
+
+generate-%:
+ @go generate $(shell pwd -P)/$(patsubst generate-%,%,$@)
+
+realclean:
+ rm coverage.out
+
+test-cmd:
+ env TESTOPTS="$(TESTOPTS)" ./tools/test.sh
+
+test:
+ $(MAKE) test-stdlib TESTOPTS=
+
+test-stdlib:
+ $(MAKE) test-cmd TESTOPTS=
+
+test-goccy:
+ $(MAKE) test-cmd TESTOPTS="-tags jwx_goccy"
+
+test-es256k:
+ $(MAKE) test-cmd TESTOPTS="-tags jwx_es256k"
+
+test-secp256k1-pem:
+ $(MAKE) test-cmd TESTOPTS="-tags jwx_es256k,jwx_secp256k1_pem"
+
+test-asmbase64:
+ $(MAKE) test-cmd TESTOPTS="-tags jwx_asmbase64"
+
+test-alltags:
+ $(MAKE) test-cmd TESTOPTS="-tags jwx_asmbase64,jwx_goccy,jwx_es256k,jwx_secp256k1_pem"
+
+cover-cmd:
+ env MODE=cover ./tools/test.sh
+
+cover:
+ $(MAKE) cover-stdlib
+
+cover-stdlib:
+ $(MAKE) cover-cmd TESTOPTS=
+
+cover-goccy:
+ $(MAKE) cover-cmd TESTOPTS="-tags jwx_goccy"
+
+cover-es256k:
+ $(MAKE) cover-cmd TESTOPTS="-tags jwx_es256k"
+
+cover-secp256k1-pem:
+ $(MAKE) cover-cmd TESTOPTS="-tags jwx_es256k,jwx_secp256k1"
+
+cover-asmbase64:
+ $(MAKE) cover-cmd TESTOPTS="-tags jwx_asmbase64"
+
+cover-alltags:
+ $(MAKE) cover-cmd TESTOPTS="-tags jwx_asmbase64,jwx_goccy,jwx_es256k,jwx_secp256k1_pem"
+
+smoke-cmd:
+ env MODE=short ./tools/test.sh
+
+smoke:
+ $(MAKE) smoke-stdlib
+
+smoke-stdlib:
+ $(MAKE) smoke-cmd TESTOPTS=
+
+smoke-goccy:
+ $(MAKE) smoke-cmd TESTOPTS="-tags jwx_goccy"
+
+smoke-es256k:
+ $(MAKE) smoke-cmd TESTOPTS="-tags jwx_es256k"
+
+smoke-secp256k1-pem:
+ $(MAKE) smoke-cmd TESTOPTS="-tags jwx_es256k,jwx_secp256k1_pem"
+
+smoke-alltags:
+ $(MAKE) smoke-cmd TESTOPTS="-tags jwx_goccy,jwx_es256k,jwx_secp256k1_pem"
+
+viewcover:
+ go tool cover -html=coverage.out
+
+lint:
+ golangci-lint run ./...
+
+check_diffs:
+ ./scripts/check-diff.sh
+
+imports:
+ goimports -w ./
+
+tidy:
+ ./scripts/tidy.sh
+
+jwx:
+ @./tools/cmd/install-jwx.sh
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/README.md b/vendor/github.com/lestrrat-go/jwx/v3/README.md
new file mode 100644
index 0000000000..632033f3cb
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/README.md
@@ -0,0 +1,263 @@
+# github.com/lestrrat-go/jwx/v3 [](https://github.com/lestrrat-go/jwx/actions/workflows/ci.yml) [](https://pkg.go.dev/github.com/lestrrat-go/jwx/v3) [](https://codecov.io/github/lestrrat-go/jwx?branch=v3)
+
+Go module implementing various JWx (JWA/JWE/JWK/JWS/JWT, otherwise known as JOSE) technologies.
+
+If you are using this module in your product or your company, please add your product and/or company name in the [Wiki](https://github.com/lestrrat-go/jwx/wiki/Users)! It really helps keeping up our motivation.
+
+# Features
+
+* Complete coverage of JWA/JWE/JWK/JWS/JWT, not just JWT+minimum tool set.
+ * Supports JWS messages with multiple signatures, both compact and JSON serialization
+ * Supports JWS with detached payload
+ * Supports JWS with unencoded payload (RFC7797)
+ * Supports JWE messages with multiple recipients, both compact and JSON serialization
+ * Most operations work with either JWK or raw keys e.g. *rsa.PrivateKey, *ecdsa.PrivateKey, etc).
+* Opinionated, but very uniform API. Everything is symmetric, and follows a standard convention
+ * jws.Parse/Verify/Sign
+ * jwe.Parse/Encrypt/Decrypt
+ * Arguments are organized as explicit required parameters and optional WithXXXX() style options.
+* Extra utilities
+ * `jwk.Cache` to always keep a JWKS up-to-date
+ * [bazel](https://bazel.build)-ready
+
+Some more in-depth discussion on why you might want to use this library over others
+can be found in the [Description section](#description)
+
+If you are using v0 or v1, you are strongly encouraged to migrate to using v3
+(the version that comes with the README you are reading).
+
+# SYNOPSIS
+
+
+```go
+package examples_test
+
+import (
+ "bytes"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jwe"
+ "github.com/lestrrat-go/jwx/v3/jwk"
+ "github.com/lestrrat-go/jwx/v3/jws"
+ "github.com/lestrrat-go/jwx/v3/jwt"
+)
+
+func Example() {
+ // Parse, serialize, slice and dice JWKs!
+ privkey, err := jwk.ParseKey(jsonRSAPrivateKey)
+ if err != nil {
+ fmt.Printf("failed to parse JWK: %s\n", err)
+ return
+ }
+
+ pubkey, err := jwk.PublicKeyOf(privkey)
+ if err != nil {
+ fmt.Printf("failed to get public key: %s\n", err)
+ return
+ }
+
+ // Work with JWTs!
+ {
+ // Build a JWT!
+ tok, err := jwt.NewBuilder().
+ Issuer(`github.com/lestrrat-go/jwx`).
+ IssuedAt(time.Now()).
+ Build()
+ if err != nil {
+ fmt.Printf("failed to build token: %s\n", err)
+ return
+ }
+
+ // Sign a JWT!
+ signed, err := jwt.Sign(tok, jwt.WithKey(jwa.RS256(), privkey))
+ if err != nil {
+ fmt.Printf("failed to sign token: %s\n", err)
+ return
+ }
+
+ // Verify a JWT!
+ {
+ verifiedToken, err := jwt.Parse(signed, jwt.WithKey(jwa.RS256(), pubkey))
+ if err != nil {
+ fmt.Printf("failed to verify JWS: %s\n", err)
+ return
+ }
+ _ = verifiedToken
+ }
+
+ // Work with *http.Request!
+ {
+ req, err := http.NewRequest(http.MethodGet, `https://github.com/lestrrat-go/jwx`, nil)
+ req.Header.Set(`Authorization`, fmt.Sprintf(`Bearer %s`, signed))
+
+ verifiedToken, err := jwt.ParseRequest(req, jwt.WithKey(jwa.RS256(), pubkey))
+ if err != nil {
+ fmt.Printf("failed to verify token from HTTP request: %s\n", err)
+ return
+ }
+ _ = verifiedToken
+ }
+ }
+
+ // Encrypt and Decrypt arbitrary payload with JWE!
+ {
+ encrypted, err := jwe.Encrypt(payloadLoremIpsum, jwe.WithKey(jwa.RSA_OAEP(), jwkRSAPublicKey))
+ if err != nil {
+ fmt.Printf("failed to encrypt payload: %s\n", err)
+ return
+ }
+
+ decrypted, err := jwe.Decrypt(encrypted, jwe.WithKey(jwa.RSA_OAEP(), jwkRSAPrivateKey))
+ if err != nil {
+ fmt.Printf("failed to decrypt payload: %s\n", err)
+ return
+ }
+
+ if !bytes.Equal(decrypted, payloadLoremIpsum) {
+ fmt.Printf("verified payload did not match\n")
+ return
+ }
+ }
+
+ // Sign and Verify arbitrary payload with JWS!
+ {
+ signed, err := jws.Sign(payloadLoremIpsum, jws.WithKey(jwa.RS256(), jwkRSAPrivateKey))
+ if err != nil {
+ fmt.Printf("failed to sign payload: %s\n", err)
+ return
+ }
+
+ verified, err := jws.Verify(signed, jws.WithKey(jwa.RS256(), jwkRSAPublicKey))
+ if err != nil {
+ fmt.Printf("failed to verify payload: %s\n", err)
+ return
+ }
+
+ if !bytes.Equal(verified, payloadLoremIpsum) {
+ fmt.Printf("verified payload did not match\n")
+ return
+ }
+ }
+ // OUTPUT:
+}
+```
+source: [examples/jwx_readme_example_test.go](https://github.com/lestrrat-go/jwx/blob/v3/examples/jwx_readme_example_test.go)
+
+
+# How-to Documentation
+
+* [API documentation](https://pkg.go.dev/github.com/lestrrat-go/jwx/v3)
+* [How-to style documentation](./docs)
+* [Runnable Examples](./examples)
+
+# Description
+
+This Go module implements JWA, JWE, JWK, JWS, and JWT. Please see the following table for the list of
+available packages:
+
+| Package name | Notes |
+|-----------------------------------------------------------|-------------------------------------------------|
+| [jwt](https://github.com/lestrrat-go/jwx/tree/v3/jwt) | [RFC 7519](https://tools.ietf.org/html/rfc7519) |
+| [jwk](https://github.com/lestrrat-go/jwx/tree/v3/jwk) | [RFC 7517](https://tools.ietf.org/html/rfc7517) + [RFC 7638](https://tools.ietf.org/html/rfc7638) |
+| [jwa](https://github.com/lestrrat-go/jwx/tree/v3/jwa) | [RFC 7518](https://tools.ietf.org/html/rfc7518) |
+| [jws](https://github.com/lestrrat-go/jwx/tree/v3/jws) | [RFC 7515](https://tools.ietf.org/html/rfc7515) + [RFC 7797](https://tools.ietf.org/html/rfc7797) |
+| [jwe](https://github.com/lestrrat-go/jwx/tree/v3/jwe) | [RFC 7516](https://tools.ietf.org/html/rfc7516) |
+## History
+
+My goal was to write a server that heavily uses JWK and JWT. At first glance
+the libraries that already exist seemed sufficient, but soon I realized that
+
+1. To completely implement the protocols, I needed the entire JWT, JWK, JWS, JWE (and JWA, by necessity).
+2. Most of the libraries that existed only deal with a subset of the various JWx specifications that were necessary to implement their specific needs
+
+For example, a certain library looks like it had most of JWS, JWE, JWK covered, but then it lacked the ability to include private claims in its JWT responses. Another library had support of all the private claims, but completely lacked in its flexibility to generate various different response formats.
+
+Because I was writing the server side (and the client side for testing), I needed the *entire* JOSE toolset to properly implement my server, **and** they needed to be *flexible* enough to fulfill the entire spec that I was writing.
+
+So here's `github.com/lestrrat-go/jwx/v3`. This library is extensible, customizable, and hopefully well organized to the point that it is easy for you to slice and dice it.
+
+## Why would I use this library?
+
+There are several other major Go modules that handle JWT and related data formats,
+so why should you use this library?
+
+From a purely functional perspective, the only major difference is this:
+Whereas most other projects only deal with what they seem necessary to handle
+JWTs, this module handles the **_entire_** spectrum of JWS, JWE, JWK, and JWT.
+
+That is, if you need to not only parse JWTs, but also to control JWKs, or
+if you need to handle payloads that are NOT JWTs, you should probably consider
+using this module. You should also note that JWT is built _on top_ of those
+other technologies. You simply cannot have a complete JWT package without
+implementing the entirety of JWS/JWE/JWK, which this library does.
+
+Next, from an implementation perspective, this module differs significantly
+from others in that it tries very hard to expose only the APIs, and not the
+internal data. For example, individual JWT claims are not accessible through
+struct field lookups. You need to use one of the getter methods.
+
+This is because this library takes the stance that the end user is fully capable
+and even willing to shoot themselves on the foot when presented with a lax
+API. By making sure that users do not have access to open structs, we can protect
+users from doing silly things like creating _incomplete_ structs, or access the
+structs concurrently without any protection. This structure also allows
+us to put extra smarts in the structs, such as doing the right thing when
+you want to parse / write custom fields (this module does not require the user
+to specify alternate structs to parse objects with custom fields)
+
+In the end I think it comes down to your usage pattern, and priorities.
+Some general guidelines that come to mind are:
+
+* If you want a single library to handle everything JWx, such as using JWE, JWK, JWS, handling [auto-refreshing JWKs](https://github.com/lestrrat-go/jwx/blob/v3/docs/04-jwk.md#auto-refreshing-remote-keys), use this module.
+* If you want to honor all possible custom fields transparently, use this module.
+* If you want a standardized clean API, use this module.
+
+Otherwise, feel free to choose something else.
+
+# Contributions
+
+## Issues
+
+For bug reports and feature requests, please try to follow the issue templates as much as possible.
+For either bug reports or feature requests, failing tests are even better.
+
+## Pull Requests
+
+Please make sure to include tests that exercise the changes you made.
+
+If you are editing auto-generated files (those files with the `_gen.go` suffix, please make sure that you do the following:
+
+1. Edit the generator, not the generated files (e.g. internal/cmd/genreadfile/main.go)
+2. Run `make generate` (or `go generate`) to generate the new code
+3. Commit _both_ the generator _and_ the generated files
+
+## Discussions / Usage
+
+Please try [discussions](https://github.com/lestrrat-go/jwx/tree/v3/discussions) first.
+
+# Related Modules
+
+* [github.com/lestrrat-go/echo-middleware-jwx](https://github.com/lestrrat-go/echo-middleware-jwx) - Sample Echo middleware
+* [github.com/jwx-go/crypto-signer/gcp](https://github.com/jwx-go/crypto-signer/tree/main/gcp) - GCP KMS wrapper that implements [`crypto.Signer`](https://pkg.go.dev/crypto#Signer)
+* [github.com/jwx-go/crypto-signer/aws](https://github.com/jwx-go/crypto-signer/tree/main/aws) - AWS KMS wrapper that implements [`crypto.Signer`](https://pkg.go.dev/crypto#Signer)
+
+# Credits
+
+* Initial work on this library was generously sponsored by HDE Inc (https://www.hde.co.jp)
+* Lots of code, especially JWE was initially taken from go-jose library (https://github.com/square/go-jose)
+* Lots of individual contributors have helped this project over the years. Thank each and everyone of you very much.
+
+# Quid pro quo
+
+If you use this software to build products in a for-profit organization, we ask you to _consider_
+contributing back to FOSS in the following manner:
+
+* For every 100 employees (direct hires) of your organization, please consider contributing minimum of $1 every year to either this project, **or** another FOSS projects that this project uses. For example, for 100 employees, we ask you contribute $100 yearly; for 10,000 employees, we ask you contribute $10,000 yearly.
+* If possible, please make this information public. You do not need to disclose the amount you are contributing, but please make the information that you are contributing to particular FOSS projects public. For this project, please consider writing your name on the [Wiki](https://github.com/lestrrat-go/jwx/wiki/Users)
+
+This is _NOT_ a licensing term: you are still free to use this software according to the license it
+comes with. This clause is only a plea for people to acknowledge the work from FOSS developers whose
+work you rely on each and everyday.
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/SECURITY.md b/vendor/github.com/lestrrat-go/jwx/v3/SECURITY.md
new file mode 100644
index 0000000000..601dced5cd
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/SECURITY.md
@@ -0,0 +1,18 @@
+# Security Policy
+
+## Supported Versions
+
+Most recent two major versions will receive security updates
+
+| Version | Supported |
+| -------- | ------------------ |
+| v3.x.x | :white_check_mark: |
+| v2.x.x | :white_check_mark: |
+| < v2.0.0 | :x: |
+
+## Reporting a Vulnerability
+
+If you think you found a vulnerability, please report it via [GitHub Security Advisory](https://github.com/lestrrat-go/jwx/security/advisories/new).
+Please include explicit steps to reproduce the security issue.
+
+We will do our best to respond in a timely manner, but please also be aware that this project is maintained by a very limited number of people. Please help us with test code and such.
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/WORKSPACE b/vendor/github.com/lestrrat-go/jwx/v3/WORKSPACE
new file mode 100644
index 0000000000..c8578d8b0a
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/WORKSPACE
@@ -0,0 +1,2 @@
+# Empty WORKSPACE file for bzlmod compatibility
+# All dependencies are now managed in MODULE.bazel
\ No newline at end of file
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/cert/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/cert/BUILD.bazel
new file mode 100644
index 0000000000..f308530bcf
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/cert/BUILD.bazel
@@ -0,0 +1,34 @@
+load("@rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "cert",
+ srcs = [
+ "cert.go",
+ "chain.go",
+ ],
+ importpath = "github.com/lestrrat-go/jwx/v3/cert",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//internal/base64",
+ "//internal/tokens",
+ ],
+)
+
+go_test(
+ name = "cert_test",
+ srcs = [
+ "cert_test.go",
+ "chain_test.go",
+ ],
+ deps = [
+ ":cert",
+ "//internal/jwxtest",
+ "@com_github_stretchr_testify//require",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":cert",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/cert/cert.go b/vendor/github.com/lestrrat-go/jwx/v3/cert/cert.go
new file mode 100644
index 0000000000..efefbcb417
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/cert/cert.go
@@ -0,0 +1,48 @@
+package cert
+
+import (
+ "crypto/x509"
+ stdlibb64 "encoding/base64"
+ "fmt"
+ "io"
+
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+)
+
+// Create is a wrapper around x509.CreateCertificate, but it additionally
+// encodes it in base64 so that it can be easily added to `x5c` fields
+func Create(rand io.Reader, template, parent *x509.Certificate, pub, priv any) ([]byte, error) {
+ der, err := x509.CreateCertificate(rand, template, parent, pub, priv)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to create x509 certificate: %w`, err)
+ }
+ return EncodeBase64(der)
+}
+
+// EncodeBase64 is a utility function to encode ASN.1 DER certificates
+// using base64 encoding. This operation is normally done by `pem.Encode`
+// but since PEM would include the markers (`-----BEGIN`, and the like)
+// while `x5c` fields do not need this, this function can be used to
+// shave off a few lines
+func EncodeBase64(der []byte) ([]byte, error) {
+ enc := stdlibb64.StdEncoding
+ dst := make([]byte, enc.EncodedLen(len(der)))
+ enc.Encode(dst, der)
+ return dst, nil
+}
+
+// Parse is a utility function to decode a base64 encoded
+// ASN.1 DER format certificate, and to parse the byte sequence.
+// The certificate must be in PKIX format, and it must not contain PEM markers
+func Parse(src []byte) (*x509.Certificate, error) {
+ dst, err := base64.Decode(src)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to base64 decode the certificate: %w`, err)
+ }
+
+ cert, err := x509.ParseCertificate(dst)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to parse x509 certificate: %w`, err)
+ }
+ return cert, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/cert/chain.go b/vendor/github.com/lestrrat-go/jwx/v3/cert/chain.go
new file mode 100644
index 0000000000..112274669a
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/cert/chain.go
@@ -0,0 +1,80 @@
+package cert
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+)
+
+// Chain represents a certificate chain as used in the `x5c` field of
+// various objects within JOSE.
+//
+// It stores the certificates as a list of base64 encoded []byte
+// sequence. By definition these values must PKIX encoded.
+type Chain struct {
+ certificates [][]byte
+}
+
+func (cc Chain) MarshalJSON() ([]byte, error) {
+ var buf bytes.Buffer
+ buf.WriteByte(tokens.OpenSquareBracket)
+ for i, cert := range cc.certificates {
+ if i > 0 {
+ buf.WriteByte(tokens.Comma)
+ }
+ buf.WriteByte('"')
+ buf.Write(cert)
+ buf.WriteByte('"')
+ }
+ buf.WriteByte(tokens.CloseSquareBracket)
+ return buf.Bytes(), nil
+}
+
+func (cc *Chain) UnmarshalJSON(data []byte) error {
+ var tmp []string
+ if err := json.Unmarshal(data, &tmp); err != nil {
+ return fmt.Errorf(`failed to unmarshal certificate chain: %w`, err)
+ }
+
+ certs := make([][]byte, len(tmp))
+ for i, cert := range tmp {
+ certs[i] = []byte(cert)
+ }
+ cc.certificates = certs
+ return nil
+}
+
+// Get returns the n-th ASN.1 DER + base64 encoded certificate
+// stored. `false` will be returned in the second argument if
+// the corresponding index is out of range.
+func (cc *Chain) Get(index int) ([]byte, bool) {
+ if index < 0 || index >= len(cc.certificates) {
+ return nil, false
+ }
+
+ return cc.certificates[index], true
+}
+
+// Len returns the number of certificates stored in this Chain
+func (cc *Chain) Len() int {
+ return len(cc.certificates)
+}
+
+var pemStart = []byte("----- BEGIN CERTIFICATE -----")
+var pemEnd = []byte("----- END CERTIFICATE -----")
+
+func (cc *Chain) AddString(der string) error {
+ return cc.Add([]byte(der))
+}
+
+func (cc *Chain) Add(der []byte) error {
+ // We're going to be nice and remove marker lines if they
+ // give it to us
+ der = bytes.TrimPrefix(der, pemStart)
+ der = bytes.TrimSuffix(der, pemEnd)
+ der = bytes.TrimSpace(der)
+ cc.certificates = append(cc.certificates, der)
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/codecov.yml b/vendor/github.com/lestrrat-go/jwx/v3/codecov.yml
new file mode 100644
index 0000000000..130effd7a6
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/codecov.yml
@@ -0,0 +1,2 @@
+codecov:
+ allow_coverage_offsets: true
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/format.go b/vendor/github.com/lestrrat-go/jwx/v3/format.go
new file mode 100644
index 0000000000..6cb6efe7eb
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/format.go
@@ -0,0 +1,104 @@
+package jwx
+
+import (
+ "bytes"
+ "encoding/json"
+
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+)
+
+type FormatKind int
+
+// These constants describe the result from guessing the format
+// of the incoming buffer.
+const (
+ // InvalidFormat is returned when the format of the incoming buffer
+ // has been deemed conclusively invalid
+ InvalidFormat FormatKind = iota
+ // UnknownFormat is returned when GuessFormat was not able to conclusively
+ // determine the format of the
+ UnknownFormat
+ JWE
+ JWS
+ JWK
+ JWKS
+ JWT
+)
+
+type formatHint struct {
+ Payload json.RawMessage `json:"payload"` // Only in JWS
+ Signatures json.RawMessage `json:"signatures"` // Only in JWS
+ Ciphertext json.RawMessage `json:"ciphertext"` // Only in JWE
+ KeyType json.RawMessage `json:"kty"` // Only in JWK
+ Keys json.RawMessage `json:"keys"` // Only in JWKS
+ Audience json.RawMessage `json:"aud"` // Only in JWT
+}
+
+// GuessFormat is used to guess the format the given payload is in
+// using heuristics. See the type FormatKind for a full list of
+// possible types.
+//
+// This may be useful in determining your next action when you may
+// encounter a payload that could either be a JWE, JWS, or a plain JWT.
+//
+// Because JWTs are almost always JWS signed, you may be thrown off
+// if you pass what you think is a JWT payload to this function.
+// If the function is in the "Compact" format, it means it's a JWS
+// signed message, and its payload is the JWT. Therefore this function
+// will return JWS, not JWT.
+//
+// This function requires an extra parsing of the payload, and therefore
+// may be inefficient if you call it every time before parsing.
+func GuessFormat(payload []byte) FormatKind {
+ // The check against kty, keys, and aud are something this library
+ // made up. for the distinctions between JWE and JWS, we used
+ // https://datatracker.ietf.org/doc/html/rfc7516#section-9.
+ //
+ // The above RFC described several ways to distinguish between
+ // a JWE and JWS JSON, but we're only using one of them
+
+ payload = bytes.TrimSpace(payload)
+ if len(payload) <= 0 {
+ return UnknownFormat
+ }
+
+ if payload[0] != tokens.OpenCurlyBracket {
+ // Compact format. It's probably a JWS or JWE
+ sep := []byte{tokens.Period} // I want to const this :/
+
+ // Note: this counts the number of occurrences of the
+ // separator, but the RFC talks about the number of segments.
+ // number of tokens.Period == segments - 1, so that's why we have 2 and 4 here
+ switch count := bytes.Count(payload, sep); count {
+ case 2:
+ return JWS
+ case 4:
+ return JWE
+ default:
+ return InvalidFormat
+ }
+ }
+
+ // If we got here, we probably have JSON.
+ var h formatHint
+ if err := json.Unmarshal(payload, &h); err != nil {
+ return UnknownFormat
+ }
+
+ if h.Audience != nil {
+ return JWT
+ }
+ if h.KeyType != nil {
+ return JWK
+ }
+ if h.Keys != nil {
+ return JWKS
+ }
+ if h.Ciphertext != nil {
+ return JWE
+ }
+ if h.Signatures != nil && h.Payload != nil {
+ return JWS
+ }
+ return UnknownFormat
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/formatkind_string_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/formatkind_string_gen.go
new file mode 100644
index 0000000000..38abd1bc47
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/formatkind_string_gen.go
@@ -0,0 +1,29 @@
+// Code generated by "stringer -type=FormatKind"; DO NOT EDIT.
+
+package jwx
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[InvalidFormat-0]
+ _ = x[UnknownFormat-1]
+ _ = x[JWE-2]
+ _ = x[JWS-3]
+ _ = x[JWK-4]
+ _ = x[JWKS-5]
+ _ = x[JWT-6]
+}
+
+const _FormatKind_name = "InvalidFormatUnknownFormatJWEJWSJWKJWKSJWT"
+
+var _FormatKind_index = [...]uint8{0, 13, 26, 29, 32, 35, 39, 42}
+
+func (i FormatKind) String() string {
+ if i < 0 || i >= FormatKind(len(_FormatKind_index)-1) {
+ return "FormatKind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _FormatKind_name[_FormatKind_index[i]:_FormatKind_index[i+1]]
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/base64/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/internal/base64/BUILD.bazel
new file mode 100644
index 0000000000..57da5179f3
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/base64/BUILD.bazel
@@ -0,0 +1,21 @@
+load("@rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "base64",
+ srcs = ["base64.go"],
+ importpath = "github.com/lestrrat-go/jwx/v3/internal/base64",
+ visibility = ["//:__subpackages__"],
+)
+
+go_test(
+ name = "base64_test",
+ srcs = ["base64_test.go"],
+ embed = [":base64"],
+ deps = ["@com_github_stretchr_testify//require"],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":base64",
+ visibility = ["//:__subpackages__"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/base64/asmbase64.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/base64/asmbase64.go
new file mode 100644
index 0000000000..6e83ecc4a5
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/base64/asmbase64.go
@@ -0,0 +1,51 @@
+//go:build jwx_asmbase64
+
+package base64
+
+import (
+ "fmt"
+ "slices"
+
+ asmbase64 "github.com/segmentio/asm/base64"
+)
+
+func init() {
+ SetEncoder(asmEncoder{asmbase64.RawURLEncoding})
+ SetDecoder(asmDecoder{})
+}
+
+type asmEncoder struct {
+ *asmbase64.Encoding
+}
+
+func (e asmEncoder) AppendEncode(dst, src []byte) []byte {
+ n := e.Encoding.EncodedLen(len(src))
+ dst = slices.Grow(dst, n)
+ e.Encoding.Encode(dst[len(dst):][:n], src)
+ return dst[:len(dst)+n]
+}
+
+type asmDecoder struct{}
+
+func (d asmDecoder) Decode(src []byte) ([]byte, error) {
+ var enc *asmbase64.Encoding
+ switch Guess(src) {
+ case Std:
+ enc = asmbase64.StdEncoding
+ case RawStd:
+ enc = asmbase64.RawStdEncoding
+ case URL:
+ enc = asmbase64.URLEncoding
+ case RawURL:
+ enc = asmbase64.RawURLEncoding
+ default:
+ return nil, fmt.Errorf(`invalid encoding`)
+ }
+
+ dst := make([]byte, enc.DecodedLen(len(src)))
+ n, err := enc.Decode(dst, src)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to decode source: %w`, err)
+ }
+ return dst[:n], nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/base64/base64.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/base64/base64.go
new file mode 100644
index 0000000000..5ed8e35006
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/base64/base64.go
@@ -0,0 +1,139 @@
+package base64
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/binary"
+ "fmt"
+ "sync"
+)
+
+type Decoder interface {
+ Decode([]byte) ([]byte, error)
+}
+
+type Encoder interface {
+ Encode([]byte, []byte)
+ EncodedLen(int) int
+ EncodeToString([]byte) string
+ AppendEncode([]byte, []byte) []byte
+}
+
+var muEncoder sync.RWMutex
+var encoder Encoder = base64.RawURLEncoding
+var muDecoder sync.RWMutex
+var decoder Decoder = defaultDecoder{}
+
+func SetEncoder(enc Encoder) {
+ muEncoder.Lock()
+ defer muEncoder.Unlock()
+ encoder = enc
+}
+
+func getEncoder() Encoder {
+ muEncoder.RLock()
+ defer muEncoder.RUnlock()
+ return encoder
+}
+
+func DefaultEncoder() Encoder {
+ return getEncoder()
+}
+
+func SetDecoder(dec Decoder) {
+ muDecoder.Lock()
+ defer muDecoder.Unlock()
+ decoder = dec
+}
+
+func getDecoder() Decoder {
+ muDecoder.RLock()
+ defer muDecoder.RUnlock()
+ return decoder
+}
+
+func Encode(src []byte) []byte {
+ encoder := getEncoder()
+ dst := make([]byte, encoder.EncodedLen(len(src)))
+ encoder.Encode(dst, src)
+ return dst
+}
+
+func EncodeToString(src []byte) string {
+ return getEncoder().EncodeToString(src)
+}
+
+func EncodeUint64ToString(v uint64) string {
+ data := make([]byte, 8)
+ binary.BigEndian.PutUint64(data, v)
+
+ i := 0
+ for ; i < len(data); i++ {
+ if data[i] != 0x0 {
+ break
+ }
+ }
+
+ return EncodeToString(data[i:])
+}
+
+const (
+ InvalidEncoding = iota
+ Std
+ URL
+ RawStd
+ RawURL
+)
+
+func Guess(src []byte) int {
+ var isRaw = !bytes.HasSuffix(src, []byte{'='})
+ var isURL = !bytes.ContainsAny(src, "+/")
+ switch {
+ case isRaw && isURL:
+ return RawURL
+ case isURL:
+ return URL
+ case isRaw:
+ return RawStd
+ default:
+ return Std
+ }
+}
+
+// defaultDecoder is a Decoder that detects the encoding of the source and
+// decodes it accordingly. This shouldn't really be required per the spec, but
+// it exist because we have seen in the wild JWTs that are encoded using
+// various versions of the base64 encoding.
+type defaultDecoder struct{}
+
+func (defaultDecoder) Decode(src []byte) ([]byte, error) {
+ var enc *base64.Encoding
+
+ switch Guess(src) {
+ case RawURL:
+ enc = base64.RawURLEncoding
+ case URL:
+ enc = base64.URLEncoding
+ case RawStd:
+ enc = base64.RawStdEncoding
+ case Std:
+ enc = base64.StdEncoding
+ default:
+ return nil, fmt.Errorf(`invalid encoding`)
+ }
+
+ dst := make([]byte, enc.DecodedLen(len(src)))
+ n, err := enc.Decode(dst, src)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to decode source: %w`, err)
+ }
+ return dst[:n], nil
+}
+
+func Decode(src []byte) ([]byte, error) {
+ return getDecoder().Decode(src)
+}
+
+func DecodeString(src string) ([]byte, error) {
+ return getDecoder().Decode([]byte(src))
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/ecutil/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/internal/ecutil/BUILD.bazel
new file mode 100644
index 0000000000..3ccdcf372a
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/ecutil/BUILD.bazel
@@ -0,0 +1,14 @@
+load("@rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "ecutil",
+ srcs = ["ecutil.go"],
+ importpath = "github.com/lestrrat-go/jwx/v3/internal/ecutil",
+ visibility = ["//:__subpackages__"],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":ecutil",
+ visibility = ["//:__subpackages__"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/ecutil/ecutil.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/ecutil/ecutil.go
new file mode 100644
index 0000000000..cf0bd4ac48
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/ecutil/ecutil.go
@@ -0,0 +1,76 @@
+// Package ecutil defines tools that help with elliptic curve related
+// computation
+package ecutil
+
+import (
+ "crypto/elliptic"
+ "math/big"
+ "sync"
+)
+
+const (
+ // size of buffer that needs to be allocated for EC521 curve
+ ec521BufferSize = 66 // (521 / 8) + 1
+)
+
+var ecpointBufferPool = sync.Pool{
+ New: func() any {
+ // In most cases the curve bit size will be less than this length
+ // so allocate the maximum, and keep reusing
+ buf := make([]byte, 0, ec521BufferSize)
+ return &buf
+ },
+}
+
+func getCrvFixedBuffer(size int) []byte {
+ //nolint:forcetypeassert
+ buf := *(ecpointBufferPool.Get().(*[]byte))
+ if size > ec521BufferSize && cap(buf) < size {
+ buf = append(buf, make([]byte, size-cap(buf))...)
+ }
+ return buf[:size]
+}
+
+// ReleaseECPointBuffer releases the []byte buffer allocated.
+func ReleaseECPointBuffer(buf []byte) {
+ buf = buf[:cap(buf)]
+ buf[0] = 0x0
+ for i := 1; i < len(buf); i *= 2 {
+ copy(buf[i:], buf[:i])
+ }
+ buf = buf[:0]
+ ecpointBufferPool.Put(&buf)
+}
+
+func CalculateKeySize(crv elliptic.Curve) int {
+ // We need to create a buffer that fits the entire curve.
+ // If the curve size is 66, that fits in 9 bytes. If the curve
+ // size is 64, it fits in 8 bytes.
+ bits := crv.Params().BitSize
+
+ // For most common cases we know before hand what the byte length
+ // is going to be. optimize
+ var inBytes int
+ switch bits {
+ case 224, 256, 384: // TODO: use constant?
+ inBytes = bits / 8
+ case 521:
+ inBytes = ec521BufferSize
+ default:
+ inBytes = bits / 8
+ if (bits % 8) != 0 {
+ inBytes++
+ }
+ }
+
+ return inBytes
+}
+
+// AllocECPointBuffer allocates a buffer for the given point in the given
+// curve. This buffer should be released using the ReleaseECPointBuffer
+// function.
+func AllocECPointBuffer(v *big.Int, crv elliptic.Curve) []byte {
+ buf := getCrvFixedBuffer(CalculateKeySize(crv))
+ v.FillBytes(buf)
+ return buf
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/json/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/BUILD.bazel
new file mode 100644
index 0000000000..4e2dbe12b7
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/BUILD.bazel
@@ -0,0 +1,19 @@
+load("@rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "json",
+ srcs = [
+ "json.go",
+ "registry.go",
+ "stdlib.go",
+ ],
+ importpath = "github.com/lestrrat-go/jwx/v3/internal/json",
+ visibility = ["//:__subpackages__"],
+ deps = ["//internal/base64"],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":json",
+ visibility = ["//:__subpackages__"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/json/goccy.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/goccy.go
new file mode 100644
index 0000000000..e70a3c1edc
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/goccy.go
@@ -0,0 +1,49 @@
+//go:build jwx_goccy
+// +build jwx_goccy
+
+package json
+
+import (
+ "io"
+
+ "github.com/goccy/go-json"
+)
+
+type Decoder = json.Decoder
+type Delim = json.Delim
+type Encoder = json.Encoder
+type Marshaler = json.Marshaler
+type Number = json.Number
+type RawMessage = json.RawMessage
+type Unmarshaler = json.Unmarshaler
+
+func Engine() string {
+ return "github.com/goccy/go-json"
+}
+
+// NewDecoder respects the values specified in DecoderSettings,
+// and creates a Decoder that has certain features turned on/off
+func NewDecoder(r io.Reader) *json.Decoder {
+ dec := json.NewDecoder(r)
+
+ if UseNumber() {
+ dec.UseNumber()
+ }
+
+ return dec
+}
+
+// NewEncoder is just a proxy for "encoding/json".NewEncoder
+func NewEncoder(w io.Writer) *json.Encoder {
+ return json.NewEncoder(w)
+}
+
+// Marshal is just a proxy for "encoding/json".Marshal
+func Marshal(v any) ([]byte, error) {
+ return json.Marshal(v)
+}
+
+// MarshalIndent is just a proxy for "encoding/json".MarshalIndent
+func MarshalIndent(v any, prefix, indent string) ([]byte, error) {
+ return json.MarshalIndent(v, prefix, indent)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/json/json.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/json.go
new file mode 100644
index 0000000000..c1917ef27a
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/json.go
@@ -0,0 +1,127 @@
+package json
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "sync/atomic"
+
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+)
+
+var useNumber uint32 // TODO: at some point, change to atomic.Bool
+
+func UseNumber() bool {
+ return atomic.LoadUint32(&useNumber) == 1
+}
+
+// Sets the global configuration for json decoding
+func DecoderSettings(inUseNumber bool) {
+ var val uint32
+ if inUseNumber {
+ val = 1
+ }
+ atomic.StoreUint32(&useNumber, val)
+}
+
+// Unmarshal respects the values specified in DecoderSettings,
+// and uses a Decoder that has certain features turned on/off
+func Unmarshal(b []byte, v any) error {
+ dec := NewDecoder(bytes.NewReader(b))
+ return dec.Decode(v)
+}
+
+func AssignNextBytesToken(dst *[]byte, dec *Decoder) error {
+ var val string
+ if err := dec.Decode(&val); err != nil {
+ return fmt.Errorf(`error reading next value: %w`, err)
+ }
+
+ buf, err := base64.DecodeString(val)
+ if err != nil {
+ return fmt.Errorf(`expected base64 encoded []byte (%T)`, val)
+ }
+ *dst = buf
+ return nil
+}
+
+func ReadNextStringToken(dec *Decoder) (string, error) {
+ var val string
+ if err := dec.Decode(&val); err != nil {
+ return "", fmt.Errorf(`error reading next value: %w`, err)
+ }
+ return val, nil
+}
+
+func AssignNextStringToken(dst **string, dec *Decoder) error {
+ val, err := ReadNextStringToken(dec)
+ if err != nil {
+ return err
+ }
+ *dst = &val
+ return nil
+}
+
+// FlattenAudience is a flag to specify if we should flatten the "aud"
+// entry to a string when there's only one entry.
+// In jwx < 1.1.8 we just dumped everything as an array of strings,
+// but apparently AWS Cognito doesn't handle this well.
+//
+// So now we have the ability to dump "aud" as a string if there's
+// only one entry, but we need to retain the old behavior so that
+// we don't accidentally break somebody else's code. (e.g. messing
+// up how signatures are calculated)
+var FlattenAudience uint32
+
+func MarshalAudience(aud []string, flatten bool) ([]byte, error) {
+ var val any
+ if len(aud) == 1 && flatten {
+ val = aud[0]
+ } else {
+ val = aud
+ }
+ return Marshal(val)
+}
+
+func EncodeAudience(enc *Encoder, aud []string, flatten bool) error {
+ var val any
+ if len(aud) == 1 && flatten {
+ val = aud[0]
+ } else {
+ val = aud
+ }
+ return enc.Encode(val)
+}
+
+// DecodeCtx is an interface for objects that needs that extra something
+// when decoding JSON into an object.
+type DecodeCtx interface {
+ Registry() *Registry
+}
+
+// DecodeCtxContainer is used to differentiate objects that can carry extra
+// decoding hints and those who can't.
+type DecodeCtxContainer interface {
+ DecodeCtx() DecodeCtx
+ SetDecodeCtx(DecodeCtx)
+}
+
+// stock decodeCtx. should cover 80% of the cases
+type decodeCtx struct {
+ registry *Registry
+}
+
+func NewDecodeCtx(r *Registry) DecodeCtx {
+ return &decodeCtx{registry: r}
+}
+
+func (dc *decodeCtx) Registry() *Registry {
+ return dc.registry
+}
+
+func Dump(v any) {
+ enc := NewEncoder(os.Stdout)
+ enc.SetIndent("", " ")
+ //nolint:errchkjson
+ _ = enc.Encode(v)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/json/registry.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/registry.go
new file mode 100644
index 0000000000..04a6a4c4a5
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/registry.go
@@ -0,0 +1,90 @@
+package json
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+)
+
+// CustomDecoder is the interface we expect from RegisterCustomField in jws, jwe, jwk, and jwt packages.
+type CustomDecoder interface {
+ // Decode takes a JSON encoded byte slice and returns the desired
+ // decoded value,which will be used as the value for that field
+ // registered through RegisterCustomField
+ Decode([]byte) (any, error)
+}
+
+// CustomDecodeFunc is a stateless, function-based implementation of CustomDecoder
+type CustomDecodeFunc func([]byte) (any, error)
+
+func (fn CustomDecodeFunc) Decode(data []byte) (any, error) {
+ return fn(data)
+}
+
+type objectTypeDecoder struct {
+ typ reflect.Type
+ name string
+}
+
+func (dec *objectTypeDecoder) Decode(data []byte) (any, error) {
+ ptr := reflect.New(dec.typ).Interface()
+ if err := Unmarshal(data, ptr); err != nil {
+ return nil, fmt.Errorf(`failed to decode field %s: %w`, dec.name, err)
+ }
+ return reflect.ValueOf(ptr).Elem().Interface(), nil
+}
+
+type Registry struct {
+ mu *sync.RWMutex
+ ctrs map[string]CustomDecoder
+}
+
+func NewRegistry() *Registry {
+ return &Registry{
+ mu: &sync.RWMutex{},
+ ctrs: make(map[string]CustomDecoder),
+ }
+}
+
+func (r *Registry) Register(name string, object any) {
+ if object == nil {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ delete(r.ctrs, name)
+ return
+ }
+
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if ctr, ok := object.(CustomDecoder); ok {
+ r.ctrs[name] = ctr
+ } else {
+ r.ctrs[name] = &objectTypeDecoder{
+ typ: reflect.TypeOf(object),
+ name: name,
+ }
+ }
+}
+
+func (r *Registry) Decode(dec *Decoder, name string) (any, error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if ctr, ok := r.ctrs[name]; ok {
+ var raw RawMessage
+ if err := dec.Decode(&raw); err != nil {
+ return nil, fmt.Errorf(`failed to decode field %s: %w`, name, err)
+ }
+ v, err := ctr.Decode([]byte(raw))
+ if err != nil {
+ return nil, fmt.Errorf(`failed to decode field %s: %w`, name, err)
+ }
+ return v, nil
+ }
+
+ var decoded any
+ if err := dec.Decode(&decoded); err != nil {
+ return nil, fmt.Errorf(`failed to decode field %s: %w`, name, err)
+ }
+ return decoded, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/json/stdlib.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/stdlib.go
new file mode 100644
index 0000000000..6f416ec89a
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/stdlib.go
@@ -0,0 +1,47 @@
+//go:build !jwx_goccy
+// +build !jwx_goccy
+
+package json
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type Decoder = json.Decoder
+type Delim = json.Delim
+type Encoder = json.Encoder
+type Marshaler = json.Marshaler
+type Number = json.Number
+type RawMessage = json.RawMessage
+type Unmarshaler = json.Unmarshaler
+
+func Engine() string {
+ return "encoding/json"
+}
+
+// NewDecoder respects the values specified in DecoderSettings,
+// and creates a Decoder that has certain features turned on/off
+func NewDecoder(r io.Reader) *json.Decoder {
+ dec := json.NewDecoder(r)
+
+ if UseNumber() {
+ dec.UseNumber()
+ }
+
+ return dec
+}
+
+func NewEncoder(w io.Writer) *json.Encoder {
+ return json.NewEncoder(w)
+}
+
+// Marshal is just a proxy for "encoding/json".Marshal
+func Marshal(v any) ([]byte, error) {
+ return json.Marshal(v)
+}
+
+// MarshalIndent is just a proxy for "encoding/json".MarshalIndent
+func MarshalIndent(v any, prefix, indent string) ([]byte, error) {
+ return json.MarshalIndent(v, prefix, indent)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/jwxio/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/internal/jwxio/BUILD.bazel
new file mode 100644
index 0000000000..c70c4d2871
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/jwxio/BUILD.bazel
@@ -0,0 +1,8 @@
+load("@rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "jwxio",
+ srcs = ["jwxio.go"],
+ importpath = "github.com/lestrrat-go/jwx/v3/internal/jwxio",
+ visibility = ["//:__subpackages__"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/jwxio/jwxio.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/jwxio/jwxio.go
new file mode 100644
index 0000000000..8396417a9d
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/jwxio/jwxio.go
@@ -0,0 +1,29 @@
+package jwxio
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "strings"
+)
+
+var errNonFiniteSource = errors.New(`cannot read from non-finite source`)
+
+func NonFiniteSourceError() error {
+ return errNonFiniteSource
+}
+
+// ReadAllFromFiniteSource reads all data from a io.Reader _if_ it comes from a
+// finite source.
+func ReadAllFromFiniteSource(rdr io.Reader) ([]byte, error) {
+ switch rdr.(type) {
+ case *bytes.Reader, *bytes.Buffer, *strings.Reader:
+ data, err := io.ReadAll(rdr)
+ if err != nil {
+ return nil, err
+ }
+ return data, nil
+ default:
+ return nil, errNonFiniteSource
+ }
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/keyconv/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/internal/keyconv/BUILD.bazel
new file mode 100644
index 0000000000..d46d2f3814
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/keyconv/BUILD.bazel
@@ -0,0 +1,31 @@
+load("@rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "keyconv",
+ srcs = ["keyconv.go"],
+ importpath = "github.com/lestrrat-go/jwx/v3/internal/keyconv",
+ visibility = ["//:__subpackages__"],
+ deps = [
+ "//jwk",
+ "@com_github_lestrrat_go_blackmagic//:blackmagic",
+ "@org_golang_x_crypto//ed25519",
+ ],
+)
+
+go_test(
+ name = "keyconv_test",
+ srcs = ["keyconv_test.go"],
+ deps = [
+ ":keyconv",
+ "//internal/jwxtest",
+ "//jwa",
+ "//jwk",
+ "@com_github_stretchr_testify//require",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":keyconv",
+ visibility = ["//:__subpackages__"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/keyconv/keyconv.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/keyconv/keyconv.go
new file mode 100644
index 0000000000..751fd8f05a
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/keyconv/keyconv.go
@@ -0,0 +1,354 @@
+package keyconv
+
+import (
+ "crypto"
+ "crypto/ecdh"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "fmt"
+ "math/big"
+
+ "github.com/lestrrat-go/blackmagic"
+ "github.com/lestrrat-go/jwx/v3/jwk"
+)
+
+// RSAPrivateKey assigns src to dst.
+// `dst` should be a pointer to a rsa.PrivateKey.
+// `src` may be rsa.PrivateKey, *rsa.PrivateKey, or a jwk.Key
+func RSAPrivateKey(dst, src any) error {
+ if jwkKey, ok := src.(jwk.Key); ok {
+ var raw rsa.PrivateKey
+ if err := jwk.Export(jwkKey, &raw); err != nil {
+ return fmt.Errorf(`failed to produce rsa.PrivateKey from %T: %w`, src, err)
+ }
+ src = &raw
+ }
+
+ var ptr *rsa.PrivateKey
+ switch src := src.(type) {
+ case rsa.PrivateKey:
+ ptr = &src
+ case *rsa.PrivateKey:
+ ptr = src
+ default:
+ return fmt.Errorf(`keyconv: expected rsa.PrivateKey or *rsa.PrivateKey, got %T`, src)
+ }
+
+ return blackmagic.AssignIfCompatible(dst, ptr)
+}
+
+// RSAPublicKey assigns src to dst
+// `dst` should be a pointer to a non-zero rsa.PublicKey.
+// `src` may be rsa.PublicKey, *rsa.PublicKey, or a jwk.Key
+func RSAPublicKey(dst, src any) error {
+ if jwkKey, ok := src.(jwk.Key); ok {
+ pk, err := jwk.PublicRawKeyOf(jwkKey)
+ if err != nil {
+ return fmt.Errorf(`keyconv: failed to produce public key from %T: %w`, src, err)
+ }
+ src = pk
+ }
+
+ var ptr *rsa.PublicKey
+ switch src := src.(type) {
+ case rsa.PrivateKey:
+ ptr = &src.PublicKey
+ case *rsa.PrivateKey:
+ ptr = &src.PublicKey
+ case rsa.PublicKey:
+ ptr = &src
+ case *rsa.PublicKey:
+ ptr = src
+ default:
+ return fmt.Errorf(`keyconv: expected rsa.PublicKey/rsa.PrivateKey or *rsa.PublicKey/*rsa.PrivateKey, got %T`, src)
+ }
+
+ return blackmagic.AssignIfCompatible(dst, ptr)
+}
+
+// ECDSAPrivateKey assigns src to dst, converting its type from a
+// non-pointer to a pointer
+func ECDSAPrivateKey(dst, src any) error {
+ if jwkKey, ok := src.(jwk.Key); ok {
+ var raw ecdsa.PrivateKey
+ if err := jwk.Export(jwkKey, &raw); err != nil {
+ return fmt.Errorf(`keyconv: failed to produce ecdsa.PrivateKey from %T: %w`, src, err)
+ }
+ src = &raw
+ }
+
+ var ptr *ecdsa.PrivateKey
+ switch src := src.(type) {
+ case ecdsa.PrivateKey:
+ ptr = &src
+ case *ecdsa.PrivateKey:
+ ptr = src
+ default:
+ return fmt.Errorf(`keyconv: expected ecdsa.PrivateKey or *ecdsa.PrivateKey, got %T`, src)
+ }
+ return blackmagic.AssignIfCompatible(dst, ptr)
+}
+
+// ECDSAPublicKey assigns src to dst, converting its type from a
+// non-pointer to a pointer
+func ECDSAPublicKey(dst, src any) error {
+ if jwkKey, ok := src.(jwk.Key); ok {
+ pk, err := jwk.PublicRawKeyOf(jwkKey)
+ if err != nil {
+ return fmt.Errorf(`keyconv: failed to produce public key from %T: %w`, src, err)
+ }
+ src = pk
+ }
+
+ var ptr *ecdsa.PublicKey
+ switch src := src.(type) {
+ case ecdsa.PrivateKey:
+ ptr = &src.PublicKey
+ case *ecdsa.PrivateKey:
+ ptr = &src.PublicKey
+ case ecdsa.PublicKey:
+ ptr = &src
+ case *ecdsa.PublicKey:
+ ptr = src
+ default:
+ return fmt.Errorf(`keyconv: expected ecdsa.PublicKey/ecdsa.PrivateKey or *ecdsa.PublicKey/*ecdsa.PrivateKey, got %T`, src)
+ }
+ return blackmagic.AssignIfCompatible(dst, ptr)
+}
+
+func ByteSliceKey(dst, src any) error {
+ if jwkKey, ok := src.(jwk.Key); ok {
+ var raw []byte
+ if err := jwk.Export(jwkKey, &raw); err != nil {
+ return fmt.Errorf(`keyconv: failed to produce []byte from %T: %w`, src, err)
+ }
+ src = raw
+ }
+
+ if _, ok := src.([]byte); !ok {
+ return fmt.Errorf(`keyconv: expected []byte, got %T`, src)
+ }
+ return blackmagic.AssignIfCompatible(dst, src)
+}
+
+func Ed25519PrivateKey(dst, src any) error {
+ if jwkKey, ok := src.(jwk.Key); ok {
+ var raw ed25519.PrivateKey
+ if err := jwk.Export(jwkKey, &raw); err != nil {
+ return fmt.Errorf(`failed to produce ed25519.PrivateKey from %T: %w`, src, err)
+ }
+ src = &raw
+ }
+
+ var ptr *ed25519.PrivateKey
+ switch src := src.(type) {
+ case ed25519.PrivateKey:
+ ptr = &src
+ case *ed25519.PrivateKey:
+ ptr = src
+ default:
+ return fmt.Errorf(`expected ed25519.PrivateKey or *ed25519.PrivateKey, got %T`, src)
+ }
+ return blackmagic.AssignIfCompatible(dst, ptr)
+}
+
+func Ed25519PublicKey(dst, src any) error {
+ if jwkKey, ok := src.(jwk.Key); ok {
+ pk, err := jwk.PublicRawKeyOf(jwkKey)
+ if err != nil {
+ return fmt.Errorf(`keyconv: failed to produce public key from %T: %w`, src, err)
+ }
+ src = pk
+ }
+
+ switch key := src.(type) {
+ case ed25519.PrivateKey:
+ src = key.Public()
+ case *ed25519.PrivateKey:
+ src = key.Public()
+ }
+
+ var ptr *ed25519.PublicKey
+ switch src := src.(type) {
+ case ed25519.PublicKey:
+ ptr = &src
+ case *ed25519.PublicKey:
+ ptr = src
+ case *crypto.PublicKey:
+ tmp, ok := (*src).(ed25519.PublicKey)
+ if !ok {
+ return fmt.Errorf(`failed to retrieve ed25519.PublicKey out of *crypto.PublicKey`)
+ }
+ ptr = &tmp
+ case crypto.PublicKey:
+ tmp, ok := src.(ed25519.PublicKey)
+ if !ok {
+ return fmt.Errorf(`failed to retrieve ed25519.PublicKey out of crypto.PublicKey`)
+ }
+ ptr = &tmp
+ default:
+ return fmt.Errorf(`expected ed25519.PublicKey or *ed25519.PublicKey, got %T`, src)
+ }
+ return blackmagic.AssignIfCompatible(dst, ptr)
+}
+
+type privECDHer interface {
+ ECDH() (*ecdh.PrivateKey, error)
+}
+
+func ECDHPrivateKey(dst, src any) error {
+ var privECDH *ecdh.PrivateKey
+ if jwkKey, ok := src.(jwk.Key); ok {
+ var rawECDH ecdh.PrivateKey
+ if err := jwk.Export(jwkKey, &rawECDH); err == nil {
+ privECDH = &rawECDH
+ } else {
+ // If we cannot export the key as an ecdh.PrivateKey, we try to export it as an ecdsa.PrivateKey
+ var rawECDSA ecdsa.PrivateKey
+ if err := jwk.Export(jwkKey, &rawECDSA); err != nil {
+ return fmt.Errorf(`keyconv: failed to produce ecdh.PrivateKey or ecdsa.PrivateKey from %T: %w`, src, err)
+ }
+ src = &rawECDSA
+ }
+ }
+
+ switch src := src.(type) {
+ case ecdh.PrivateKey:
+ privECDH = &src
+ case *ecdh.PrivateKey:
+ privECDH = src
+ case privECDHer:
+ priv, err := src.ECDH()
+ if err != nil {
+ return fmt.Errorf(`keyconv: failed to convert ecdsa.PrivateKey to ecdh.PrivateKey: %w`, err)
+ }
+ privECDH = priv
+ }
+
+ return blackmagic.AssignIfCompatible(dst, privECDH)
+}
+
+type pubECDHer interface {
+ ECDH() (*ecdh.PublicKey, error)
+}
+
+func ECDHPublicKey(dst, src any) error {
+ var pubECDH *ecdh.PublicKey
+ if jwkKey, ok := src.(jwk.Key); ok {
+ var rawECDH ecdh.PublicKey
+ if err := jwk.Export(jwkKey, &rawECDH); err == nil {
+ pubECDH = &rawECDH
+ } else {
+ // If we cannot export the key as an ecdh.PublicKey, we try to export it as an ecdsa.PublicKey
+ var rawECDSA ecdsa.PublicKey
+ if err := jwk.Export(jwkKey, &rawECDSA); err != nil {
+ return fmt.Errorf(`keyconv: failed to produce ecdh.PublicKey or ecdsa.PublicKey from %T: %w`, src, err)
+ }
+ src = &rawECDSA
+ }
+ }
+
+ switch src := src.(type) {
+ case ecdh.PublicKey:
+ pubECDH = &src
+ case *ecdh.PublicKey:
+ pubECDH = src
+ case pubECDHer:
+ pub, err := src.ECDH()
+ if err != nil {
+ return fmt.Errorf(`keyconv: failed to convert ecdsa.PublicKey to ecdh.PublicKey: %w`, err)
+ }
+ pubECDH = pub
+ }
+
+ return blackmagic.AssignIfCompatible(dst, pubECDH)
+}
+
+// ecdhCurveToElliptic maps ECDH curves to elliptic curves
+func ecdhCurveToElliptic(ecdhCurve ecdh.Curve) (elliptic.Curve, error) {
+ switch ecdhCurve {
+ case ecdh.P256():
+ return elliptic.P256(), nil
+ case ecdh.P384():
+ return elliptic.P384(), nil
+ case ecdh.P521():
+ return elliptic.P521(), nil
+ default:
+ return nil, fmt.Errorf(`keyconv: unsupported ECDH curve: %v`, ecdhCurve)
+ }
+}
+
+// ecdhPublicKeyToECDSA converts an ECDH public key to an ECDSA public key
+func ecdhPublicKeyToECDSA(ecdhPubKey *ecdh.PublicKey) (*ecdsa.PublicKey, error) {
+ curve, err := ecdhCurveToElliptic(ecdhPubKey.Curve())
+ if err != nil {
+ return nil, err
+ }
+
+ pubBytes := ecdhPubKey.Bytes()
+
+ // Parse the uncompressed point format (0x04 prefix + X + Y coordinates)
+ if len(pubBytes) == 0 || pubBytes[0] != 0x04 {
+ return nil, fmt.Errorf(`keyconv: invalid ECDH public key format`)
+ }
+
+ keyLen := (len(pubBytes) - 1) / 2
+ if len(pubBytes) != 1+2*keyLen {
+ return nil, fmt.Errorf(`keyconv: invalid ECDH public key length`)
+ }
+
+ x := new(big.Int).SetBytes(pubBytes[1 : 1+keyLen])
+ y := new(big.Int).SetBytes(pubBytes[1+keyLen:])
+
+ return &ecdsa.PublicKey{
+ Curve: curve,
+ X: x,
+ Y: y,
+ }, nil
+}
+
+func ECDHToECDSA(dst, src any) error {
+ // convert ecdh.PublicKey to ecdsa.PublicKey, ecdh.PrivateKey to ecdsa.PrivateKey
+
+ // First, handle value types by converting to pointers
+ switch s := src.(type) {
+ case ecdh.PrivateKey:
+ src = &s
+ case ecdh.PublicKey:
+ src = &s
+ }
+
+ var privBytes []byte
+ var pubkey *ecdh.PublicKey
+ // Now handle the actual conversion with pointer types
+ switch src := src.(type) {
+ case *ecdh.PrivateKey:
+ pubkey = src.PublicKey()
+ privBytes = src.Bytes()
+ case *ecdh.PublicKey:
+ pubkey = src
+ default:
+ return fmt.Errorf(`keyconv: expected ecdh.PrivateKey, *ecdh.PrivateKey, ecdh.PublicKey, or *ecdh.PublicKey, got %T`, src)
+ }
+
+ // convert the public key
+ ecdsaPubKey, err := ecdhPublicKeyToECDSA(pubkey)
+ if err != nil {
+ return fmt.Errorf(`keyconv.ECDHToECDSA: failed to convert ECDH public key to ECDSA public key: %w`, err)
+ }
+
+ // return if we were being asked to convert *ecdh.PublicKey
+ if privBytes == nil {
+ return blackmagic.AssignIfCompatible(dst, ecdsaPubKey)
+ }
+
+ // Then create the private key with the public key embedded
+ ecdsaPrivKey := &ecdsa.PrivateKey{
+ D: new(big.Int).SetBytes(privBytes),
+ PublicKey: *ecdsaPubKey,
+ }
+
+ return blackmagic.AssignIfCompatible(dst, ecdsaPrivKey)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/BUILD.bazel
new file mode 100644
index 0000000000..cebc269330
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/BUILD.bazel
@@ -0,0 +1,32 @@
+load("@rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "pool",
+ srcs = [
+ "big_int.go",
+ "byte_slice.go",
+ "bytes_buffer.go",
+ "error_slice.go",
+ "key_to_error_map.go",
+ "pool.go",
+ ],
+ importpath = "github.com/lestrrat-go/jwx/v3/internal/pool",
+ visibility = ["//:__subpackages__"],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":pool",
+ visibility = ["//:__subpackages__"],
+)
+
+go_test(
+ name = "pool_test",
+ srcs = [
+ "byte_slice_test.go",
+ ],
+ deps = [
+ ":pool",
+ "@com_github_stretchr_testify//require",
+ ],
+)
\ No newline at end of file
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/big_int.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/big_int.go
new file mode 100644
index 0000000000..57c446d4d2
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/big_int.go
@@ -0,0 +1,19 @@
+package pool
+
+import "math/big"
+
+var bigIntPool = New[*big.Int](allocBigInt, freeBigInt)
+
+func allocBigInt() *big.Int {
+ return &big.Int{}
+}
+
+func freeBigInt(b *big.Int) *big.Int {
+ b.SetInt64(0) // Reset the value to zero
+ return b
+}
+
+// BigInt returns a pool of *big.Int instances.
+func BigInt() *Pool[*big.Int] {
+ return bigIntPool
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/byte_slice.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/byte_slice.go
new file mode 100644
index 0000000000..46f1028343
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/byte_slice.go
@@ -0,0 +1,19 @@
+package pool
+
+var byteSlicePool = SlicePool[byte]{
+ pool: New[[]byte](allocByteSlice, freeByteSlice),
+}
+
+func allocByteSlice() []byte {
+ return make([]byte, 0, 64) // Default capacity of 64 bytes
+}
+
+func freeByteSlice(b []byte) []byte {
+ clear(b)
+ b = b[:0] // Reset the slice to zero length
+ return b
+}
+
+func ByteSlice() SlicePool[byte] {
+ return byteSlicePool
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/bytes_buffer.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/bytes_buffer.go
new file mode 100644
index 0000000000..a877f73ff8
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/bytes_buffer.go
@@ -0,0 +1,18 @@
+package pool
+
+import "bytes"
+
+var bytesBufferPool = New[*bytes.Buffer](allocBytesBuffer, freeBytesBuffer)
+
+func allocBytesBuffer() *bytes.Buffer {
+ return &bytes.Buffer{}
+}
+
+func freeBytesBuffer(b *bytes.Buffer) *bytes.Buffer {
+ b.Reset()
+ return b
+}
+
+func BytesBuffer() *Pool[*bytes.Buffer] {
+ return bytesBufferPool
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/error_slice.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/error_slice.go
new file mode 100644
index 0000000000..4f1675c1c0
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/error_slice.go
@@ -0,0 +1,16 @@
+package pool
+
+var errorSlicePool = New[[]error](allocErrorSlice, freeErrorSlice)
+
+func allocErrorSlice() []error {
+ return make([]error, 0, 1)
+}
+
+func freeErrorSlice(s []error) []error {
+ // Reset the slice to its zero value
+ return s[:0]
+}
+
+func ErrorSlice() *Pool[[]error] {
+ return errorSlicePool
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/key_to_error_map.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/key_to_error_map.go
new file mode 100644
index 0000000000..9fae012644
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/key_to_error_map.go
@@ -0,0 +1,19 @@
+package pool
+
+var keyToErrorMapPool = New[map[string]error](allocKeyToErrorMap, freeKeyToErrorMap)
+
+func allocKeyToErrorMap() map[string]error {
+ return make(map[string]error)
+}
+
+func freeKeyToErrorMap(m map[string]error) map[string]error {
+ for k := range m {
+ delete(m, k) // Clear the map
+ }
+ return m
+}
+
+// KeyToErrorMap returns a pool of map[string]error instances.
+func KeyToErrorMap() *Pool[map[string]error] {
+ return keyToErrorMapPool
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/pool.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/pool.go
new file mode 100644
index 0000000000..008b1cdb8b
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/pool.go
@@ -0,0 +1,70 @@
+package pool
+
+import (
+ "sync"
+)
+
+type Pool[T any] struct {
+ pool sync.Pool
+ destructor func(T) T
+}
+
+// New creates a new Pool instance for the type T.
+// The allocator function is used to create new instances of T when the pool is empty.
+// The destructor function is used to clean up instances of T before they are returned to the pool.
+// The destructor should reset the state of T to a clean state, so it can be reused, and
+// return the modified instance of T. This is required for cases when you reset operations
+// can modify the underlying data structure, such as slices or maps.
+func New[T any](allocator func() T, destructor func(T) T) *Pool[T] {
+ return &Pool[T]{
+ pool: sync.Pool{
+ New: func() any {
+ return allocator()
+ },
+ },
+ destructor: destructor,
+ }
+}
+
+// Get retrieves an item of type T from the pool.
+func (p *Pool[T]) Get() T {
+ //nolint:forcetypeassert
+ return p.pool.Get().(T)
+}
+
+// Put returns an item of type T to the pool.
+// The item is first processed by the destructor function to ensure it is in a clean state.
+func (p *Pool[T]) Put(item T) {
+ p.pool.Put(p.destructor(item))
+}
+
+// SlicePool is a specialized pool for slices of type T. It is identical to Pool[T] but
+// provides additional functionality to get slices with a specific capacity.
+type SlicePool[T any] struct {
+ pool *Pool[[]T]
+}
+
+func NewSlicePool[T any](allocator func() []T, destructor func([]T) []T) SlicePool[T] {
+ return SlicePool[T]{
+ pool: New(allocator, destructor),
+ }
+}
+
+func (p SlicePool[T]) Get() []T {
+ return p.pool.Get()
+}
+
+func (p SlicePool[T]) GetCapacity(capacity int) []T {
+ if capacity <= 0 {
+ return p.Get()
+ }
+ s := p.Get()
+ if cap(s) < capacity {
+ s = make([]T, 0, capacity)
+ }
+ return s
+}
+
+func (p SlicePool[T]) Put(s []T) {
+ p.pool.Put(s)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/tokens/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/internal/tokens/BUILD.bazel
new file mode 100644
index 0000000000..6a331efb34
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/tokens/BUILD.bazel
@@ -0,0 +1,17 @@
+load("@rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "tokens",
+ srcs = [
+ "jwe_tokens.go",
+ "tokens.go",
+ ],
+ importpath = "github.com/lestrrat-go/jwx/v3/internal/tokens",
+ visibility = ["//:__subpackages__"],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":tokens",
+ visibility = ["//:__subpackages__"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/tokens/jwe_tokens.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/tokens/jwe_tokens.go
new file mode 100644
index 0000000000..9001cbbbc7
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/tokens/jwe_tokens.go
@@ -0,0 +1,48 @@
+package tokens
+
+// JWE Key Encryption Algorithms
+const (
+ // RSA algorithms
+ RSA1_5 = "RSA1_5"
+ RSA_OAEP = "RSA-OAEP"
+ RSA_OAEP_256 = "RSA-OAEP-256"
+ RSA_OAEP_384 = "RSA-OAEP-384"
+ RSA_OAEP_512 = "RSA-OAEP-512"
+
+ // AES Key Wrap algorithms
+ A128KW = "A128KW"
+ A192KW = "A192KW"
+ A256KW = "A256KW"
+
+ // AES GCM Key Wrap algorithms
+ A128GCMKW = "A128GCMKW"
+ A192GCMKW = "A192GCMKW"
+ A256GCMKW = "A256GCMKW"
+
+ // ECDH-ES algorithms
+ ECDH_ES = "ECDH-ES"
+ ECDH_ES_A128KW = "ECDH-ES+A128KW"
+ ECDH_ES_A192KW = "ECDH-ES+A192KW"
+ ECDH_ES_A256KW = "ECDH-ES+A256KW"
+
+ // PBES2 algorithms
+ PBES2_HS256_A128KW = "PBES2-HS256+A128KW"
+ PBES2_HS384_A192KW = "PBES2-HS384+A192KW"
+ PBES2_HS512_A256KW = "PBES2-HS512+A256KW"
+
+ // Direct key agreement
+ DIRECT = "dir"
+)
+
+// JWE Content Encryption Algorithms
+const (
+ // AES GCM algorithms
+ A128GCM = "A128GCM"
+ A192GCM = "A192GCM"
+ A256GCM = "A256GCM"
+
+ // AES CBC + HMAC algorithms
+ A128CBC_HS256 = "A128CBC-HS256"
+ A192CBC_HS384 = "A192CBC-HS384"
+ A256CBC_HS512 = "A256CBC-HS512"
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/tokens/tokens.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/tokens/tokens.go
new file mode 100644
index 0000000000..2af3b88de1
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/tokens/tokens.go
@@ -0,0 +1,51 @@
+package tokens
+
+const (
+ CloseCurlyBracket = '}'
+ CloseSquareBracket = ']'
+ Colon = ':'
+ Comma = ','
+ DoubleQuote = '"'
+ OpenCurlyBracket = '{'
+ OpenSquareBracket = '['
+ Period = '.'
+)
+
+// Cryptographic key sizes
+const (
+ KeySize16 = 16
+ KeySize24 = 24
+ KeySize32 = 32
+ KeySize48 = 48 // A192CBC_HS384 key size
+ KeySize64 = 64 // A256CBC_HS512 key size
+)
+
+// Bit/byte conversion factors
+const (
+ BitsPerByte = 8
+ BytesPerBit = 1.0 / 8
+)
+
+// Key wrapping constants
+const (
+ KeywrapChunkLen = 8
+ KeywrapRounds = 6 // RFC 3394 key wrap rounds
+ KeywrapBlockSize = 8 // Key wrap block size in bytes
+)
+
+// AES-GCM constants
+const (
+ GCMIVSize = 12 // GCM IV size in bytes (96 bits)
+ GCMTagSize = 16 // GCM tag size in bytes (128 bits)
+)
+
+// PBES2 constants
+const (
+ PBES2DefaultIterations = 10000 // Default PBKDF2 iteration count
+ PBES2NullByteSeparator = 0 // Null byte separator for PBES2
+)
+
+// RSA key generation constants
+const (
+ RSAKeyGenMultiplier = 2 // RSA key generation size multiplier
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwa/BUILD.bazel
new file mode 100644
index 0000000000..cfb7af02a0
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/BUILD.bazel
@@ -0,0 +1,45 @@
+load("@rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "jwa",
+ srcs = [
+ "compression_gen.go",
+ "content_encryption_gen.go",
+ "elliptic_gen.go",
+ "jwa.go",
+ "key_encryption_gen.go",
+ "key_type_gen.go",
+ "options_gen.go",
+ "signature_gen.go",
+ ],
+ importpath = "github.com/lestrrat-go/jwx/v3/jwa",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//internal/tokens",
+ "@com_github_lestrrat_go_option_v2//:option",
+ ],
+)
+
+go_test(
+ name = "jwa_test",
+ srcs = [
+ "compression_gen_test.go",
+ "content_encryption_gen_test.go",
+ "elliptic_gen_test.go",
+ "jwa_test.go",
+ "key_encryption_gen_test.go",
+ "key_type_gen_test.go",
+ "signature_gen_test.go",
+ ],
+ deps = [
+ ":jwa",
+ "@com_github_stretchr_testify//require",
+ "@com_github_lestrrat_go_option_v2//:option",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":jwa",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/README.md b/vendor/github.com/lestrrat-go/jwx/v3/jwa/README.md
new file mode 100644
index 0000000000..270e60c672
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/README.md
@@ -0,0 +1,3 @@
+# JWA [](https://pkg.go.dev/github.com/lestrrat-go/jwx/v3/jwa)
+
+Package [github.com/lestrrat-go/jwx/v3/jwa](./jwa) defines the various algorithm described in [RFC7518](https://tools.ietf.org/html/rfc7518)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/compression_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwa/compression_gen.go
new file mode 100644
index 0000000000..a7a2451afa
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/compression_gen.go
@@ -0,0 +1,153 @@
+// Code generated by tools/cmd/genjwa/main.go. DO NOT EDIT.
+
+package jwa
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "sync"
+)
+
+var muAllCompressionAlgorithm sync.RWMutex
+var allCompressionAlgorithm = map[string]CompressionAlgorithm{}
+var muListCompressionAlgorithm sync.RWMutex
+var listCompressionAlgorithm []CompressionAlgorithm
+var builtinCompressionAlgorithm = map[string]struct{}{}
+
+func init() {
+ // builtin values for CompressionAlgorithm
+ algorithms := make([]CompressionAlgorithm, 2)
+ algorithms[0] = NewCompressionAlgorithm("DEF")
+ algorithms[1] = NewCompressionAlgorithm("")
+
+ RegisterCompressionAlgorithm(algorithms...)
+}
+
+// Deflate returns an object representing the "DEF" content compression algorithm value. Using this value specifies that the content should be compressed using DEFLATE (RFC 1951).
+func Deflate() CompressionAlgorithm {
+ return lookupBuiltinCompressionAlgorithm("DEF")
+}
+
+// NoCompress returns an object representing an empty compression algorithm value. Using this value specifies that the content should not be compressed.
+func NoCompress() CompressionAlgorithm {
+ return lookupBuiltinCompressionAlgorithm("")
+}
+
+func lookupBuiltinCompressionAlgorithm(name string) CompressionAlgorithm {
+ muAllCompressionAlgorithm.RLock()
+ v, ok := allCompressionAlgorithm[name]
+ muAllCompressionAlgorithm.RUnlock()
+ if !ok {
+ panic(fmt.Sprintf(`jwa: CompressionAlgorithm %q not registered`, name))
+ }
+ return v
+}
+
+// CompressionAlgorithm represents the compression algorithms as described in https://tools.ietf.org/html/rfc7518#section-7.3
+type CompressionAlgorithm struct {
+ name string
+ deprecated bool
+}
+
+func (s CompressionAlgorithm) String() string {
+ return s.name
+}
+
+// IsDeprecated returns true if the CompressionAlgorithm object is deprecated.
+func (s CompressionAlgorithm) IsDeprecated() bool {
+ return s.deprecated
+}
+
+// EmptyCompressionAlgorithm returns an empty CompressionAlgorithm object, used as a zero value.
+func EmptyCompressionAlgorithm() CompressionAlgorithm {
+ return CompressionAlgorithm{}
+}
+
+// NewCompressionAlgorithm creates a new CompressionAlgorithm object with the given name.
+func NewCompressionAlgorithm(name string, options ...NewAlgorithmOption) CompressionAlgorithm {
+ var deprecated bool
+ for _, option := range options {
+ switch option.Ident() {
+ case identDeprecated{}:
+ if err := option.Value(&deprecated); err != nil {
+ panic("jwa.NewCompressionAlgorithm: WithDeprecated option must be a boolean")
+ }
+ }
+ }
+ return CompressionAlgorithm{name: name, deprecated: deprecated}
+}
+
+// LookupCompressionAlgorithm returns the CompressionAlgorithm object for the given name.
+func LookupCompressionAlgorithm(name string) (CompressionAlgorithm, bool) {
+ muAllCompressionAlgorithm.RLock()
+ v, ok := allCompressionAlgorithm[name]
+ muAllCompressionAlgorithm.RUnlock()
+ return v, ok
+}
+
+// RegisterCompressionAlgorithm registers a new CompressionAlgorithm. The signature value must be immutable
+// and safe to be used by multiple goroutines, as it is going to be shared with all other users of this library.
+func RegisterCompressionAlgorithm(algorithms ...CompressionAlgorithm) {
+ muAllCompressionAlgorithm.Lock()
+ for _, alg := range algorithms {
+ allCompressionAlgorithm[alg.String()] = alg
+ }
+ muAllCompressionAlgorithm.Unlock()
+ rebuildCompressionAlgorithm()
+}
+
+// UnregisterCompressionAlgorithm unregisters a CompressionAlgorithm from its known database.
+// Non-existent entries, as well as built-in algorithms will silently be ignored.
+func UnregisterCompressionAlgorithm(algorithms ...CompressionAlgorithm) {
+ muAllCompressionAlgorithm.Lock()
+ for _, alg := range algorithms {
+ if _, ok := builtinCompressionAlgorithm[alg.String()]; ok {
+ continue
+ }
+ delete(allCompressionAlgorithm, alg.String())
+ }
+ muAllCompressionAlgorithm.Unlock()
+ rebuildCompressionAlgorithm()
+}
+
+func rebuildCompressionAlgorithm() {
+ list := make([]CompressionAlgorithm, 0, len(allCompressionAlgorithm))
+ muAllCompressionAlgorithm.RLock()
+ for _, v := range allCompressionAlgorithm {
+ list = append(list, v)
+ }
+ muAllCompressionAlgorithm.RUnlock()
+ sort.Slice(list, func(i, j int) bool {
+ return list[i].String() < list[j].String()
+ })
+ muListCompressionAlgorithm.Lock()
+ listCompressionAlgorithm = list
+ muListCompressionAlgorithm.Unlock()
+}
+
+// CompressionAlgorithms returns a list of all available values for CompressionAlgorithm.
+func CompressionAlgorithms() []CompressionAlgorithm {
+ muListCompressionAlgorithm.RLock()
+ defer muListCompressionAlgorithm.RUnlock()
+ return listCompressionAlgorithm
+}
+
+// MarshalJSON serializes the CompressionAlgorithm object to a JSON string.
+func (s CompressionAlgorithm) MarshalJSON() ([]byte, error) {
+ return json.Marshal(s.String())
+}
+
+// UnmarshalJSON deserializes the JSON string to a CompressionAlgorithm object.
+func (s *CompressionAlgorithm) UnmarshalJSON(data []byte) error {
+ var name string
+ if err := json.Unmarshal(data, &name); err != nil {
+ return fmt.Errorf(`failed to unmarshal CompressionAlgorithm: %w`, err)
+ }
+ v, ok := LookupCompressionAlgorithm(name)
+ if !ok {
+ return fmt.Errorf(`unknown CompressionAlgorithm: %q`, name)
+ }
+ *s = v
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/content_encryption_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwa/content_encryption_gen.go
new file mode 100644
index 0000000000..8ccc47e462
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/content_encryption_gen.go
@@ -0,0 +1,179 @@
+// Code generated by tools/cmd/genjwa/main.go. DO NOT EDIT.
+
+package jwa
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "sync"
+
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+)
+
+var muAllContentEncryptionAlgorithm sync.RWMutex
+var allContentEncryptionAlgorithm = map[string]ContentEncryptionAlgorithm{}
+var muListContentEncryptionAlgorithm sync.RWMutex
+var listContentEncryptionAlgorithm []ContentEncryptionAlgorithm
+var builtinContentEncryptionAlgorithm = map[string]struct{}{}
+
+func init() {
+ // builtin values for ContentEncryptionAlgorithm
+ algorithms := make([]ContentEncryptionAlgorithm, 6)
+ algorithms[0] = NewContentEncryptionAlgorithm(tokens.A128CBC_HS256)
+ algorithms[1] = NewContentEncryptionAlgorithm(tokens.A128GCM)
+ algorithms[2] = NewContentEncryptionAlgorithm(tokens.A192CBC_HS384)
+ algorithms[3] = NewContentEncryptionAlgorithm(tokens.A192GCM)
+ algorithms[4] = NewContentEncryptionAlgorithm(tokens.A256CBC_HS512)
+ algorithms[5] = NewContentEncryptionAlgorithm(tokens.A256GCM)
+
+ RegisterContentEncryptionAlgorithm(algorithms...)
+}
+
+// A128CBC_HS256 returns an object representing A128CBC-HS256. Using this value specifies that the content should be encrypted using AES-CBC + HMAC-SHA256 (128).
+func A128CBC_HS256() ContentEncryptionAlgorithm {
+ return lookupBuiltinContentEncryptionAlgorithm(tokens.A128CBC_HS256)
+}
+
+// A128GCM returns an object representing A128GCM. Using this value specifies that the content should be encrypted using AES-GCM (128).
+func A128GCM() ContentEncryptionAlgorithm {
+ return lookupBuiltinContentEncryptionAlgorithm(tokens.A128GCM)
+}
+
+// A192CBC_HS384 returns an object representing A192CBC-HS384. Using this value specifies that the content should be encrypted using AES-CBC + HMAC-SHA384 (192).
+func A192CBC_HS384() ContentEncryptionAlgorithm {
+ return lookupBuiltinContentEncryptionAlgorithm(tokens.A192CBC_HS384)
+}
+
+// A192GCM returns an object representing A192GCM. Using this value specifies that the content should be encrypted using AES-GCM (192).
+func A192GCM() ContentEncryptionAlgorithm {
+ return lookupBuiltinContentEncryptionAlgorithm(tokens.A192GCM)
+}
+
+// A256CBC_HS512 returns an object representing A256CBC-HS512. Using this value specifies that the content should be encrypted using AES-CBC + HMAC-SHA512 (256).
+func A256CBC_HS512() ContentEncryptionAlgorithm {
+ return lookupBuiltinContentEncryptionAlgorithm(tokens.A256CBC_HS512)
+}
+
+// A256GCM returns an object representing A256GCM. Using this value specifies that the content should be encrypted using AES-GCM (256).
+func A256GCM() ContentEncryptionAlgorithm {
+ return lookupBuiltinContentEncryptionAlgorithm(tokens.A256GCM)
+}
+
+func lookupBuiltinContentEncryptionAlgorithm(name string) ContentEncryptionAlgorithm {
+ muAllContentEncryptionAlgorithm.RLock()
+ v, ok := allContentEncryptionAlgorithm[name]
+ muAllContentEncryptionAlgorithm.RUnlock()
+ if !ok {
+ panic(fmt.Sprintf(`jwa: ContentEncryptionAlgorithm %q not registered`, name))
+ }
+ return v
+}
+
+// ContentEncryptionAlgorithm represents the various encryption algorithms as described in https://tools.ietf.org/html/rfc7518#section-5
+type ContentEncryptionAlgorithm struct {
+ name string
+ deprecated bool
+}
+
+func (s ContentEncryptionAlgorithm) String() string {
+ return s.name
+}
+
+// IsDeprecated returns true if the ContentEncryptionAlgorithm object is deprecated.
+func (s ContentEncryptionAlgorithm) IsDeprecated() bool {
+ return s.deprecated
+}
+
+// EmptyContentEncryptionAlgorithm returns an empty ContentEncryptionAlgorithm object, used as a zero value.
+func EmptyContentEncryptionAlgorithm() ContentEncryptionAlgorithm {
+ return ContentEncryptionAlgorithm{}
+}
+
+// NewContentEncryptionAlgorithm creates a new ContentEncryptionAlgorithm object with the given name.
+func NewContentEncryptionAlgorithm(name string, options ...NewAlgorithmOption) ContentEncryptionAlgorithm {
+ var deprecated bool
+ for _, option := range options {
+ switch option.Ident() {
+ case identDeprecated{}:
+ if err := option.Value(&deprecated); err != nil {
+ panic("jwa.NewContentEncryptionAlgorithm: WithDeprecated option must be a boolean")
+ }
+ }
+ }
+ return ContentEncryptionAlgorithm{name: name, deprecated: deprecated}
+}
+
+// LookupContentEncryptionAlgorithm returns the ContentEncryptionAlgorithm object for the given name.
+func LookupContentEncryptionAlgorithm(name string) (ContentEncryptionAlgorithm, bool) {
+ muAllContentEncryptionAlgorithm.RLock()
+ v, ok := allContentEncryptionAlgorithm[name]
+ muAllContentEncryptionAlgorithm.RUnlock()
+ return v, ok
+}
+
+// RegisterContentEncryptionAlgorithm registers a new ContentEncryptionAlgorithm. The signature value must be immutable
+// and safe to be used by multiple goroutines, as it is going to be shared with all other users of this library.
+func RegisterContentEncryptionAlgorithm(algorithms ...ContentEncryptionAlgorithm) {
+ muAllContentEncryptionAlgorithm.Lock()
+ for _, alg := range algorithms {
+ allContentEncryptionAlgorithm[alg.String()] = alg
+ }
+ muAllContentEncryptionAlgorithm.Unlock()
+ rebuildContentEncryptionAlgorithm()
+}
+
+// UnregisterContentEncryptionAlgorithm unregisters a ContentEncryptionAlgorithm from its known database.
+// Non-existent entries, as well as built-in algorithms will silently be ignored.
+func UnregisterContentEncryptionAlgorithm(algorithms ...ContentEncryptionAlgorithm) {
+ muAllContentEncryptionAlgorithm.Lock()
+ for _, alg := range algorithms {
+ if _, ok := builtinContentEncryptionAlgorithm[alg.String()]; ok {
+ continue
+ }
+ delete(allContentEncryptionAlgorithm, alg.String())
+ }
+ muAllContentEncryptionAlgorithm.Unlock()
+ rebuildContentEncryptionAlgorithm()
+}
+
+func rebuildContentEncryptionAlgorithm() {
+ list := make([]ContentEncryptionAlgorithm, 0, len(allContentEncryptionAlgorithm))
+ muAllContentEncryptionAlgorithm.RLock()
+ for _, v := range allContentEncryptionAlgorithm {
+ list = append(list, v)
+ }
+ muAllContentEncryptionAlgorithm.RUnlock()
+ sort.Slice(list, func(i, j int) bool {
+ return list[i].String() < list[j].String()
+ })
+ muListContentEncryptionAlgorithm.Lock()
+ listContentEncryptionAlgorithm = list
+ muListContentEncryptionAlgorithm.Unlock()
+}
+
+// ContentEncryptionAlgorithms returns a list of all available values for ContentEncryptionAlgorithm.
+func ContentEncryptionAlgorithms() []ContentEncryptionAlgorithm {
+ muListContentEncryptionAlgorithm.RLock()
+ defer muListContentEncryptionAlgorithm.RUnlock()
+ return listContentEncryptionAlgorithm
+}
+
+// MarshalJSON serializes the ContentEncryptionAlgorithm object to a JSON string.
+func (s ContentEncryptionAlgorithm) MarshalJSON() ([]byte, error) {
+ return json.Marshal(s.String())
+}
+
+// UnmarshalJSON deserializes the JSON string to a ContentEncryptionAlgorithm object.
+func (s *ContentEncryptionAlgorithm) UnmarshalJSON(data []byte) error {
+ var name string
+ if err := json.Unmarshal(data, &name); err != nil {
+ return fmt.Errorf(`failed to unmarshal ContentEncryptionAlgorithm: %w`, err)
+ }
+ v, ok := LookupContentEncryptionAlgorithm(name)
+ if !ok {
+ return fmt.Errorf(`unknown ContentEncryptionAlgorithm: %q`, name)
+ }
+ *s = v
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/elliptic_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwa/elliptic_gen.go
new file mode 100644
index 0000000000..2418efde08
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/elliptic_gen.go
@@ -0,0 +1,190 @@
+// Code generated by tools/cmd/genjwa/main.go. DO NOT EDIT.
+
+package jwa
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "sync"
+)
+
+var muAllEllipticCurveAlgorithm sync.RWMutex
+var allEllipticCurveAlgorithm = map[string]EllipticCurveAlgorithm{}
+var muListEllipticCurveAlgorithm sync.RWMutex
+var listEllipticCurveAlgorithm []EllipticCurveAlgorithm
+var builtinEllipticCurveAlgorithm = map[string]struct{}{}
+
+func init() {
+ // builtin values for EllipticCurveAlgorithm
+ algorithms := make([]EllipticCurveAlgorithm, 7)
+ algorithms[0] = NewEllipticCurveAlgorithm("Ed25519")
+ algorithms[1] = NewEllipticCurveAlgorithm("Ed448")
+ algorithms[2] = NewEllipticCurveAlgorithm("P-256")
+ algorithms[3] = NewEllipticCurveAlgorithm("P-384")
+ algorithms[4] = NewEllipticCurveAlgorithm("P-521")
+ algorithms[5] = NewEllipticCurveAlgorithm("X25519")
+ algorithms[6] = NewEllipticCurveAlgorithm("X448")
+
+ RegisterEllipticCurveAlgorithm(algorithms...)
+}
+
+// Ed25519 returns an object representing Ed25519 algorithm for EdDSA operations.
+func Ed25519() EllipticCurveAlgorithm {
+ return lookupBuiltinEllipticCurveAlgorithm("Ed25519")
+}
+
+// Ed448 returns an object representing Ed448 algorithm for EdDSA operations.
+func Ed448() EllipticCurveAlgorithm {
+ return lookupBuiltinEllipticCurveAlgorithm("Ed448")
+}
+
+var invalidEllipticCurve = NewEllipticCurveAlgorithm("P-invalid")
+
+// InvalidEllipticCurve returns an object representing an invalid elliptic curve.
+func InvalidEllipticCurve() EllipticCurveAlgorithm {
+ return invalidEllipticCurve
+}
+
+// P256 returns an object representing P-256 algorithm for ECDSA operations.
+func P256() EllipticCurveAlgorithm {
+ return lookupBuiltinEllipticCurveAlgorithm("P-256")
+}
+
+// P384 returns an object representing P-384 algorithm for ECDSA operations.
+func P384() EllipticCurveAlgorithm {
+ return lookupBuiltinEllipticCurveAlgorithm("P-384")
+}
+
+// P521 returns an object representing P-521 algorithm for ECDSA operations.
+func P521() EllipticCurveAlgorithm {
+ return lookupBuiltinEllipticCurveAlgorithm("P-521")
+}
+
+// X25519 returns an object representing X25519 algorithm for ECDH operations.
+func X25519() EllipticCurveAlgorithm {
+ return lookupBuiltinEllipticCurveAlgorithm("X25519")
+}
+
+// X448 returns an object representing X448 algorithm for ECDH operations.
+func X448() EllipticCurveAlgorithm {
+ return lookupBuiltinEllipticCurveAlgorithm("X448")
+}
+
+func lookupBuiltinEllipticCurveAlgorithm(name string) EllipticCurveAlgorithm {
+ muAllEllipticCurveAlgorithm.RLock()
+ v, ok := allEllipticCurveAlgorithm[name]
+ muAllEllipticCurveAlgorithm.RUnlock()
+ if !ok {
+ panic(fmt.Sprintf(`jwa: EllipticCurveAlgorithm %q not registered`, name))
+ }
+ return v
+}
+
+// EllipticCurveAlgorithm represents the algorithms used for EC keys
+type EllipticCurveAlgorithm struct {
+ name string
+ deprecated bool
+}
+
+func (s EllipticCurveAlgorithm) String() string {
+ return s.name
+}
+
+// IsDeprecated returns true if the EllipticCurveAlgorithm object is deprecated.
+func (s EllipticCurveAlgorithm) IsDeprecated() bool {
+ return s.deprecated
+}
+
+// EmptyEllipticCurveAlgorithm returns an empty EllipticCurveAlgorithm object, used as a zero value.
+func EmptyEllipticCurveAlgorithm() EllipticCurveAlgorithm {
+ return EllipticCurveAlgorithm{}
+}
+
+// NewEllipticCurveAlgorithm creates a new EllipticCurveAlgorithm object with the given name.
+func NewEllipticCurveAlgorithm(name string, options ...NewAlgorithmOption) EllipticCurveAlgorithm {
+ var deprecated bool
+ for _, option := range options {
+ switch option.Ident() {
+ case identDeprecated{}:
+ if err := option.Value(&deprecated); err != nil {
+ panic("jwa.NewEllipticCurveAlgorithm: WithDeprecated option must be a boolean")
+ }
+ }
+ }
+ return EllipticCurveAlgorithm{name: name, deprecated: deprecated}
+}
+
+// LookupEllipticCurveAlgorithm returns the EllipticCurveAlgorithm object for the given name.
+func LookupEllipticCurveAlgorithm(name string) (EllipticCurveAlgorithm, bool) {
+ muAllEllipticCurveAlgorithm.RLock()
+ v, ok := allEllipticCurveAlgorithm[name]
+ muAllEllipticCurveAlgorithm.RUnlock()
+ return v, ok
+}
+
+// RegisterEllipticCurveAlgorithm registers a new EllipticCurveAlgorithm. The signature value must be immutable
+// and safe to be used by multiple goroutines, as it is going to be shared with all other users of this library.
+func RegisterEllipticCurveAlgorithm(algorithms ...EllipticCurveAlgorithm) {
+ muAllEllipticCurveAlgorithm.Lock()
+ for _, alg := range algorithms {
+ allEllipticCurveAlgorithm[alg.String()] = alg
+ }
+ muAllEllipticCurveAlgorithm.Unlock()
+ rebuildEllipticCurveAlgorithm()
+}
+
+// UnregisterEllipticCurveAlgorithm unregisters a EllipticCurveAlgorithm from its known database.
+// Non-existent entries, as well as built-in algorithms will silently be ignored.
+func UnregisterEllipticCurveAlgorithm(algorithms ...EllipticCurveAlgorithm) {
+ muAllEllipticCurveAlgorithm.Lock()
+ for _, alg := range algorithms {
+ if _, ok := builtinEllipticCurveAlgorithm[alg.String()]; ok {
+ continue
+ }
+ delete(allEllipticCurveAlgorithm, alg.String())
+ }
+ muAllEllipticCurveAlgorithm.Unlock()
+ rebuildEllipticCurveAlgorithm()
+}
+
+func rebuildEllipticCurveAlgorithm() {
+ list := make([]EllipticCurveAlgorithm, 0, len(allEllipticCurveAlgorithm))
+ muAllEllipticCurveAlgorithm.RLock()
+ for _, v := range allEllipticCurveAlgorithm {
+ list = append(list, v)
+ }
+ muAllEllipticCurveAlgorithm.RUnlock()
+ sort.Slice(list, func(i, j int) bool {
+ return list[i].String() < list[j].String()
+ })
+ muListEllipticCurveAlgorithm.Lock()
+ listEllipticCurveAlgorithm = list
+ muListEllipticCurveAlgorithm.Unlock()
+}
+
+// EllipticCurveAlgorithms returns a list of all available values for EllipticCurveAlgorithm.
+func EllipticCurveAlgorithms() []EllipticCurveAlgorithm {
+ muListEllipticCurveAlgorithm.RLock()
+ defer muListEllipticCurveAlgorithm.RUnlock()
+ return listEllipticCurveAlgorithm
+}
+
+// MarshalJSON serializes the EllipticCurveAlgorithm object to a JSON string.
+func (s EllipticCurveAlgorithm) MarshalJSON() ([]byte, error) {
+ return json.Marshal(s.String())
+}
+
+// UnmarshalJSON deserializes the JSON string to a EllipticCurveAlgorithm object.
+func (s *EllipticCurveAlgorithm) UnmarshalJSON(data []byte) error {
+ var name string
+ if err := json.Unmarshal(data, &name); err != nil {
+ return fmt.Errorf(`failed to unmarshal EllipticCurveAlgorithm: %w`, err)
+ }
+ v, ok := LookupEllipticCurveAlgorithm(name)
+ if !ok {
+ return fmt.Errorf(`unknown EllipticCurveAlgorithm: %q`, name)
+ }
+ *s = v
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/jwa.go b/vendor/github.com/lestrrat-go/jwx/v3/jwa/jwa.go
new file mode 100644
index 0000000000..29ac1dfe76
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/jwa.go
@@ -0,0 +1,68 @@
+//go:generate ../tools/cmd/genjwa.sh
+
+// Package jwa defines the various algorithm described in https://tools.ietf.org/html/rfc7518
+package jwa
+
+import (
+ "errors"
+ "fmt"
+)
+
+// KeyAlgorithm is a workaround for jwk.Key being able to contain different
+// types of algorithms in its `alg` field.
+//
+// Previously the storage for the `alg` field was represented as a string,
+// but this caused some users to wonder why the field was not typed appropriately
+// like other fields.
+//
+// Ideally we would like to keep track of Signature Algorithms and
+// Key Encryption Algorithms separately, and force the APIs to
+// type-check at compile time, but this allows users to pass a value from a
+// jwk.Key directly
+type KeyAlgorithm interface {
+ String() string
+ IsDeprecated() bool
+}
+
+var errInvalidKeyAlgorithm = errors.New(`invalid key algorithm`)
+
+func ErrInvalidKeyAlgorithm() error {
+ return errInvalidKeyAlgorithm
+}
+
+// KeyAlgorithmFrom takes either a string, `jwa.SignatureAlgorithm`,
+// `jwa.KeyEncryptionAlgorithm`, or `jwa.ContentEncryptionAlgorithm`.
+// and returns a `jwa.KeyAlgorithm`.
+//
+// If the value cannot be handled, it returns an `jwa.InvalidKeyAlgorithm`
+// object instead of returning an error. This design choice was made to allow
+// users to directly pass the return value to functions such as `jws.Sign()`
+func KeyAlgorithmFrom(v any) (KeyAlgorithm, error) {
+ switch v := v.(type) {
+ case SignatureAlgorithm:
+ return v, nil
+ case KeyEncryptionAlgorithm:
+ return v, nil
+ case ContentEncryptionAlgorithm:
+ return v, nil
+ case string:
+ salg, ok := LookupSignatureAlgorithm(v)
+ if ok {
+ return salg, nil
+ }
+
+ kalg, ok := LookupKeyEncryptionAlgorithm(v)
+ if ok {
+ return kalg, nil
+ }
+
+ calg, ok := LookupContentEncryptionAlgorithm(v)
+ if ok {
+ return calg, nil
+ }
+
+ return nil, fmt.Errorf(`invalid key value: %q: %w`, v, errInvalidKeyAlgorithm)
+ default:
+ return nil, fmt.Errorf(`invalid key type: %T: %w`, v, errInvalidKeyAlgorithm)
+ }
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/key_encryption_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwa/key_encryption_gen.go
new file mode 100644
index 0000000000..716c43cd04
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/key_encryption_gen.go
@@ -0,0 +1,268 @@
+// Code generated by tools/cmd/genjwa/main.go. DO NOT EDIT.
+
+package jwa
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "sync"
+
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+)
+
+var muAllKeyEncryptionAlgorithm sync.RWMutex
+var allKeyEncryptionAlgorithm = map[string]KeyEncryptionAlgorithm{}
+var muListKeyEncryptionAlgorithm sync.RWMutex
+var listKeyEncryptionAlgorithm []KeyEncryptionAlgorithm
+var builtinKeyEncryptionAlgorithm = map[string]struct{}{}
+
+func init() {
+ // builtin values for KeyEncryptionAlgorithm
+ algorithms := make([]KeyEncryptionAlgorithm, 19)
+ algorithms[0] = NewKeyEncryptionAlgorithm(tokens.A128GCMKW, WithIsSymmetric(true))
+ algorithms[1] = NewKeyEncryptionAlgorithm(tokens.A128KW, WithIsSymmetric(true))
+ algorithms[2] = NewKeyEncryptionAlgorithm(tokens.A192GCMKW, WithIsSymmetric(true))
+ algorithms[3] = NewKeyEncryptionAlgorithm(tokens.A192KW, WithIsSymmetric(true))
+ algorithms[4] = NewKeyEncryptionAlgorithm(tokens.A256GCMKW, WithIsSymmetric(true))
+ algorithms[5] = NewKeyEncryptionAlgorithm(tokens.A256KW, WithIsSymmetric(true))
+ algorithms[6] = NewKeyEncryptionAlgorithm(tokens.DIRECT, WithIsSymmetric(true))
+ algorithms[7] = NewKeyEncryptionAlgorithm(tokens.ECDH_ES)
+ algorithms[8] = NewKeyEncryptionAlgorithm(tokens.ECDH_ES_A128KW)
+ algorithms[9] = NewKeyEncryptionAlgorithm(tokens.ECDH_ES_A192KW)
+ algorithms[10] = NewKeyEncryptionAlgorithm(tokens.ECDH_ES_A256KW)
+ algorithms[11] = NewKeyEncryptionAlgorithm(tokens.PBES2_HS256_A128KW, WithIsSymmetric(true))
+ algorithms[12] = NewKeyEncryptionAlgorithm(tokens.PBES2_HS384_A192KW, WithIsSymmetric(true))
+ algorithms[13] = NewKeyEncryptionAlgorithm(tokens.PBES2_HS512_A256KW, WithIsSymmetric(true))
+ algorithms[14] = NewKeyEncryptionAlgorithm(tokens.RSA1_5, WithDeprecated(true))
+ algorithms[15] = NewKeyEncryptionAlgorithm(tokens.RSA_OAEP)
+ algorithms[16] = NewKeyEncryptionAlgorithm(tokens.RSA_OAEP_256)
+ algorithms[17] = NewKeyEncryptionAlgorithm(tokens.RSA_OAEP_384)
+ algorithms[18] = NewKeyEncryptionAlgorithm(tokens.RSA_OAEP_512)
+
+ RegisterKeyEncryptionAlgorithm(algorithms...)
+}
+
+// A128GCMKW returns an object representing AES-GCM key wrap (128) key encryption algorithm.
+func A128GCMKW() KeyEncryptionAlgorithm {
+ return lookupBuiltinKeyEncryptionAlgorithm(tokens.A128GCMKW)
+}
+
+// A128KW returns an object representing AES key wrap (128) key encryption algorithm.
+func A128KW() KeyEncryptionAlgorithm {
+ return lookupBuiltinKeyEncryptionAlgorithm(tokens.A128KW)
+}
+
+// A192GCMKW returns an object representing AES-GCM key wrap (192) key encryption algorithm.
+func A192GCMKW() KeyEncryptionAlgorithm {
+ return lookupBuiltinKeyEncryptionAlgorithm(tokens.A192GCMKW)
+}
+
+// A192KW returns an object representing AES key wrap (192) key encryption algorithm.
+func A192KW() KeyEncryptionAlgorithm {
+ return lookupBuiltinKeyEncryptionAlgorithm(tokens.A192KW)
+}
+
+// A256GCMKW returns an object representing AES-GCM key wrap (256) key encryption algorithm.
+func A256GCMKW() KeyEncryptionAlgorithm {
+ return lookupBuiltinKeyEncryptionAlgorithm(tokens.A256GCMKW)
+}
+
+// A256KW returns an object representing AES key wrap (256) key encryption algorithm.
+func A256KW() KeyEncryptionAlgorithm {
+ return lookupBuiltinKeyEncryptionAlgorithm(tokens.A256KW)
+}
+
+// DIRECT returns an object representing Direct key encryption algorithm.
+func DIRECT() KeyEncryptionAlgorithm {
+ return lookupBuiltinKeyEncryptionAlgorithm(tokens.DIRECT)
+}
+
+// ECDH_ES returns an object representing ECDH-ES key encryption algorithm.
+func ECDH_ES() KeyEncryptionAlgorithm {
+ return lookupBuiltinKeyEncryptionAlgorithm(tokens.ECDH_ES)
+}
+
+// ECDH_ES_A128KW returns an object representing ECDH-ES + AES key wrap (128) key encryption algorithm.
+func ECDH_ES_A128KW() KeyEncryptionAlgorithm {
+ return lookupBuiltinKeyEncryptionAlgorithm(tokens.ECDH_ES_A128KW)
+}
+
+// ECDH_ES_A192KW returns an object representing ECDH-ES + AES key wrap (192) key encryption algorithm.
+func ECDH_ES_A192KW() KeyEncryptionAlgorithm {
+ return lookupBuiltinKeyEncryptionAlgorithm(tokens.ECDH_ES_A192KW)
+}
+
+// ECDH_ES_A256KW returns an object representing ECDH-ES + AES key wrap (256) key encryption algorithm.
+func ECDH_ES_A256KW() KeyEncryptionAlgorithm {
+ return lookupBuiltinKeyEncryptionAlgorithm(tokens.ECDH_ES_A256KW)
+}
+
+// PBES2_HS256_A128KW returns an object representing PBES2 + HMAC-SHA256 + AES key wrap (128) key encryption algorithm.
+func PBES2_HS256_A128KW() KeyEncryptionAlgorithm {
+ return lookupBuiltinKeyEncryptionAlgorithm(tokens.PBES2_HS256_A128KW)
+}
+
+// PBES2_HS384_A192KW returns an object representing PBES2 + HMAC-SHA384 + AES key wrap (192) key encryption algorithm.
+func PBES2_HS384_A192KW() KeyEncryptionAlgorithm {
+ return lookupBuiltinKeyEncryptionAlgorithm(tokens.PBES2_HS384_A192KW)
+}
+
+// PBES2_HS512_A256KW returns an object representing PBES2 + HMAC-SHA512 + AES key wrap (256) key encryption algorithm.
+func PBES2_HS512_A256KW() KeyEncryptionAlgorithm {
+ return lookupBuiltinKeyEncryptionAlgorithm(tokens.PBES2_HS512_A256KW)
+}
+
+// RSA1_5 returns an object representing RSA-PKCS1v1.5 key encryption algorithm.
+func RSA1_5() KeyEncryptionAlgorithm {
+ return lookupBuiltinKeyEncryptionAlgorithm(tokens.RSA1_5)
+}
+
+// RSA_OAEP returns an object representing RSA-OAEP-SHA1 key encryption algorithm.
+func RSA_OAEP() KeyEncryptionAlgorithm {
+ return lookupBuiltinKeyEncryptionAlgorithm(tokens.RSA_OAEP)
+}
+
+// RSA_OAEP_256 returns an object representing RSA-OAEP-SHA256 key encryption algorithm.
+func RSA_OAEP_256() KeyEncryptionAlgorithm {
+ return lookupBuiltinKeyEncryptionAlgorithm(tokens.RSA_OAEP_256)
+}
+
+// RSA_OAEP_384 returns an object representing RSA-OAEP-SHA384 key encryption algorithm.
+func RSA_OAEP_384() KeyEncryptionAlgorithm {
+ return lookupBuiltinKeyEncryptionAlgorithm(tokens.RSA_OAEP_384)
+}
+
+// RSA_OAEP_512 returns an object representing RSA-OAEP-SHA512 key encryption algorithm.
+func RSA_OAEP_512() KeyEncryptionAlgorithm {
+ return lookupBuiltinKeyEncryptionAlgorithm(tokens.RSA_OAEP_512)
+}
+
+func lookupBuiltinKeyEncryptionAlgorithm(name string) KeyEncryptionAlgorithm {
+ muAllKeyEncryptionAlgorithm.RLock()
+ v, ok := allKeyEncryptionAlgorithm[name]
+ muAllKeyEncryptionAlgorithm.RUnlock()
+ if !ok {
+ panic(fmt.Sprintf(`jwa: KeyEncryptionAlgorithm %q not registered`, name))
+ }
+ return v
+}
+
+// KeyEncryptionAlgorithm represents the various encryption algorithms as described in https://tools.ietf.org/html/rfc7518#section-4.1
+type KeyEncryptionAlgorithm struct {
+ name string
+ deprecated bool
+ isSymmetric bool
+}
+
+func (s KeyEncryptionAlgorithm) String() string {
+ return s.name
+}
+
+// IsDeprecated returns true if the KeyEncryptionAlgorithm object is deprecated.
+func (s KeyEncryptionAlgorithm) IsDeprecated() bool {
+ return s.deprecated
+}
+
+// IsSymmetric returns true if the KeyEncryptionAlgorithm object is symmetric. Symmetric algorithms use the same key for both encryption and decryption.
+func (s KeyEncryptionAlgorithm) IsSymmetric() bool {
+ return s.isSymmetric
+}
+
+// EmptyKeyEncryptionAlgorithm returns an empty KeyEncryptionAlgorithm object, used as a zero value.
+func EmptyKeyEncryptionAlgorithm() KeyEncryptionAlgorithm {
+ return KeyEncryptionAlgorithm{}
+}
+
+// NewKeyEncryptionAlgorithm creates a new KeyEncryptionAlgorithm object with the given name.
+func NewKeyEncryptionAlgorithm(name string, options ...NewKeyEncryptionAlgorithmOption) KeyEncryptionAlgorithm {
+ var deprecated bool
+ var isSymmetric bool
+ for _, option := range options {
+ switch option.Ident() {
+ case identIsSymmetric{}:
+ if err := option.Value(&isSymmetric); err != nil {
+ panic("jwa.NewKeyEncryptionAlgorithm: WithIsSymmetric option must be a boolean")
+ }
+ case identDeprecated{}:
+ if err := option.Value(&deprecated); err != nil {
+ panic("jwa.NewKeyEncryptionAlgorithm: WithDeprecated option must be a boolean")
+ }
+ }
+ }
+ return KeyEncryptionAlgorithm{name: name, deprecated: deprecated, isSymmetric: isSymmetric}
+}
+
+// LookupKeyEncryptionAlgorithm returns the KeyEncryptionAlgorithm object for the given name.
+func LookupKeyEncryptionAlgorithm(name string) (KeyEncryptionAlgorithm, bool) {
+ muAllKeyEncryptionAlgorithm.RLock()
+ v, ok := allKeyEncryptionAlgorithm[name]
+ muAllKeyEncryptionAlgorithm.RUnlock()
+ return v, ok
+}
+
+// RegisterKeyEncryptionAlgorithm registers a new KeyEncryptionAlgorithm. The signature value must be immutable
+// and safe to be used by multiple goroutines, as it is going to be shared with all other users of this library.
+func RegisterKeyEncryptionAlgorithm(algorithms ...KeyEncryptionAlgorithm) {
+ muAllKeyEncryptionAlgorithm.Lock()
+ for _, alg := range algorithms {
+ allKeyEncryptionAlgorithm[alg.String()] = alg
+ }
+ muAllKeyEncryptionAlgorithm.Unlock()
+ rebuildKeyEncryptionAlgorithm()
+}
+
+// UnregisterKeyEncryptionAlgorithm unregisters a KeyEncryptionAlgorithm from its known database.
+// Non-existent entries, as well as built-in algorithms will silently be ignored.
+func UnregisterKeyEncryptionAlgorithm(algorithms ...KeyEncryptionAlgorithm) {
+ muAllKeyEncryptionAlgorithm.Lock()
+ for _, alg := range algorithms {
+ if _, ok := builtinKeyEncryptionAlgorithm[alg.String()]; ok {
+ continue
+ }
+ delete(allKeyEncryptionAlgorithm, alg.String())
+ }
+ muAllKeyEncryptionAlgorithm.Unlock()
+ rebuildKeyEncryptionAlgorithm()
+}
+
+func rebuildKeyEncryptionAlgorithm() {
+ list := make([]KeyEncryptionAlgorithm, 0, len(allKeyEncryptionAlgorithm))
+ muAllKeyEncryptionAlgorithm.RLock()
+ for _, v := range allKeyEncryptionAlgorithm {
+ list = append(list, v)
+ }
+ muAllKeyEncryptionAlgorithm.RUnlock()
+ sort.Slice(list, func(i, j int) bool {
+ return list[i].String() < list[j].String()
+ })
+ muListKeyEncryptionAlgorithm.Lock()
+ listKeyEncryptionAlgorithm = list
+ muListKeyEncryptionAlgorithm.Unlock()
+}
+
+// KeyEncryptionAlgorithms returns a list of all available values for KeyEncryptionAlgorithm.
+func KeyEncryptionAlgorithms() []KeyEncryptionAlgorithm {
+ muListKeyEncryptionAlgorithm.RLock()
+ defer muListKeyEncryptionAlgorithm.RUnlock()
+ return listKeyEncryptionAlgorithm
+}
+
+// MarshalJSON serializes the KeyEncryptionAlgorithm object to a JSON string.
+func (s KeyEncryptionAlgorithm) MarshalJSON() ([]byte, error) {
+ return json.Marshal(s.String())
+}
+
+// UnmarshalJSON deserializes the JSON string to a KeyEncryptionAlgorithm object.
+func (s *KeyEncryptionAlgorithm) UnmarshalJSON(data []byte) error {
+ var name string
+ if err := json.Unmarshal(data, &name); err != nil {
+ return fmt.Errorf(`failed to unmarshal KeyEncryptionAlgorithm: %w`, err)
+ }
+ v, ok := LookupKeyEncryptionAlgorithm(name)
+ if !ok {
+ return fmt.Errorf(`unknown KeyEncryptionAlgorithm: %q`, name)
+ }
+ *s = v
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/key_type_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwa/key_type_gen.go
new file mode 100644
index 0000000000..8bc5ebb5f0
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/key_type_gen.go
@@ -0,0 +1,172 @@
+// Code generated by tools/cmd/genjwa/main.go. DO NOT EDIT.
+
+package jwa
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "sync"
+)
+
+var muAllKeyType sync.RWMutex
+var allKeyType = map[string]KeyType{}
+var muListKeyType sync.RWMutex
+var listKeyType []KeyType
+var builtinKeyType = map[string]struct{}{}
+
+func init() {
+ // builtin values for KeyType
+ algorithms := make([]KeyType, 4)
+ algorithms[0] = NewKeyType("EC")
+ algorithms[1] = NewKeyType("OKP")
+ algorithms[2] = NewKeyType("oct")
+ algorithms[3] = NewKeyType("RSA")
+
+ RegisterKeyType(algorithms...)
+}
+
+// EC returns an object representing EC. Elliptic Curve
+func EC() KeyType {
+ return lookupBuiltinKeyType("EC")
+}
+
+var invalidKeyType = NewKeyType("")
+
+// InvalidKeyType returns an object representing invalid key type. Invalid KeyType
+func InvalidKeyType() KeyType {
+ return invalidKeyType
+}
+
+// OKP returns an object representing OKP. Octet string key pairs
+func OKP() KeyType {
+ return lookupBuiltinKeyType("OKP")
+}
+
+// OctetSeq returns an object representing oct. Octet sequence (used to represent symmetric keys)
+func OctetSeq() KeyType {
+ return lookupBuiltinKeyType("oct")
+}
+
+// RSA returns an object representing RSA. RSA
+func RSA() KeyType {
+ return lookupBuiltinKeyType("RSA")
+}
+
+func lookupBuiltinKeyType(name string) KeyType {
+ muAllKeyType.RLock()
+ v, ok := allKeyType[name]
+ muAllKeyType.RUnlock()
+ if !ok {
+ panic(fmt.Sprintf(`jwa: KeyType %q not registered`, name))
+ }
+ return v
+}
+
+// KeyType represents the key type ("kty") that are supported
+type KeyType struct {
+ name string
+ deprecated bool
+}
+
+func (s KeyType) String() string {
+ return s.name
+}
+
+// IsDeprecated returns true if the KeyType object is deprecated.
+func (s KeyType) IsDeprecated() bool {
+ return s.deprecated
+}
+
+// EmptyKeyType returns an empty KeyType object, used as a zero value.
+func EmptyKeyType() KeyType {
+ return KeyType{}
+}
+
+// NewKeyType creates a new KeyType object with the given name.
+func NewKeyType(name string, options ...NewAlgorithmOption) KeyType {
+ var deprecated bool
+ for _, option := range options {
+ switch option.Ident() {
+ case identDeprecated{}:
+ if err := option.Value(&deprecated); err != nil {
+ panic("jwa.NewKeyType: WithDeprecated option must be a boolean")
+ }
+ }
+ }
+ return KeyType{name: name, deprecated: deprecated}
+}
+
+// LookupKeyType returns the KeyType object for the given name.
+func LookupKeyType(name string) (KeyType, bool) {
+ muAllKeyType.RLock()
+ v, ok := allKeyType[name]
+ muAllKeyType.RUnlock()
+ return v, ok
+}
+
+// RegisterKeyType registers a new KeyType. The signature value must be immutable
+// and safe to be used by multiple goroutines, as it is going to be shared with all other users of this library.
+func RegisterKeyType(algorithms ...KeyType) {
+ muAllKeyType.Lock()
+ for _, alg := range algorithms {
+ allKeyType[alg.String()] = alg
+ }
+ muAllKeyType.Unlock()
+ rebuildKeyType()
+}
+
+// UnregisterKeyType unregisters a KeyType from its known database.
+// Non-existent entries, as well as built-in algorithms will silently be ignored.
+func UnregisterKeyType(algorithms ...KeyType) {
+ muAllKeyType.Lock()
+ for _, alg := range algorithms {
+ if _, ok := builtinKeyType[alg.String()]; ok {
+ continue
+ }
+ delete(allKeyType, alg.String())
+ }
+ muAllKeyType.Unlock()
+ rebuildKeyType()
+}
+
+func rebuildKeyType() {
+ list := make([]KeyType, 0, len(allKeyType))
+ muAllKeyType.RLock()
+ for _, v := range allKeyType {
+ list = append(list, v)
+ }
+ muAllKeyType.RUnlock()
+ sort.Slice(list, func(i, j int) bool {
+ return list[i].String() < list[j].String()
+ })
+ muListKeyType.Lock()
+ listKeyType = list
+ muListKeyType.Unlock()
+}
+
+// KeyTypes returns a list of all available values for KeyType.
+func KeyTypes() []KeyType {
+ muListKeyType.RLock()
+ defer muListKeyType.RUnlock()
+ return listKeyType
+}
+
+// MarshalJSON serializes the KeyType object to a JSON string.
+func (s KeyType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(s.String())
+}
+
+// UnmarshalJSON deserializes the JSON string to a KeyType object.
+func (s *KeyType) UnmarshalJSON(data []byte) error {
+ var name string
+ if err := json.Unmarshal(data, &name); err != nil {
+ return fmt.Errorf(`failed to unmarshal KeyType: %w`, err)
+ }
+ v, ok := LookupKeyType(name)
+ if !ok {
+ return fmt.Errorf(`unknown KeyType: %q`, name)
+ }
+ *s = v
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/options.yaml b/vendor/github.com/lestrrat-go/jwx/v3/jwa/options.yaml
new file mode 100644
index 0000000000..dd498c680e
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/options.yaml
@@ -0,0 +1,41 @@
+package_name: jwa
+output: jwa/options_gen.go
+interfaces:
+ - name: NewAlgorithmOption
+ methods:
+ - newSignatureAlgorithmOption
+ - newKeyEncryptionAlgorithmOption
+ - newSignatureKeyEncryptionAlgorithmOption
+ comment: |
+ NewAlgorithmOption represents an option that can be passed to any of the constructor functions
+ - name: NewSignatureAlgorithmOption
+ methods:
+ - newSignatureAlgorithmOption
+ comment: |
+ NewSignatureAlgorithmOption represents an option that can be passed to the NewSignatureAlgorithm
+ - name: NewKeyEncryptionAlgorithmOption
+ methods:
+ - newKeyEncryptionAlgorithmOption
+ comment: |
+ NewKeyEncryptionAlgorithmOption represents an option that can be passed to the NewKeyEncryptionAlgorithm
+ - name: NewSignatureKeyEncryptionAlgorithmOption
+ comment: |
+ NewSignatureKeyEncryptionAlgorithmOption represents an option that can be passed to both
+ NewSignatureAlgorithm and NewKeyEncryptionAlgorithm
+ methods:
+ - newSignatureAlgorithmOption
+ - newKeyEncryptionAlgorithmOption
+options:
+ - ident: IsSymmetric
+ interface: NewSignatureKeyEncryptionAlgorithmOption
+ argument_type: bool
+ comment: |
+ IsSymmetric specifies that the algorithm is symmetric
+ - ident: Deprecated
+ interface: NewAlgorithmOption
+ argument_type: bool
+ comment: |
+ WithDeprecated specifies that the algorithm is deprecated. In order to
+ un-deprecate an algorithm, you will have to create a new algorithm
+ with the same values but with the Deprecated option set to false, and
+ then call RegisterXXXXAlgorithm with the new algorithm.
\ No newline at end of file
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/options_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwa/options_gen.go
new file mode 100644
index 0000000000..9394ac587a
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/options_gen.go
@@ -0,0 +1,91 @@
+// Code generated by tools/cmd/genoptions/main.go. DO NOT EDIT.
+
+package jwa
+
+import (
+ "github.com/lestrrat-go/option/v2"
+)
+
+type Option = option.Interface
+
+// NewAlgorithmOption represents an option that can be passed to any of the constructor functions
+type NewAlgorithmOption interface {
+ Option
+ newSignatureAlgorithmOption()
+ newKeyEncryptionAlgorithmOption()
+ newSignatureKeyEncryptionAlgorithmOption()
+}
+
+type newAlgorithmOption struct {
+ Option
+}
+
+func (*newAlgorithmOption) newSignatureAlgorithmOption() {}
+
+func (*newAlgorithmOption) newKeyEncryptionAlgorithmOption() {}
+
+func (*newAlgorithmOption) newSignatureKeyEncryptionAlgorithmOption() {}
+
+// NewKeyEncryptionAlgorithmOption represents an option that can be passed to the NewKeyEncryptionAlgorithm
+type NewKeyEncryptionAlgorithmOption interface {
+ Option
+ newKeyEncryptionAlgorithmOption()
+}
+
+type newKeyEncryptionAlgorithmOption struct {
+ Option
+}
+
+func (*newKeyEncryptionAlgorithmOption) newKeyEncryptionAlgorithmOption() {}
+
+// NewSignatureAlgorithmOption represents an option that can be passed to the NewSignatureAlgorithm
+type NewSignatureAlgorithmOption interface {
+ Option
+ newSignatureAlgorithmOption()
+}
+
+type newSignatureAlgorithmOption struct {
+ Option
+}
+
+func (*newSignatureAlgorithmOption) newSignatureAlgorithmOption() {}
+
+// NewSignatureKeyEncryptionAlgorithmOption represents an option that can be passed to both
+// NewSignatureAlgorithm and NewKeyEncryptionAlgorithm
+type NewSignatureKeyEncryptionAlgorithmOption interface {
+ Option
+ newSignatureAlgorithmOption()
+ newKeyEncryptionAlgorithmOption()
+}
+
+type newSignatureKeyEncryptionAlgorithmOption struct {
+ Option
+}
+
+func (*newSignatureKeyEncryptionAlgorithmOption) newSignatureAlgorithmOption() {}
+
+func (*newSignatureKeyEncryptionAlgorithmOption) newKeyEncryptionAlgorithmOption() {}
+
+type identDeprecated struct{}
+type identIsSymmetric struct{}
+
+func (identDeprecated) String() string {
+ return "WithDeprecated"
+}
+
+func (identIsSymmetric) String() string {
+ return "WithIsSymmetric"
+}
+
+// WithDeprecated specifies that the algorithm is deprecated. In order to
+// un-deprecate an algorithm, you will have to create a new algorithm
+// with the same values but with the Deprecated option set to false, and
+// then call RegisterXXXXAlgorithm with the new algorithm.
+func WithDeprecated(v bool) NewAlgorithmOption {
+ return &newAlgorithmOption{option.New(identDeprecated{}, v)}
+}
+
+// IsSymmetric specifies that the algorithm is symmetric
+func WithIsSymmetric(v bool) NewSignatureKeyEncryptionAlgorithmOption {
+ return &newSignatureKeyEncryptionAlgorithmOption{option.New(identIsSymmetric{}, v)}
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/secp2561k.go b/vendor/github.com/lestrrat-go/jwx/v3/jwa/secp2561k.go
new file mode 100644
index 0000000000..e7a6be754d
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/secp2561k.go
@@ -0,0 +1,15 @@
+//go:build jwx_es256k
+// +build jwx_es256k
+
+package jwa
+
+var secp256k1Algorithm = NewEllipticCurveAlgorithm("secp256k1")
+
+// This constant is only available if compiled with jwx_es256k build tag
+func Secp256k1() EllipticCurveAlgorithm {
+ return secp256k1Algorithm
+}
+
+func init() {
+ RegisterEllipticCurveAlgorithm(secp256k1Algorithm)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/signature_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwa/signature_gen.go
new file mode 100644
index 0000000000..653d7d56af
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/signature_gen.go
@@ -0,0 +1,242 @@
+// Code generated by tools/cmd/genjwa/main.go. DO NOT EDIT.
+
+package jwa
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "sync"
+)
+
+var muAllSignatureAlgorithm sync.RWMutex
+var allSignatureAlgorithm = map[string]SignatureAlgorithm{}
+var muListSignatureAlgorithm sync.RWMutex
+var listSignatureAlgorithm []SignatureAlgorithm
+var builtinSignatureAlgorithm = map[string]struct{}{}
+
+func init() {
+ // builtin values for SignatureAlgorithm
+ algorithms := make([]SignatureAlgorithm, 15)
+ algorithms[0] = NewSignatureAlgorithm("ES256")
+ algorithms[1] = NewSignatureAlgorithm("ES256K")
+ algorithms[2] = NewSignatureAlgorithm("ES384")
+ algorithms[3] = NewSignatureAlgorithm("ES512")
+ algorithms[4] = NewSignatureAlgorithm("EdDSA")
+ algorithms[5] = NewSignatureAlgorithm("HS256", WithIsSymmetric(true))
+ algorithms[6] = NewSignatureAlgorithm("HS384", WithIsSymmetric(true))
+ algorithms[7] = NewSignatureAlgorithm("HS512", WithIsSymmetric(true))
+ algorithms[8] = NewSignatureAlgorithm("none")
+ algorithms[9] = NewSignatureAlgorithm("PS256")
+ algorithms[10] = NewSignatureAlgorithm("PS384")
+ algorithms[11] = NewSignatureAlgorithm("PS512")
+ algorithms[12] = NewSignatureAlgorithm("RS256")
+ algorithms[13] = NewSignatureAlgorithm("RS384")
+ algorithms[14] = NewSignatureAlgorithm("RS512")
+
+ RegisterSignatureAlgorithm(algorithms...)
+}
+
+// ES256 returns an object representing ECDSA signature algorithm using P-256 curve and SHA-256.
+func ES256() SignatureAlgorithm {
+ return lookupBuiltinSignatureAlgorithm("ES256")
+}
+
+// ES256K returns an object representing ECDSA signature algorithm using secp256k1 curve and SHA-256.
+func ES256K() SignatureAlgorithm {
+ return lookupBuiltinSignatureAlgorithm("ES256K")
+}
+
+// ES384 returns an object representing ECDSA signature algorithm using P-384 curve and SHA-384.
+func ES384() SignatureAlgorithm {
+ return lookupBuiltinSignatureAlgorithm("ES384")
+}
+
+// ES512 returns an object representing ECDSA signature algorithm using P-521 curve and SHA-512.
+func ES512() SignatureAlgorithm {
+ return lookupBuiltinSignatureAlgorithm("ES512")
+}
+
+// EdDSA returns an object representing EdDSA signature algorithms.
+func EdDSA() SignatureAlgorithm {
+ return lookupBuiltinSignatureAlgorithm("EdDSA")
+}
+
+// HS256 returns an object representing HMAC signature algorithm using SHA-256.
+func HS256() SignatureAlgorithm {
+ return lookupBuiltinSignatureAlgorithm("HS256")
+}
+
+// HS384 returns an object representing HMAC signature algorithm using SHA-384.
+func HS384() SignatureAlgorithm {
+ return lookupBuiltinSignatureAlgorithm("HS384")
+}
+
+// HS512 returns an object representing HMAC signature algorithm using SHA-512.
+func HS512() SignatureAlgorithm {
+ return lookupBuiltinSignatureAlgorithm("HS512")
+}
+
+// NoSignature returns an object representing the lack of a signature algorithm. Using this value specifies that the content should not be signed, which you should avoid doing.
+func NoSignature() SignatureAlgorithm {
+ return lookupBuiltinSignatureAlgorithm("none")
+}
+
+// PS256 returns an object representing RSASSA-PSS signature algorithm using SHA-256 and MGF1-SHA256.
+func PS256() SignatureAlgorithm {
+ return lookupBuiltinSignatureAlgorithm("PS256")
+}
+
+// PS384 returns an object representing RSASSA-PSS signature algorithm using SHA-384 and MGF1-SHA384.
+func PS384() SignatureAlgorithm {
+ return lookupBuiltinSignatureAlgorithm("PS384")
+}
+
+// PS512 returns an object representing RSASSA-PSS signature algorithm using SHA-512 and MGF1-SHA512.
+func PS512() SignatureAlgorithm {
+ return lookupBuiltinSignatureAlgorithm("PS512")
+}
+
+// RS256 returns an object representing RSASSA-PKCS-v1.5 signature algorithm using SHA-256.
+func RS256() SignatureAlgorithm {
+ return lookupBuiltinSignatureAlgorithm("RS256")
+}
+
+// RS384 returns an object representing RSASSA-PKCS-v1.5 signature algorithm using SHA-384.
+func RS384() SignatureAlgorithm {
+ return lookupBuiltinSignatureAlgorithm("RS384")
+}
+
+// RS512 returns an object representing RSASSA-PKCS-v1.5 signature algorithm using SHA-512.
+func RS512() SignatureAlgorithm {
+ return lookupBuiltinSignatureAlgorithm("RS512")
+}
+
+func lookupBuiltinSignatureAlgorithm(name string) SignatureAlgorithm {
+ muAllSignatureAlgorithm.RLock()
+ v, ok := allSignatureAlgorithm[name]
+ muAllSignatureAlgorithm.RUnlock()
+ if !ok {
+ panic(fmt.Sprintf(`jwa: SignatureAlgorithm %q not registered`, name))
+ }
+ return v
+}
+
+// SignatureAlgorithm represents the various signature algorithms as described in https://tools.ietf.org/html/rfc7518#section-3.1
+type SignatureAlgorithm struct {
+ name string
+ deprecated bool
+ isSymmetric bool
+}
+
+func (s SignatureAlgorithm) String() string {
+ return s.name
+}
+
+// IsDeprecated returns true if the SignatureAlgorithm object is deprecated.
+func (s SignatureAlgorithm) IsDeprecated() bool {
+ return s.deprecated
+}
+
+// IsSymmetric returns true if the SignatureAlgorithm object is symmetric. Symmetric algorithms use the same key for both encryption and decryption.
+func (s SignatureAlgorithm) IsSymmetric() bool {
+ return s.isSymmetric
+}
+
+// EmptySignatureAlgorithm returns an empty SignatureAlgorithm object, used as a zero value.
+func EmptySignatureAlgorithm() SignatureAlgorithm {
+ return SignatureAlgorithm{}
+}
+
+// NewSignatureAlgorithm creates a new SignatureAlgorithm object with the given name.
+func NewSignatureAlgorithm(name string, options ...NewSignatureAlgorithmOption) SignatureAlgorithm {
+ var deprecated bool
+ var isSymmetric bool
+ for _, option := range options {
+ switch option.Ident() {
+ case identIsSymmetric{}:
+ if err := option.Value(&isSymmetric); err != nil {
+ panic("jwa.NewSignatureAlgorithm: WithIsSymmetric option must be a boolean")
+ }
+ case identDeprecated{}:
+ if err := option.Value(&deprecated); err != nil {
+ panic("jwa.NewSignatureAlgorithm: WithDeprecated option must be a boolean")
+ }
+ }
+ }
+ return SignatureAlgorithm{name: name, deprecated: deprecated, isSymmetric: isSymmetric}
+}
+
+// LookupSignatureAlgorithm returns the SignatureAlgorithm object for the given name.
+func LookupSignatureAlgorithm(name string) (SignatureAlgorithm, bool) {
+ muAllSignatureAlgorithm.RLock()
+ v, ok := allSignatureAlgorithm[name]
+ muAllSignatureAlgorithm.RUnlock()
+ return v, ok
+}
+
+// RegisterSignatureAlgorithm registers a new SignatureAlgorithm. The signature value must be immutable
+// and safe to be used by multiple goroutines, as it is going to be shared with all other users of this library.
+func RegisterSignatureAlgorithm(algorithms ...SignatureAlgorithm) {
+ muAllSignatureAlgorithm.Lock()
+ for _, alg := range algorithms {
+ allSignatureAlgorithm[alg.String()] = alg
+ }
+ muAllSignatureAlgorithm.Unlock()
+ rebuildSignatureAlgorithm()
+}
+
+// UnregisterSignatureAlgorithm unregisters a SignatureAlgorithm from its known database.
+// Non-existent entries, as well as built-in algorithms will silently be ignored.
+func UnregisterSignatureAlgorithm(algorithms ...SignatureAlgorithm) {
+ muAllSignatureAlgorithm.Lock()
+ for _, alg := range algorithms {
+ if _, ok := builtinSignatureAlgorithm[alg.String()]; ok {
+ continue
+ }
+ delete(allSignatureAlgorithm, alg.String())
+ }
+ muAllSignatureAlgorithm.Unlock()
+ rebuildSignatureAlgorithm()
+}
+
+func rebuildSignatureAlgorithm() {
+ list := make([]SignatureAlgorithm, 0, len(allSignatureAlgorithm))
+ muAllSignatureAlgorithm.RLock()
+ for _, v := range allSignatureAlgorithm {
+ list = append(list, v)
+ }
+ muAllSignatureAlgorithm.RUnlock()
+ sort.Slice(list, func(i, j int) bool {
+ return list[i].String() < list[j].String()
+ })
+ muListSignatureAlgorithm.Lock()
+ listSignatureAlgorithm = list
+ muListSignatureAlgorithm.Unlock()
+}
+
+// SignatureAlgorithms returns a list of all available values for SignatureAlgorithm.
+func SignatureAlgorithms() []SignatureAlgorithm {
+ muListSignatureAlgorithm.RLock()
+ defer muListSignatureAlgorithm.RUnlock()
+ return listSignatureAlgorithm
+}
+
+// MarshalJSON serializes the SignatureAlgorithm object to a JSON string.
+func (s SignatureAlgorithm) MarshalJSON() ([]byte, error) {
+ return json.Marshal(s.String())
+}
+
+// UnmarshalJSON deserializes the JSON string to a SignatureAlgorithm object.
+func (s *SignatureAlgorithm) UnmarshalJSON(data []byte) error {
+ var name string
+ if err := json.Unmarshal(data, &name); err != nil {
+ return fmt.Errorf(`failed to unmarshal SignatureAlgorithm: %w`, err)
+ }
+ v, ok := LookupSignatureAlgorithm(name)
+ if !ok {
+ return fmt.Errorf(`unknown SignatureAlgorithm: %q`, name)
+ }
+ *s = v
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwe/BUILD.bazel
new file mode 100644
index 0000000000..0719efd2dc
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/BUILD.bazel
@@ -0,0 +1,70 @@
+load("@rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "jwe",
+ srcs = [
+ "compress.go",
+ "decrypt.go",
+ "encrypt.go",
+ "errors.go",
+ "filter.go",
+ "headers.go",
+ "headers_gen.go",
+ "interface.go",
+ "io.go",
+ "jwe.go",
+ "key_provider.go",
+ "message.go",
+ "options.go",
+ "options_gen.go",
+ ],
+ importpath = "github.com/lestrrat-go/jwx/v3/jwe",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//cert",
+ "//internal/base64",
+ "//transform",
+ "//internal/json",
+ "//internal/tokens",
+ "//internal/keyconv",
+ "//internal/pool",
+ "//jwa",
+ "//jwe/internal/aescbc",
+ "//jwe/internal/cipher",
+ "//jwe/internal/content_crypt",
+ "//jwe/internal/keygen",
+ "//jwe/jwebb",
+ "//jwk",
+ "@com_github_lestrrat_go_blackmagic//:blackmagic",
+ "@com_github_lestrrat_go_option_v2//:option",
+ "@org_golang_x_crypto//pbkdf2",
+ ],
+)
+
+go_test(
+ name = "jwe_test",
+ srcs = [
+ "filter_test.go",
+ "gh402_test.go",
+ "headers_test.go",
+ "jwe_test.go",
+ "message_test.go",
+ "options_gen_test.go",
+ "speed_test.go",
+ ],
+ embed = [":jwe"],
+ deps = [
+ "//cert",
+ "//internal/json",
+ "//internal/jwxtest",
+ "//jwa",
+ "//jwk",
+ "@com_github_stretchr_testify//require",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":jwe",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/README.md b/vendor/github.com/lestrrat-go/jwx/v3/jwe/README.md
new file mode 100644
index 0000000000..c85d05bbbe
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/README.md
@@ -0,0 +1,94 @@
+# JWE [](https://pkg.go.dev/github.com/lestrrat-go/jwx/v3/jwe)
+
+Package jwe implements JWE as described in [RFC7516](https://tools.ietf.org/html/rfc7516)
+
+* Encrypt and Decrypt arbitrary data
+* Content compression and decompression
+* Add arbitrary fields in the JWE header object
+
+How-to style documentation can be found in the [docs directory](../docs).
+
+Examples are located in the examples directory ([jwe_example_test.go](../examples/jwe_example_test.go))
+
+Supported key encryption algorithm:
+
+| Algorithm | Supported? | Constant in [jwa](../jwa) |
+|:-----------------------------------------|:-----------|:-------------------------|
+| RSA-PKCS1v1.5 | YES | jwa.RSA1_5 |
+| RSA-OAEP-SHA1 | YES | jwa.RSA_OAEP |
+| RSA-OAEP-SHA256 | YES | jwa.RSA_OAEP_256 |
+| AES key wrap (128) | YES | jwa.A128KW |
+| AES key wrap (192) | YES | jwa.A192KW |
+| AES key wrap (256) | YES | jwa.A256KW |
+| Direct encryption | YES (1) | jwa.DIRECT |
+| ECDH-ES | YES (1) | jwa.ECDH_ES |
+| ECDH-ES + AES key wrap (128) | YES | jwa.ECDH_ES_A128KW |
+| ECDH-ES + AES key wrap (192) | YES | jwa.ECDH_ES_A192KW |
+| ECDH-ES + AES key wrap (256) | YES | jwa.ECDH_ES_A256KW |
+| AES-GCM key wrap (128) | YES | jwa.A128GCMKW |
+| AES-GCM key wrap (192) | YES | jwa.A192GCMKW |
+| AES-GCM key wrap (256) | YES | jwa.A256GCMKW |
+| PBES2 + HMAC-SHA256 + AES key wrap (128) | YES | jwa.PBES2_HS256_A128KW |
+| PBES2 + HMAC-SHA384 + AES key wrap (192) | YES | jwa.PBES2_HS384_A192KW |
+| PBES2 + HMAC-SHA512 + AES key wrap (256) | YES | jwa.PBES2_HS512_A256KW |
+
+* Note 1: Single-recipient only
+
+Supported content encryption algorithm:
+
+| Algorithm | Supported? | Constant in [jwa](../jwa) |
+|:----------------------------|:-----------|:--------------------------|
+| AES-CBC + HMAC-SHA256 (128) | YES | jwa.A128CBC_HS256 |
+| AES-CBC + HMAC-SHA384 (192) | YES | jwa.A192CBC_HS384 |
+| AES-CBC + HMAC-SHA512 (256) | YES | jwa.A256CBC_HS512 |
+| AES-GCM (128) | YES | jwa.A128GCM |
+| AES-GCM (192) | YES | jwa.A192GCM |
+| AES-GCM (256) | YES | jwa.A256GCM |
+
+# SYNOPSIS
+
+## Encrypt data
+
+```go
+func ExampleEncrypt() {
+ privkey, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ log.Printf("failed to generate private key: %s", err)
+ return
+ }
+
+ payload := []byte("Lorem Ipsum")
+
+ encrypted, err := jwe.Encrypt(payload, jwe.WithKey(jwa.RSA1_5, &privkey.PublicKey), jwe.WithContentEncryption(jwa.A128CBC_HS256))
+ if err != nil {
+ log.Printf("failed to encrypt payload: %s", err)
+ return
+ }
+ _ = encrypted
+ // OUTPUT:
+}
+```
+
+## Decrypt data
+
+```go
+func ExampleDecrypt() {
+ privkey, encrypted, err := exampleGenPayload()
+ if err != nil {
+ log.Printf("failed to generate encrypted payload: %s", err)
+ return
+ }
+
+ decrypted, err := jwe.Decrypt(encrypted, jwe.WithKey(jwa.RSA1_5, privkey))
+ if err != nil {
+ log.Printf("failed to decrypt: %s", err)
+ return
+ }
+
+ if string(decrypted) != "Lorem Ipsum" {
+ log.Printf("WHAT?!")
+ return
+ }
+ // OUTPUT:
+}
+```
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/compress.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/compress.go
new file mode 100644
index 0000000000..a1ed158fb0
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/compress.go
@@ -0,0 +1,62 @@
+package jwe
+
+import (
+ "bytes"
+ "compress/flate"
+ "fmt"
+ "io"
+
+ "github.com/lestrrat-go/jwx/v3/internal/pool"
+)
+
+func uncompress(src []byte, maxBufferSize int64) ([]byte, error) {
+ var dst bytes.Buffer
+ r := flate.NewReader(bytes.NewReader(src))
+ defer r.Close()
+ var buf [16384]byte
+ var sofar int64
+ for {
+ n, readErr := r.Read(buf[:])
+ sofar += int64(n)
+ if sofar > maxBufferSize {
+ return nil, fmt.Errorf(`compressed payload exceeds maximum allowed size`)
+ }
+ if readErr != nil {
+ // if we have a read error, and it's not EOF, then we need to stop
+ if readErr != io.EOF {
+ return nil, fmt.Errorf(`failed to read inflated data: %w`, readErr)
+ }
+ }
+
+ if _, err := dst.Write(buf[:n]); err != nil {
+ return nil, fmt.Errorf(`failed to write inflated data: %w`, err)
+ }
+
+ if readErr != nil {
+ // if it got here, then readErr == io.EOF, we're done
+ return dst.Bytes(), nil
+ }
+ }
+}
+
+func compress(plaintext []byte) ([]byte, error) {
+ buf := pool.BytesBuffer().Get()
+ defer pool.BytesBuffer().Put(buf)
+
+ w, _ := flate.NewWriter(buf, 1)
+ in := plaintext
+ for len(in) > 0 {
+ n, err := w.Write(in)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to write to compression writer: %w`, err)
+ }
+ in = in[n:]
+ }
+ if err := w.Close(); err != nil {
+ return nil, fmt.Errorf(`failed to close compression writer: %w`, err)
+ }
+
+ ret := make([]byte, buf.Len())
+ copy(ret, buf.Bytes())
+ return ret, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/decrypt.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/decrypt.go
new file mode 100644
index 0000000000..9429d84b1b
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/decrypt.go
@@ -0,0 +1,227 @@
+package jwe
+
+import (
+ "fmt"
+
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt"
+ "github.com/lestrrat-go/jwx/v3/jwe/jwebb"
+)
+
+// decrypter is responsible for taking various components to decrypt a message.
+// its operation is not concurrency safe. You must provide locking yourself
+//
+//nolint:govet
+type decrypter struct {
+ aad []byte
+ apu []byte
+ apv []byte
+ cek *[]byte
+ computedAad []byte
+ iv []byte
+ keyiv []byte
+ keysalt []byte
+ keytag []byte
+ tag []byte
+ privkey any
+ pubkey any
+ ctalg jwa.ContentEncryptionAlgorithm
+ keyalg jwa.KeyEncryptionAlgorithm
+ cipher content_crypt.Cipher
+ keycount int
+}
+
+// newDecrypter Creates a new Decrypter instance. You must supply the
+// rest of parameters via their respective setter methods before
+// calling Decrypt().
+//
+// privkey must be a private key in its "raw" format (i.e. something like
+// *rsa.PrivateKey, instead of jwk.Key)
+//
+// You should consider this object immutable once you assign values to it.
+func newDecrypter(keyalg jwa.KeyEncryptionAlgorithm, ctalg jwa.ContentEncryptionAlgorithm, privkey any) *decrypter {
+ return &decrypter{
+ ctalg: ctalg,
+ keyalg: keyalg,
+ privkey: privkey,
+ }
+}
+
+func (d *decrypter) AgreementPartyUInfo(apu []byte) *decrypter {
+ d.apu = apu
+ return d
+}
+
+func (d *decrypter) AgreementPartyVInfo(apv []byte) *decrypter {
+ d.apv = apv
+ return d
+}
+
+func (d *decrypter) AuthenticatedData(aad []byte) *decrypter {
+ d.aad = aad
+ return d
+}
+
+func (d *decrypter) ComputedAuthenticatedData(aad []byte) *decrypter {
+ d.computedAad = aad
+ return d
+}
+
+func (d *decrypter) ContentEncryptionAlgorithm(ctalg jwa.ContentEncryptionAlgorithm) *decrypter {
+ d.ctalg = ctalg
+ return d
+}
+
+func (d *decrypter) InitializationVector(iv []byte) *decrypter {
+ d.iv = iv
+ return d
+}
+
+func (d *decrypter) KeyCount(keycount int) *decrypter {
+ d.keycount = keycount
+ return d
+}
+
+func (d *decrypter) KeyInitializationVector(keyiv []byte) *decrypter {
+ d.keyiv = keyiv
+ return d
+}
+
+func (d *decrypter) KeySalt(keysalt []byte) *decrypter {
+ d.keysalt = keysalt
+ return d
+}
+
+func (d *decrypter) KeyTag(keytag []byte) *decrypter {
+ d.keytag = keytag
+ return d
+}
+
+// PublicKey sets the public key to be used in decoding EC based encryptions.
+// The key must be in its "raw" format (i.e. *ecdsa.PublicKey, instead of jwk.Key)
+func (d *decrypter) PublicKey(pubkey any) *decrypter {
+ d.pubkey = pubkey
+ return d
+}
+
+func (d *decrypter) Tag(tag []byte) *decrypter {
+ d.tag = tag
+ return d
+}
+
+func (d *decrypter) CEK(ptr *[]byte) *decrypter {
+ d.cek = ptr
+ return d
+}
+
+func (d *decrypter) ContentCipher() (content_crypt.Cipher, error) {
+ if d.cipher == nil {
+ cipher, err := jwebb.CreateContentCipher(d.ctalg.String())
+ if err != nil {
+ return nil, err
+ }
+ d.cipher = cipher
+ }
+
+ return d.cipher, nil
+}
+
+func (d *decrypter) Decrypt(recipient Recipient, ciphertext []byte, msg *Message) (plaintext []byte, err error) {
+ cek, keyerr := d.DecryptKey(recipient, msg)
+ if keyerr != nil {
+ err = fmt.Errorf(`failed to decrypt key: %w`, keyerr)
+ return
+ }
+
+ cipher, ciphererr := d.ContentCipher()
+ if ciphererr != nil {
+ err = fmt.Errorf(`failed to fetch content crypt cipher: %w`, ciphererr)
+ return
+ }
+
+ computedAad := d.computedAad
+ if d.aad != nil {
+ computedAad = append(append(computedAad, tokens.Period), d.aad...)
+ }
+
+ plaintext, err = cipher.Decrypt(cek, d.iv, ciphertext, d.tag, computedAad)
+ if err != nil {
+ err = fmt.Errorf(`failed to decrypt payload: %w`, err)
+ return
+ }
+
+ if d.cek != nil {
+ *d.cek = cek
+ }
+ return plaintext, nil
+}
+
+func (d *decrypter) DecryptKey(recipient Recipient, msg *Message) (cek []byte, err error) {
+ recipientKey := recipient.EncryptedKey()
+ if kd, ok := d.privkey.(KeyDecrypter); ok {
+ return kd.DecryptKey(d.keyalg, recipientKey, recipient, msg)
+ }
+
+ if jwebb.IsDirect(d.keyalg.String()) {
+ cek, ok := d.privkey.([]byte)
+ if !ok {
+ return nil, fmt.Errorf("decrypt key: []byte is required as the key for %s (got %T)", d.keyalg, d.privkey)
+ }
+ return jwebb.KeyDecryptDirect(recipientKey, recipientKey, d.keyalg.String(), cek)
+ }
+
+ if jwebb.IsPBES2(d.keyalg.String()) {
+ password, ok := d.privkey.([]byte)
+ if !ok {
+ return nil, fmt.Errorf("decrypt key: []byte is required as the password for %s (got %T)", d.keyalg, d.privkey)
+ }
+ salt := []byte(d.keyalg.String())
+ salt = append(salt, byte(0))
+ salt = append(salt, d.keysalt...)
+ return jwebb.KeyDecryptPBES2(recipientKey, recipientKey, d.keyalg.String(), password, salt, d.keycount)
+ }
+
+ if jwebb.IsAESGCMKW(d.keyalg.String()) {
+ sharedkey, ok := d.privkey.([]byte)
+ if !ok {
+ return nil, fmt.Errorf("decrypt key: []byte is required as the key for %s (got %T)", d.keyalg, d.privkey)
+ }
+ return jwebb.KeyDecryptAESGCMKW(recipientKey, recipientKey, d.keyalg.String(), sharedkey, d.keyiv, d.keytag)
+ }
+
+ if jwebb.IsECDHES(d.keyalg.String()) {
+ alg, keysize, keywrap, err := jwebb.KeyEncryptionECDHESKeySize(d.keyalg.String(), d.ctalg.String())
+ if err != nil {
+ return nil, fmt.Errorf(`failed to determine ECDH-ES key size: %w`, err)
+ }
+
+ if !keywrap {
+ return jwebb.KeyDecryptECDHES(recipientKey, cek, alg, d.apu, d.apv, d.privkey, d.pubkey, keysize)
+ }
+ return jwebb.KeyDecryptECDHESKeyWrap(recipientKey, recipientKey, d.keyalg.String(), d.apu, d.apv, d.privkey, d.pubkey, keysize)
+ }
+
+ if jwebb.IsRSA15(d.keyalg.String()) {
+ cipher, err := d.ContentCipher()
+ if err != nil {
+ return nil, fmt.Errorf(`failed to fetch content crypt cipher: %w`, err)
+ }
+ keysize := cipher.KeySize() / 2
+ return jwebb.KeyDecryptRSA15(recipientKey, recipientKey, d.privkey, keysize)
+ }
+
+ if jwebb.IsRSAOAEP(d.keyalg.String()) {
+ return jwebb.KeyDecryptRSAOAEP(recipientKey, recipientKey, d.keyalg.String(), d.privkey)
+ }
+
+ if jwebb.IsAESKW(d.keyalg.String()) {
+ sharedkey, ok := d.privkey.([]byte)
+ if !ok {
+ return nil, fmt.Errorf("[]byte is required as the key to decrypt %s", d.keyalg.String())
+ }
+ return jwebb.KeyDecryptAESKW(recipientKey, recipientKey, d.keyalg.String(), sharedkey)
+ }
+
+ return nil, fmt.Errorf(`unsupported algorithm for key decryption (%s)`, d.keyalg)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/encrypt.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/encrypt.go
new file mode 100644
index 0000000000..e75f342a3d
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/encrypt.go
@@ -0,0 +1,193 @@
+package jwe
+
+import (
+ "crypto/ecdh"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "fmt"
+
+ "github.com/lestrrat-go/jwx/v3/internal/keyconv"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt"
+ "github.com/lestrrat-go/jwx/v3/jwe/internal/keygen"
+ "github.com/lestrrat-go/jwx/v3/jwe/jwebb"
+)
+
+// encrypter is responsible for taking various components to encrypt a key.
+// its operation is not concurrency safe. You must provide locking yourself
+//
+//nolint:govet
+type encrypter struct {
+ apu []byte
+ apv []byte
+ ctalg jwa.ContentEncryptionAlgorithm
+ keyalg jwa.KeyEncryptionAlgorithm
+ pubkey any
+ rawKey any
+ cipher content_crypt.Cipher
+}
+
+// newEncrypter creates a new Encrypter instance with all required parameters.
+// The content cipher is built internally during construction.
+//
+// pubkey must be a public key in its "raw" format (i.e. something like
+// *rsa.PublicKey, instead of jwk.Key)
+//
+// You should consider this object immutable once created.
+func newEncrypter(keyalg jwa.KeyEncryptionAlgorithm, ctalg jwa.ContentEncryptionAlgorithm, pubkey any, rawKey any, apu, apv []byte) (*encrypter, error) {
+ cipher, err := jwebb.CreateContentCipher(ctalg.String())
+ if err != nil {
+ return nil, fmt.Errorf(`failed to create content cipher: %w`, err)
+ }
+
+ return &encrypter{
+ apu: apu,
+ apv: apv,
+ ctalg: ctalg,
+ keyalg: keyalg,
+ pubkey: pubkey,
+ rawKey: rawKey,
+ cipher: cipher,
+ }, nil
+}
+
+func (e *encrypter) EncryptKey(cek []byte) (keygen.ByteSource, error) {
+ if ke, ok := e.pubkey.(KeyEncrypter); ok {
+ encrypted, err := ke.EncryptKey(cek)
+ if err != nil {
+ return nil, err
+ }
+ return keygen.ByteKey(encrypted), nil
+ }
+
+ if jwebb.IsDirect(e.keyalg.String()) {
+ sharedkey, ok := e.rawKey.([]byte)
+ if !ok {
+ return nil, fmt.Errorf("encrypt key: []byte is required as the key for %s (got %T)", e.keyalg, e.rawKey)
+ }
+ return jwebb.KeyEncryptDirect(cek, e.keyalg.String(), sharedkey)
+ }
+
+ if jwebb.IsPBES2(e.keyalg.String()) {
+ password, ok := e.rawKey.([]byte)
+ if !ok {
+ return nil, fmt.Errorf("encrypt key: []byte is required as the password for %s (got %T)", e.keyalg, e.rawKey)
+ }
+ return jwebb.KeyEncryptPBES2(cek, e.keyalg.String(), password)
+ }
+
+ if jwebb.IsAESGCMKW(e.keyalg.String()) {
+ sharedkey, ok := e.rawKey.([]byte)
+ if !ok {
+ return nil, fmt.Errorf("encrypt key: []byte is required as the key for %s (got %T)", e.keyalg, e.rawKey)
+ }
+ return jwebb.KeyEncryptAESGCMKW(cek, e.keyalg.String(), sharedkey)
+ }
+
+ if jwebb.IsECDHES(e.keyalg.String()) {
+ _, keysize, keywrap, err := jwebb.KeyEncryptionECDHESKeySize(e.keyalg.String(), e.ctalg.String())
+ if err != nil {
+ return nil, fmt.Errorf(`failed to determine ECDH-ES key size: %w`, err)
+ }
+
+ // Use rawKey for ECDH-ES operations - it should contain the actual key material
+ keyToUse := e.rawKey
+ if keyToUse == nil {
+ keyToUse = e.pubkey
+ }
+
+ switch key := keyToUse.(type) {
+ case *ecdsa.PublicKey:
+ // no op
+ case ecdsa.PublicKey:
+ keyToUse = &key
+ case *ecdsa.PrivateKey:
+ keyToUse = &key.PublicKey
+ case ecdsa.PrivateKey:
+ keyToUse = &key.PublicKey
+ case *ecdh.PublicKey:
+ // no op
+ case ecdh.PublicKey:
+ keyToUse = &key
+ case ecdh.PrivateKey:
+ keyToUse = key.PublicKey()
+ case *ecdh.PrivateKey:
+ keyToUse = key.PublicKey()
+ }
+
+ // Determine key type and call appropriate function
+ switch key := keyToUse.(type) {
+ case *ecdh.PublicKey:
+ if key.Curve() == ecdh.X25519() {
+ if !keywrap {
+ return jwebb.KeyEncryptECDHESX25519(cek, e.keyalg.String(), e.apu, e.apv, key, keysize, e.ctalg.String())
+ }
+ return jwebb.KeyEncryptECDHESKeyWrapX25519(cek, e.keyalg.String(), e.apu, e.apv, key, keysize, e.ctalg.String())
+ }
+
+ var ecdsaKey *ecdsa.PublicKey
+ if err := keyconv.ECDHToECDSA(&ecdsaKey, key); err != nil {
+ return nil, fmt.Errorf(`encrypt: failed to convert ECDH public key to ECDSA: %w`, err)
+ }
+ keyToUse = ecdsaKey
+ }
+
+ switch key := keyToUse.(type) {
+ case *ecdsa.PublicKey:
+ if !keywrap {
+ return jwebb.KeyEncryptECDHESECDSA(cek, e.keyalg.String(), e.apu, e.apv, key, keysize, e.ctalg.String())
+ }
+ return jwebb.KeyEncryptECDHESKeyWrapECDSA(cek, e.keyalg.String(), e.apu, e.apv, key, keysize, e.ctalg.String())
+ default:
+ return nil, fmt.Errorf(`encrypt: unsupported key type for ECDH-ES: %T`, keyToUse)
+ }
+ }
+
+ if jwebb.IsRSA15(e.keyalg.String()) {
+ keyToUse := e.rawKey
+ if keyToUse == nil {
+ keyToUse = e.pubkey
+ }
+
+ // Handle rsa.PublicKey by value - convert to pointer
+ if pk, ok := keyToUse.(rsa.PublicKey); ok {
+ keyToUse = &pk
+ }
+
+ var pubkey *rsa.PublicKey
+ if err := keyconv.RSAPublicKey(&pubkey, keyToUse); err != nil {
+ return nil, fmt.Errorf(`encrypt: failed to convert to RSA public key: %w`, err)
+ }
+
+ return jwebb.KeyEncryptRSA15(cek, e.keyalg.String(), pubkey)
+ }
+
+ if jwebb.IsRSAOAEP(e.keyalg.String()) {
+ keyToUse := e.rawKey
+ if keyToUse == nil {
+ keyToUse = e.pubkey
+ }
+
+ // Handle rsa.PublicKey by value - convert to pointer
+ if pk, ok := keyToUse.(rsa.PublicKey); ok {
+ keyToUse = &pk
+ }
+
+ var pubkey *rsa.PublicKey
+ if err := keyconv.RSAPublicKey(&pubkey, keyToUse); err != nil {
+ return nil, fmt.Errorf(`encrypt: failed to convert to RSA public key: %w`, err)
+ }
+
+ return jwebb.KeyEncryptRSAOAEP(cek, e.keyalg.String(), pubkey)
+ }
+
+ if jwebb.IsAESKW(e.keyalg.String()) {
+ sharedkey, ok := e.rawKey.([]byte)
+ if !ok {
+ return nil, fmt.Errorf("[]byte is required as the key to encrypt %s", e.keyalg.String())
+ }
+ return jwebb.KeyEncryptAESKW(cek, e.keyalg.String(), sharedkey)
+ }
+
+ return nil, fmt.Errorf(`unsupported algorithm for key encryption (%s)`, e.keyalg)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/errors.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/errors.go
new file mode 100644
index 0000000000..89d276fc44
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/errors.go
@@ -0,0 +1,90 @@
+package jwe
+
+import "errors"
+
+type encryptError struct {
+ error
+}
+
+func (e encryptError) Unwrap() error {
+ return e.error
+}
+
+func (encryptError) Is(err error) bool {
+ _, ok := err.(encryptError)
+ return ok
+}
+
+var errDefaultEncryptError = encryptError{errors.New(`encrypt error`)}
+
+// EncryptError returns an error that can be passed to `errors.Is` to check if the error is an error returned by `jwe.Encrypt`.
+func EncryptError() error {
+ return errDefaultEncryptError
+}
+
+type decryptError struct {
+ error
+}
+
+func (e decryptError) Unwrap() error {
+ return e.error
+}
+
+func (decryptError) Is(err error) bool {
+ _, ok := err.(decryptError)
+ return ok
+}
+
+var errDefaultDecryptError = decryptError{errors.New(`decrypt error`)}
+
+// DecryptError returns an error that can be passed to `errors.Is` to check if the error is an error returned by `jwe.Decrypt`.
+func DecryptError() error {
+ return errDefaultDecryptError
+}
+
+type recipientError struct {
+ error
+}
+
+func (e recipientError) Unwrap() error {
+ return e.error
+}
+
+func (recipientError) Is(err error) bool {
+ _, ok := err.(recipientError)
+ return ok
+}
+
+var errDefaultRecipientError = recipientError{errors.New(`recipient error`)}
+
+// RecipientError returns an error that can be passed to `errors.Is` to check if the error is
+// an error that occurred while attempting to decrypt a JWE message for a particular recipient.
+//
+// For example, if the JWE message failed to parse during `jwe.Decrypt`, it will be a
+// `jwe.DecryptError`, but NOT `jwe.RecipientError`. However, if the JWE message could not
+// be decrypted for any of the recipients, then it will be a `jwe.RecipientError`
+// (actually, it will be _multiple_ `jwe.RecipientError` errors, one for each recipient)
+func RecipientError() error {
+ return errDefaultRecipientError
+}
+
+type parseError struct {
+ error
+}
+
+func (e parseError) Unwrap() error {
+ return e.error
+}
+
+func (parseError) Is(err error) bool {
+ _, ok := err.(parseError)
+ return ok
+}
+
+var errDefaultParseError = parseError{errors.New(`parse error`)}
+
+// ParseError returns an error that can be passed to `errors.Is` to check if the error
+// is an error returned by `jwe.Parse` and related functions.
+func ParseError() error {
+ return errDefaultParseError
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/filter.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/filter.go
new file mode 100644
index 0000000000..14941604bf
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/filter.go
@@ -0,0 +1,36 @@
+package jwe
+
+import (
+ "github.com/lestrrat-go/jwx/v3/transform"
+)
+
+// HeaderFilter is an interface that allows users to filter JWE header fields.
+// It provides two methods: Filter and Reject; Filter returns a new header with only
+// the fields that match the filter criteria, while Reject returns a new header with
+// only the fields that DO NOT match the filter.
+//
+// EXPERIMENTAL: This API is experimental and its interface and behavior is
+// subject to change in future releases. This API is not subject to semver
+// compatibility guarantees.
+type HeaderFilter interface {
+ Filter(header Headers) (Headers, error)
+ Reject(header Headers) (Headers, error)
+}
+
+// StandardHeadersFilter returns a HeaderFilter that filters out standard JWE header fields.
+//
+// You can use this filter to create headers that either only have standard fields
+// or only custom fields.
+//
+// If you need to configure the filter more precisely, consider
+// using the HeaderNameFilter directly.
+func StandardHeadersFilter() HeaderFilter {
+ return stdHeadersFilter
+}
+
+var stdHeadersFilter = NewHeaderNameFilter(stdHeaderNames...)
+
+// NewHeaderNameFilter creates a new HeaderNameFilter with the specified field names.
+func NewHeaderNameFilter(names ...string) HeaderFilter {
+ return transform.NewNameBasedFilter[Headers](names...)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/headers.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/headers.go
new file mode 100644
index 0000000000..6e13afde76
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/headers.go
@@ -0,0 +1,95 @@
+package jwe
+
+import (
+ "fmt"
+
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+)
+
+type isZeroer interface {
+ isZero() bool
+}
+
+func (h *stdHeaders) isZero() bool {
+ return h.agreementPartyUInfo == nil &&
+ h.agreementPartyVInfo == nil &&
+ h.algorithm == nil &&
+ h.compression == nil &&
+ h.contentEncryption == nil &&
+ h.contentType == nil &&
+ h.critical == nil &&
+ h.ephemeralPublicKey == nil &&
+ h.jwk == nil &&
+ h.jwkSetURL == nil &&
+ h.keyID == nil &&
+ h.typ == nil &&
+ h.x509CertChain == nil &&
+ h.x509CertThumbprint == nil &&
+ h.x509CertThumbprintS256 == nil &&
+ h.x509URL == nil &&
+ len(h.privateParams) == 0
+}
+
+func (h *stdHeaders) Clone() (Headers, error) {
+ dst := NewHeaders()
+ if err := h.Copy(dst); err != nil {
+ return nil, fmt.Errorf(`failed to copy header contents to new object: %w`, err)
+ }
+ return dst, nil
+}
+
+func (h *stdHeaders) Copy(dst Headers) error {
+ for _, key := range h.Keys() {
+ var v any
+ if err := h.Get(key, &v); err != nil {
+ return fmt.Errorf(`jwe.Headers: Copy: failed to get header %q: %w`, key, err)
+ }
+
+ if err := dst.Set(key, v); err != nil {
+ return fmt.Errorf(`jwe.Headers: Copy: failed to set header %q: %w`, key, err)
+ }
+ }
+ return nil
+}
+
+func (h *stdHeaders) Merge(h2 Headers) (Headers, error) {
+ h3 := NewHeaders()
+
+ if h != nil {
+ if err := h.Copy(h3); err != nil {
+ return nil, fmt.Errorf(`failed to copy headers from receiver: %w`, err)
+ }
+ }
+
+ if h2 != nil {
+ if err := h2.Copy(h3); err != nil {
+ return nil, fmt.Errorf(`failed to copy headers from argument: %w`, err)
+ }
+ }
+
+ return h3, nil
+}
+
+func (h *stdHeaders) Encode() ([]byte, error) {
+ buf, err := json.Marshal(h)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to marshal headers to JSON prior to encoding: %w`, err)
+ }
+
+ return base64.Encode(buf), nil
+}
+
+func (h *stdHeaders) Decode(buf []byte) error {
+ // base64 json string -> json object representation of header
+ decoded, err := base64.Decode(buf)
+ if err != nil {
+ return fmt.Errorf(`failed to unmarshal base64 encoded buffer: %w`, err)
+ }
+
+ if err := json.Unmarshal(decoded, h); err != nil {
+ return fmt.Errorf(`failed to unmarshal buffer: %w`, err)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/headers_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/headers_gen.go
new file mode 100644
index 0000000000..7773a5d812
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/headers_gen.go
@@ -0,0 +1,899 @@
+// Code generated by tools/cmd/genjwe/main.go. DO NOT EDIT.
+
+package jwe
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "sync"
+
+ "github.com/lestrrat-go/blackmagic"
+ "github.com/lestrrat-go/jwx/v3/cert"
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+ "github.com/lestrrat-go/jwx/v3/internal/pool"
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jwk"
+)
+
+const (
+ AgreementPartyUInfoKey = "apu"
+ AgreementPartyVInfoKey = "apv"
+ AlgorithmKey = "alg"
+ CompressionKey = "zip"
+ ContentEncryptionKey = "enc"
+ ContentTypeKey = "cty"
+ CriticalKey = "crit"
+ EphemeralPublicKeyKey = "epk"
+ JWKKey = "jwk"
+ JWKSetURLKey = "jku"
+ KeyIDKey = "kid"
+ TypeKey = "typ"
+ X509CertChainKey = "x5c"
+ X509CertThumbprintKey = "x5t"
+ X509CertThumbprintS256Key = "x5t#S256"
+ X509URLKey = "x5u"
+)
+
+// Headers describe a standard JWE Header set. It is part of the JWE message
+// and is used to represent both Protected and Unprotected headers,
+// which in turn can be found in each Recipient object.
+// If you are not sure how this works, it is strongly recommended that
+// you read RFC7516, especially the section
+// that describes the full JSON serialization format of JWE messages.
+//
+// In most cases, you likely want to use the protected headers, as this is the part of the encrypted content
+type Headers interface {
+ AgreementPartyUInfo() ([]byte, bool)
+ AgreementPartyVInfo() ([]byte, bool)
+ Algorithm() (jwa.KeyEncryptionAlgorithm, bool)
+ Compression() (jwa.CompressionAlgorithm, bool)
+ ContentEncryption() (jwa.ContentEncryptionAlgorithm, bool)
+ ContentType() (string, bool)
+ Critical() ([]string, bool)
+ EphemeralPublicKey() (jwk.Key, bool)
+ JWK() (jwk.Key, bool)
+ JWKSetURL() (string, bool)
+ KeyID() (string, bool)
+ Type() (string, bool)
+ X509CertChain() (*cert.Chain, bool)
+ X509CertThumbprint() (string, bool)
+ X509CertThumbprintS256() (string, bool)
+ X509URL() (string, bool)
+
+ // Get is used to extract the value of any field, including non-standard fields, out of the header.
+ //
+ // The first argument is the name of the field. The second argument is a pointer
+ // to a variable that will receive the value of the field. The method returns
+ // an error if the field does not exist, or if the value cannot be assigned to
+ // the destination variable. Note that a field is considered to "exist" even if
+ // the value is empty-ish (e.g. 0, false, ""), as long as it is explicitly set.
+ Get(string, any) error
+ Set(string, any) error
+ Remove(string) error
+ // Has returns true if the specified header has a value, even if
+ // the value is empty-ish (e.g. 0, false, "") as long as it has been
+ // explicitly set.
+ Has(string) bool
+ Encode() ([]byte, error)
+ Decode([]byte) error
+ Clone() (Headers, error)
+ Copy(Headers) error
+ Merge(Headers) (Headers, error)
+
+ // Keys returns a list of the keys contained in this header.
+ Keys() []string
+}
+
+// stdHeaderNames is a list of all standard header names defined in the JWE specification.
+var stdHeaderNames = []string{AgreementPartyUInfoKey, AgreementPartyVInfoKey, AlgorithmKey, CompressionKey, ContentEncryptionKey, ContentTypeKey, CriticalKey, EphemeralPublicKeyKey, JWKKey, JWKSetURLKey, KeyIDKey, TypeKey, X509CertChainKey, X509CertThumbprintKey, X509CertThumbprintS256Key, X509URLKey}
+
+type stdHeaders struct {
+ agreementPartyUInfo []byte
+ agreementPartyVInfo []byte
+ algorithm *jwa.KeyEncryptionAlgorithm
+ compression *jwa.CompressionAlgorithm
+ contentEncryption *jwa.ContentEncryptionAlgorithm
+ contentType *string
+ critical []string
+ ephemeralPublicKey jwk.Key
+ jwk jwk.Key
+ jwkSetURL *string
+ keyID *string
+ typ *string
+ x509CertChain *cert.Chain
+ x509CertThumbprint *string
+ x509CertThumbprintS256 *string
+ x509URL *string
+ privateParams map[string]any
+ mu *sync.RWMutex
+}
+
+func NewHeaders() Headers {
+ return &stdHeaders{
+ mu: &sync.RWMutex{},
+ privateParams: map[string]any{},
+ }
+}
+
+func (h *stdHeaders) AgreementPartyUInfo() ([]byte, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ return h.agreementPartyUInfo, h.agreementPartyUInfo != nil
+}
+
+func (h *stdHeaders) AgreementPartyVInfo() ([]byte, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ return h.agreementPartyVInfo, h.agreementPartyVInfo != nil
+}
+
+func (h *stdHeaders) Algorithm() (jwa.KeyEncryptionAlgorithm, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ if h.algorithm == nil {
+ return jwa.EmptyKeyEncryptionAlgorithm(), false
+ }
+ return *(h.algorithm), true
+}
+
+func (h *stdHeaders) Compression() (jwa.CompressionAlgorithm, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ if h.compression == nil {
+ return jwa.NoCompress(), false
+ }
+ return *(h.compression), true
+}
+
+func (h *stdHeaders) ContentEncryption() (jwa.ContentEncryptionAlgorithm, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ if h.contentEncryption == nil {
+ return jwa.EmptyContentEncryptionAlgorithm(), false
+ }
+ return *(h.contentEncryption), true
+}
+
+func (h *stdHeaders) ContentType() (string, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ if h.contentType == nil {
+ return "", false
+ }
+ return *(h.contentType), true
+}
+
+func (h *stdHeaders) Critical() ([]string, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ return h.critical, h.critical != nil
+}
+
+func (h *stdHeaders) EphemeralPublicKey() (jwk.Key, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ return h.ephemeralPublicKey, h.ephemeralPublicKey != nil
+}
+
+func (h *stdHeaders) JWK() (jwk.Key, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ return h.jwk, h.jwk != nil
+}
+
+func (h *stdHeaders) JWKSetURL() (string, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ if h.jwkSetURL == nil {
+ return "", false
+ }
+ return *(h.jwkSetURL), true
+}
+
+func (h *stdHeaders) KeyID() (string, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ if h.keyID == nil {
+ return "", false
+ }
+ return *(h.keyID), true
+}
+
+func (h *stdHeaders) Type() (string, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ if h.typ == nil {
+ return "", false
+ }
+ return *(h.typ), true
+}
+
+func (h *stdHeaders) X509CertChain() (*cert.Chain, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ return h.x509CertChain, h.x509CertChain != nil
+}
+
+func (h *stdHeaders) X509CertThumbprint() (string, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ if h.x509CertThumbprint == nil {
+ return "", false
+ }
+ return *(h.x509CertThumbprint), true
+}
+
+func (h *stdHeaders) X509CertThumbprintS256() (string, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ if h.x509CertThumbprintS256 == nil {
+ return "", false
+ }
+ return *(h.x509CertThumbprintS256), true
+}
+
+func (h *stdHeaders) X509URL() (string, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ if h.x509URL == nil {
+ return "", false
+ }
+ return *(h.x509URL), true
+}
+
+func (h *stdHeaders) PrivateParams() map[string]any {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ return h.privateParams
+}
+
+func (h *stdHeaders) Has(name string) bool {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ switch name {
+ case AgreementPartyUInfoKey:
+ return h.agreementPartyUInfo != nil
+ case AgreementPartyVInfoKey:
+ return h.agreementPartyVInfo != nil
+ case AlgorithmKey:
+ return h.algorithm != nil
+ case CompressionKey:
+ return h.compression != nil
+ case ContentEncryptionKey:
+ return h.contentEncryption != nil
+ case ContentTypeKey:
+ return h.contentType != nil
+ case CriticalKey:
+ return h.critical != nil
+ case EphemeralPublicKeyKey:
+ return h.ephemeralPublicKey != nil
+ case JWKKey:
+ return h.jwk != nil
+ case JWKSetURLKey:
+ return h.jwkSetURL != nil
+ case KeyIDKey:
+ return h.keyID != nil
+ case TypeKey:
+ return h.typ != nil
+ case X509CertChainKey:
+ return h.x509CertChain != nil
+ case X509CertThumbprintKey:
+ return h.x509CertThumbprint != nil
+ case X509CertThumbprintS256Key:
+ return h.x509CertThumbprintS256 != nil
+ case X509URLKey:
+ return h.x509URL != nil
+ default:
+ _, ok := h.privateParams[name]
+ return ok
+ }
+}
+
+func (h *stdHeaders) Get(name string, dst any) error {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ switch name {
+ case AgreementPartyUInfoKey:
+ if h.agreementPartyUInfo == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.agreementPartyUInfo); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ case AgreementPartyVInfoKey:
+ if h.agreementPartyVInfo == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.agreementPartyVInfo); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ case AlgorithmKey:
+ if h.algorithm == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.algorithm)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ case CompressionKey:
+ if h.compression == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.compression)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ case ContentEncryptionKey:
+ if h.contentEncryption == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.contentEncryption)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ case ContentTypeKey:
+ if h.contentType == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.contentType)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ case CriticalKey:
+ if h.critical == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.critical); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ case EphemeralPublicKeyKey:
+ if h.ephemeralPublicKey == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.ephemeralPublicKey); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ case JWKKey:
+ if h.jwk == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.jwk); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ case JWKSetURLKey:
+ if h.jwkSetURL == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.jwkSetURL)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ case KeyIDKey:
+ if h.keyID == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyID)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ case TypeKey:
+ if h.typ == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.typ)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ case X509CertChainKey:
+ if h.x509CertChain == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.x509CertChain); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ case X509CertThumbprintKey:
+ if h.x509CertThumbprint == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprint)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ case X509CertThumbprintS256Key:
+ if h.x509CertThumbprintS256 == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprintS256)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ case X509URLKey:
+ if h.x509URL == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509URL)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ default:
+ v, ok := h.privateParams[name]
+ if !ok {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, v); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ }
+ return nil
+}
+
+func (h *stdHeaders) Set(name string, value any) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ return h.setNoLock(name, value)
+}
+
+func (h *stdHeaders) setNoLock(name string, value any) error {
+ switch name {
+ case AgreementPartyUInfoKey:
+ if v, ok := value.([]byte); ok {
+ h.agreementPartyUInfo = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, AgreementPartyUInfoKey, value)
+ case AgreementPartyVInfoKey:
+ if v, ok := value.([]byte); ok {
+ h.agreementPartyVInfo = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, AgreementPartyVInfoKey, value)
+ case AlgorithmKey:
+ if v, ok := value.(jwa.KeyEncryptionAlgorithm); ok {
+ h.algorithm = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, AlgorithmKey, value)
+ case CompressionKey:
+ if v, ok := value.(jwa.CompressionAlgorithm); ok {
+ h.compression = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, CompressionKey, value)
+ case ContentEncryptionKey:
+ if v, ok := value.(jwa.ContentEncryptionAlgorithm); ok {
+ if v == jwa.EmptyContentEncryptionAlgorithm() {
+ return fmt.Errorf(`"enc" field cannot be an empty string`)
+ }
+ h.contentEncryption = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, ContentEncryptionKey, value)
+ case ContentTypeKey:
+ if v, ok := value.(string); ok {
+ h.contentType = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, ContentTypeKey, value)
+ case CriticalKey:
+ if v, ok := value.([]string); ok {
+ h.critical = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, CriticalKey, value)
+ case EphemeralPublicKeyKey:
+ if v, ok := value.(jwk.Key); ok {
+ h.ephemeralPublicKey = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, EphemeralPublicKeyKey, value)
+ case JWKKey:
+ if v, ok := value.(jwk.Key); ok {
+ h.jwk = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, JWKKey, value)
+ case JWKSetURLKey:
+ if v, ok := value.(string); ok {
+ h.jwkSetURL = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, JWKSetURLKey, value)
+ case KeyIDKey:
+ if v, ok := value.(string); ok {
+ h.keyID = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value)
+ case TypeKey:
+ if v, ok := value.(string); ok {
+ h.typ = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, TypeKey, value)
+ case X509CertChainKey:
+ if v, ok := value.(*cert.Chain); ok {
+ h.x509CertChain = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value)
+ case X509CertThumbprintKey:
+ if v, ok := value.(string); ok {
+ h.x509CertThumbprint = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value)
+ case X509CertThumbprintS256Key:
+ if v, ok := value.(string); ok {
+ h.x509CertThumbprintS256 = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value)
+ case X509URLKey:
+ if v, ok := value.(string); ok {
+ h.x509URL = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value)
+ default:
+ if h.privateParams == nil {
+ h.privateParams = map[string]any{}
+ }
+ h.privateParams[name] = value
+ }
+ return nil
+}
+
+func (h *stdHeaders) Remove(key string) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ switch key {
+ case AgreementPartyUInfoKey:
+ h.agreementPartyUInfo = nil
+ case AgreementPartyVInfoKey:
+ h.agreementPartyVInfo = nil
+ case AlgorithmKey:
+ h.algorithm = nil
+ case CompressionKey:
+ h.compression = nil
+ case ContentEncryptionKey:
+ h.contentEncryption = nil
+ case ContentTypeKey:
+ h.contentType = nil
+ case CriticalKey:
+ h.critical = nil
+ case EphemeralPublicKeyKey:
+ h.ephemeralPublicKey = nil
+ case JWKKey:
+ h.jwk = nil
+ case JWKSetURLKey:
+ h.jwkSetURL = nil
+ case KeyIDKey:
+ h.keyID = nil
+ case TypeKey:
+ h.typ = nil
+ case X509CertChainKey:
+ h.x509CertChain = nil
+ case X509CertThumbprintKey:
+ h.x509CertThumbprint = nil
+ case X509CertThumbprintS256Key:
+ h.x509CertThumbprintS256 = nil
+ case X509URLKey:
+ h.x509URL = nil
+ default:
+ delete(h.privateParams, key)
+ }
+ return nil
+}
+
+func (h *stdHeaders) UnmarshalJSON(buf []byte) error {
+ h.agreementPartyUInfo = nil
+ h.agreementPartyVInfo = nil
+ h.algorithm = nil
+ h.compression = nil
+ h.contentEncryption = nil
+ h.contentType = nil
+ h.critical = nil
+ h.ephemeralPublicKey = nil
+ h.jwk = nil
+ h.jwkSetURL = nil
+ h.keyID = nil
+ h.typ = nil
+ h.x509CertChain = nil
+ h.x509CertThumbprint = nil
+ h.x509CertThumbprintS256 = nil
+ h.x509URL = nil
+ dec := json.NewDecoder(bytes.NewReader(buf))
+LOOP:
+ for {
+ tok, err := dec.Token()
+ if err != nil {
+ return fmt.Errorf(`error reading token: %w`, err)
+ }
+ switch tok := tok.(type) {
+ case json.Delim:
+ // Assuming we're doing everything correctly, we should ONLY
+ // get either tokens.OpenCurlyBracket or tokens.CloseCurlyBracket here.
+ if tok == tokens.CloseCurlyBracket { // End of object
+ break LOOP
+ } else if tok != tokens.OpenCurlyBracket {
+ return fmt.Errorf(`expected '%c' but got '%c'`, tokens.OpenCurlyBracket, tok)
+ }
+ case string: // Objects can only have string keys
+ switch tok {
+ case AgreementPartyUInfoKey:
+ if err := json.AssignNextBytesToken(&h.agreementPartyUInfo, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, AgreementPartyUInfoKey, err)
+ }
+ case AgreementPartyVInfoKey:
+ if err := json.AssignNextBytesToken(&h.agreementPartyVInfo, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, AgreementPartyVInfoKey, err)
+ }
+ case AlgorithmKey:
+ var decoded jwa.KeyEncryptionAlgorithm
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+ }
+ h.algorithm = &decoded
+ case CompressionKey:
+ var decoded jwa.CompressionAlgorithm
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, CompressionKey, err)
+ }
+ h.compression = &decoded
+ case ContentEncryptionKey:
+ var decoded jwa.ContentEncryptionAlgorithm
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, ContentEncryptionKey, err)
+ }
+ h.contentEncryption = &decoded
+ case ContentTypeKey:
+ if err := json.AssignNextStringToken(&h.contentType, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, ContentTypeKey, err)
+ }
+ case CriticalKey:
+ var decoded []string
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, CriticalKey, err)
+ }
+ h.critical = decoded
+ case EphemeralPublicKeyKey:
+ var buf json.RawMessage
+ if err := dec.Decode(&buf); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s:%w`, EphemeralPublicKeyKey, err)
+ }
+ key, err := jwk.ParseKey(buf)
+ if err != nil {
+ return fmt.Errorf(`failed to parse JWK for key %s: %w`, EphemeralPublicKeyKey, err)
+ }
+ h.ephemeralPublicKey = key
+ case JWKKey:
+ var buf json.RawMessage
+ if err := dec.Decode(&buf); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s:%w`, JWKKey, err)
+ }
+ key, err := jwk.ParseKey(buf)
+ if err != nil {
+ return fmt.Errorf(`failed to parse JWK for key %s: %w`, JWKKey, err)
+ }
+ h.jwk = key
+ case JWKSetURLKey:
+ if err := json.AssignNextStringToken(&h.jwkSetURL, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, JWKSetURLKey, err)
+ }
+ case KeyIDKey:
+ if err := json.AssignNextStringToken(&h.keyID, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err)
+ }
+ case TypeKey:
+ if err := json.AssignNextStringToken(&h.typ, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, TypeKey, err)
+ }
+ case X509CertChainKey:
+ var decoded cert.Chain
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err)
+ }
+ h.x509CertChain = &decoded
+ case X509CertThumbprintKey:
+ if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err)
+ }
+ case X509CertThumbprintS256Key:
+ if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err)
+ }
+ case X509URLKey:
+ if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err)
+ }
+ default:
+ decoded, err := registry.Decode(dec, tok)
+ if err != nil {
+ return err
+ }
+ h.setNoLock(tok, decoded)
+ }
+ default:
+ return fmt.Errorf(`invalid token %T`, tok)
+ }
+ }
+ return nil
+}
+
+func (h *stdHeaders) Keys() []string {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ keys := make([]string, 0, 16+len(h.privateParams))
+ if h.agreementPartyUInfo != nil {
+ keys = append(keys, AgreementPartyUInfoKey)
+ }
+ if h.agreementPartyVInfo != nil {
+ keys = append(keys, AgreementPartyVInfoKey)
+ }
+ if h.algorithm != nil {
+ keys = append(keys, AlgorithmKey)
+ }
+ if h.compression != nil {
+ keys = append(keys, CompressionKey)
+ }
+ if h.contentEncryption != nil {
+ keys = append(keys, ContentEncryptionKey)
+ }
+ if h.contentType != nil {
+ keys = append(keys, ContentTypeKey)
+ }
+ if h.critical != nil {
+ keys = append(keys, CriticalKey)
+ }
+ if h.ephemeralPublicKey != nil {
+ keys = append(keys, EphemeralPublicKeyKey)
+ }
+ if h.jwk != nil {
+ keys = append(keys, JWKKey)
+ }
+ if h.jwkSetURL != nil {
+ keys = append(keys, JWKSetURLKey)
+ }
+ if h.keyID != nil {
+ keys = append(keys, KeyIDKey)
+ }
+ if h.typ != nil {
+ keys = append(keys, TypeKey)
+ }
+ if h.x509CertChain != nil {
+ keys = append(keys, X509CertChainKey)
+ }
+ if h.x509CertThumbprint != nil {
+ keys = append(keys, X509CertThumbprintKey)
+ }
+ if h.x509CertThumbprintS256 != nil {
+ keys = append(keys, X509CertThumbprintS256Key)
+ }
+ if h.x509URL != nil {
+ keys = append(keys, X509URLKey)
+ }
+ for k := range h.privateParams {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+func (h stdHeaders) MarshalJSON() ([]byte, error) {
+ data := make(map[string]any)
+ keys := make([]string, 0, 16+len(h.privateParams))
+ h.mu.RLock()
+ if h.agreementPartyUInfo != nil {
+ data[AgreementPartyUInfoKey] = h.agreementPartyUInfo
+ keys = append(keys, AgreementPartyUInfoKey)
+ }
+ if h.agreementPartyVInfo != nil {
+ data[AgreementPartyVInfoKey] = h.agreementPartyVInfo
+ keys = append(keys, AgreementPartyVInfoKey)
+ }
+ if h.algorithm != nil {
+ data[AlgorithmKey] = *(h.algorithm)
+ keys = append(keys, AlgorithmKey)
+ }
+ if h.compression != nil {
+ data[CompressionKey] = *(h.compression)
+ keys = append(keys, CompressionKey)
+ }
+ if h.contentEncryption != nil {
+ data[ContentEncryptionKey] = *(h.contentEncryption)
+ keys = append(keys, ContentEncryptionKey)
+ }
+ if h.contentType != nil {
+ data[ContentTypeKey] = *(h.contentType)
+ keys = append(keys, ContentTypeKey)
+ }
+ if h.critical != nil {
+ data[CriticalKey] = h.critical
+ keys = append(keys, CriticalKey)
+ }
+ if h.ephemeralPublicKey != nil {
+ data[EphemeralPublicKeyKey] = h.ephemeralPublicKey
+ keys = append(keys, EphemeralPublicKeyKey)
+ }
+ if h.jwk != nil {
+ data[JWKKey] = h.jwk
+ keys = append(keys, JWKKey)
+ }
+ if h.jwkSetURL != nil {
+ data[JWKSetURLKey] = *(h.jwkSetURL)
+ keys = append(keys, JWKSetURLKey)
+ }
+ if h.keyID != nil {
+ data[KeyIDKey] = *(h.keyID)
+ keys = append(keys, KeyIDKey)
+ }
+ if h.typ != nil {
+ data[TypeKey] = *(h.typ)
+ keys = append(keys, TypeKey)
+ }
+ if h.x509CertChain != nil {
+ data[X509CertChainKey] = h.x509CertChain
+ keys = append(keys, X509CertChainKey)
+ }
+ if h.x509CertThumbprint != nil {
+ data[X509CertThumbprintKey] = *(h.x509CertThumbprint)
+ keys = append(keys, X509CertThumbprintKey)
+ }
+ if h.x509CertThumbprintS256 != nil {
+ data[X509CertThumbprintS256Key] = *(h.x509CertThumbprintS256)
+ keys = append(keys, X509CertThumbprintS256Key)
+ }
+ if h.x509URL != nil {
+ data[X509URLKey] = *(h.x509URL)
+ keys = append(keys, X509URLKey)
+ }
+ for k, v := range h.privateParams {
+ data[k] = v
+ keys = append(keys, k)
+ }
+ h.mu.RUnlock()
+
+ sort.Strings(keys)
+ buf := pool.BytesBuffer().Get()
+ defer pool.BytesBuffer().Put(buf)
+ enc := json.NewEncoder(buf)
+ buf.WriteByte(tokens.OpenCurlyBracket)
+ for i, k := range keys {
+ if i > 0 {
+ buf.WriteRune(tokens.Comma)
+ }
+ buf.WriteRune(tokens.DoubleQuote)
+ buf.WriteString(k)
+ buf.WriteString(`":`)
+ v := data[k]
+ switch v := v.(type) {
+ case []byte:
+ buf.WriteRune(tokens.DoubleQuote)
+ buf.WriteString(base64.EncodeToString(v))
+ buf.WriteRune(tokens.DoubleQuote)
+ default:
+ if err := enc.Encode(v); err != nil {
+ return nil, fmt.Errorf(`failed to encode value for field %s`, k)
+ }
+ buf.Truncate(buf.Len() - 1)
+ }
+ }
+ buf.WriteByte(tokens.CloseCurlyBracket)
+ ret := make([]byte, buf.Len())
+ copy(ret, buf.Bytes())
+ return ret, nil
+}
+
+func (h *stdHeaders) clear() {
+ h.mu.Lock()
+ h.agreementPartyUInfo = nil
+ h.agreementPartyVInfo = nil
+ h.algorithm = nil
+ h.compression = nil
+ h.contentEncryption = nil
+ h.contentType = nil
+ h.critical = nil
+ h.ephemeralPublicKey = nil
+ h.jwk = nil
+ h.jwkSetURL = nil
+ h.keyID = nil
+ h.typ = nil
+ h.x509CertChain = nil
+ h.x509CertThumbprint = nil
+ h.x509CertThumbprintS256 = nil
+ h.x509URL = nil
+ h.privateParams = map[string]any{}
+ h.mu.Unlock()
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/interface.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/interface.go
new file mode 100644
index 0000000000..91ad8cb809
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/interface.go
@@ -0,0 +1,207 @@
+package jwe
+
+import (
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jwe/internal/keygen"
+)
+
+// KeyEncrypter is an interface for object that can encrypt a
+// content encryption key.
+//
+// You can use this in place of a regular key (i.e. in jwe.WithKey())
+// to encrypt the content encryption key in a JWE message without
+// having to expose the secret key in memory, for example, when you
+// want to use hardware security modules (HSMs) to encrypt the key.
+//
+// This API is experimental and may change without notice, even
+// in minor releases.
+type KeyEncrypter interface {
+ // Algorithm returns the algorithm used to encrypt the key.
+ Algorithm() jwa.KeyEncryptionAlgorithm
+
+ // EncryptKey encrypts the given content encryption key.
+ EncryptKey([]byte) ([]byte, error)
+}
+
+// KeyIDer is an interface for things that can return a key ID.
+//
+// As of this writing, this is solely used to identify KeyEncrypter
+// objects that also carry a key ID on its own.
+type KeyIDer interface {
+ KeyID() (string, bool)
+}
+
+// KeyDecrypter is an interface for objects that can decrypt a content
+// encryption key.
+//
+// You can use this in place of a regular key (i.e. in jwe.WithKey())
+// to decrypt the encrypted key in a JWE message without having to
+// expose the secret key in memory, for example, when you want to use
+// hardware security modules (HSMs) to decrypt the key.
+//
+// This API is experimental and may change without notice, even
+// in minor releases.
+type KeyDecrypter interface {
+ // Decrypt decrypts the encrypted key of a JWE message.
+ //
+ // Make sure you understand how JWE messages are structured.
+ //
+ // For example, while in most circumstances a JWE message will only have one recipient,
+ // a JWE message may contain multiple recipients, each with their own
+ // encrypted key. This method will be called for each recipient, instead of
+ // just once for a message.
+ //
+ // Also, header values could be found in either protected/unprotected headers
+ // of a JWE message, as well as in protected/unprotected headers for each recipient.
+ // When checking a header value, you can decide to use either one, or both, but you
+ // must be aware that there are multiple places to look for.
+ DecryptKey(alg jwa.KeyEncryptionAlgorithm, encryptedKey []byte, recipient Recipient, message *Message) ([]byte, error)
+}
+
+// Recipient holds the encrypted key and hints to decrypt the key
+type Recipient interface {
+ Headers() Headers
+ EncryptedKey() []byte
+ SetHeaders(Headers) error
+ SetEncryptedKey([]byte) error
+}
+
+type stdRecipient struct {
+ // Comments on each field are taken from https://datatracker.ietf.org/doc/html/rfc7516
+ //
+ // header
+ // The "header" member MUST be present and contain the value JWE Per-
+ // Recipient Unprotected Header when the JWE Per-Recipient
+ // Unprotected Header value is non-empty; otherwise, it MUST be
+ // absent. This value is represented as an unencoded JSON object,
+ // rather than as a string. These Header Parameter values are not
+ // integrity protected.
+ //
+ // At least one of the "header", "protected", and "unprotected" members
+ // MUST be present so that "alg" and "enc" Header Parameter values are
+ // conveyed for each recipient computation.
+ //
+ // JWX note: see Message.unprotectedHeaders
+ headers Headers
+
+ // encrypted_key
+ // The "encrypted_key" member MUST be present and contain the value
+ // BASE64URL(JWE Encrypted Key) when the JWE Encrypted Key value is
+ // non-empty; otherwise, it MUST be absent.
+ encryptedKey []byte
+}
+
+// Message contains the entire encrypted JWE message. You should not
+// expect to use Message for anything other than inspecting the
+// state of an encrypted message. This is because encryption is
+// highly context-sensitive, and once we parse the original payload
+// into an object, we may not always be able to recreate the exact
+// context in which the encryption happened.
+//
+// For example, it is totally valid for if the protected header's
+// integrity was calculated using a non-standard line breaks:
+//
+// {"a dummy":
+// "protected header"}
+//
+// Once parsed, though, we can only serialize the protected header as:
+//
+// {"a dummy":"protected header"}
+//
+// which would obviously result in a contradicting integrity value
+// if we tried to re-calculate it from a parsed message.
+//
+//nolint:govet
+type Message struct {
+ // Comments on each field are taken from https://datatracker.ietf.org/doc/html/rfc7516
+ //
+ // protected
+ // The "protected" member MUST be present and contain the value
+ // BASE64URL(UTF8(JWE Protected Header)) when the JWE Protected
+ // Header value is non-empty; otherwise, it MUST be absent. These
+ // Header Parameter values are integrity protected.
+ protectedHeaders Headers
+
+ // unprotected
+ // The "unprotected" member MUST be present and contain the value JWE
+ // Shared Unprotected Header when the JWE Shared Unprotected Header
+ // value is non-empty; otherwise, it MUST be absent. This value is
+ // represented as an unencoded JSON object, rather than as a string.
+ // These Header Parameter values are not integrity protected.
+ //
+ // JWX note: This field is NOT mutually exclusive with per-recipient
+ // headers within the implementation because... it's too much work.
+ // It is _never_ populated (we don't provide a way to do this) upon encryption.
+ // When decrypting, if present its values are always merged with
+ // per-recipient header.
+ unprotectedHeaders Headers
+
+ // iv
+ // The "iv" member MUST be present and contain the value
+ // BASE64URL(JWE Initialization Vector) when the JWE Initialization
+ // Vector value is non-empty; otherwise, it MUST be absent.
+ initializationVector []byte
+
+ // aad
+ // The "aad" member MUST be present and contain the value
+ // BASE64URL(JWE AAD)) when the JWE AAD value is non-empty;
+ // otherwise, it MUST be absent. A JWE AAD value can be included to
+ // supply a base64url-encoded value to be integrity protected but not
+ // encrypted.
+ authenticatedData []byte
+
+ // ciphertext
+ // The "ciphertext" member MUST be present and contain the value
+ // BASE64URL(JWE Ciphertext).
+ cipherText []byte
+
+ // tag
+ // The "tag" member MUST be present and contain the value
+ // BASE64URL(JWE Authentication Tag) when the JWE Authentication Tag
+ // value is non-empty; otherwise, it MUST be absent.
+ tag []byte
+
+ // recipients
+ // The "recipients" member value MUST be an array of JSON objects.
+ // Each object contains information specific to a single recipient.
+ // This member MUST be present with exactly one array element per
+ // recipient, even if some or all of the array element values are the
+ // empty JSON object "{}" (which can happen when all Header Parameter
+ // values are shared between all recipients and when no encrypted key
+ // is used, such as when doing Direct Encryption).
+ //
+ // Some Header Parameters, including the "alg" parameter, can be shared
+ // among all recipient computations. Header Parameters in the JWE
+ // Protected Header and JWE Shared Unprotected Header values are shared
+ // among all recipients.
+ //
+ // The Header Parameter values used when creating or validating per-
+ // recipient ciphertext and Authentication Tag values are the union of
+ // the three sets of Header Parameter values that may be present: (1)
+ // the JWE Protected Header represented in the "protected" member, (2)
+ // the JWE Shared Unprotected Header represented in the "unprotected"
+ // member, and (3) the JWE Per-Recipient Unprotected Header represented
+ // in the "header" member of the recipient's array element. The union
+ // of these sets of Header Parameters comprises the JOSE Header. The
+ // Header Parameter names in the three locations MUST be disjoint.
+ recipients []Recipient
+
+ // TODO: Additional members can be present in both the JSON objects defined
+ // above; if not understood by implementations encountering them, they
+ // MUST be ignored.
+ // privateParams map[string]any
+
+ // These two fields below are not available for the public consumers of this object.
+ // rawProtectedHeaders stores the original protected header buffer
+ rawProtectedHeaders []byte
+ // storeProtectedHeaders is a hint to be used in UnmarshalJSON().
+ // When this flag is true, UnmarshalJSON() will populate the
+ // rawProtectedHeaders field
+ storeProtectedHeaders bool
+}
+
+// populater is an interface for things that may modify the
+// JWE header. e.g. ByteWithECPrivateKey
+type populater interface {
+ Populate(keygen.Setter) error
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc/BUILD.bazel
new file mode 100644
index 0000000000..4ed4c53fa3
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc/BUILD.bazel
@@ -0,0 +1,22 @@
+load("@rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "aescbc",
+ srcs = ["aescbc.go"],
+ importpath = "github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc",
+ visibility = ["//:__subpackages__"],
+ deps = ["//internal/pool"],
+)
+
+go_test(
+ name = "aescbc_test",
+ srcs = ["aescbc_test.go"],
+ embed = [":aescbc"],
+ deps = ["@com_github_stretchr_testify//require"]
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":aescbc",
+ visibility = ["//jwe:__subpackages__"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc/aescbc.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc/aescbc.go
new file mode 100644
index 0000000000..b572674e2d
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc/aescbc.go
@@ -0,0 +1,270 @@
+package aescbc
+
+import (
+ "crypto/cipher"
+ "crypto/hmac"
+ "crypto/sha256"
+ "crypto/sha512"
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "hash"
+ "sync/atomic"
+
+ "github.com/lestrrat-go/jwx/v3/internal/pool"
+)
+
+const (
+ NonceSize = 16
+)
+
+const defaultBufSize int64 = 256 * 1024 * 1024
+
+var maxBufSize atomic.Int64
+
+func init() {
+ SetMaxBufferSize(defaultBufSize)
+}
+
+func SetMaxBufferSize(siz int64) {
+ if siz <= 0 {
+ siz = defaultBufSize
+ }
+ maxBufSize.Store(siz)
+}
+
+func pad(buf []byte, n int) []byte {
+ rem := n - len(buf)%n
+ if rem == 0 {
+ return buf
+ }
+
+ bufsiz := len(buf) + rem
+ mbs := maxBufSize.Load()
+ if int64(bufsiz) > mbs {
+ panic(fmt.Errorf("failed to allocate buffer"))
+ }
+ newbuf := make([]byte, bufsiz)
+ copy(newbuf, buf)
+
+ for i := len(buf); i < len(newbuf); i++ {
+ newbuf[i] = byte(rem)
+ }
+ return newbuf
+}
+
+// ref. https://github.com/golang/go/blob/c3db64c0f45e8f2d75c5b59401e0fc925701b6f4/src/crypto/tls/conn.go#L279-L324
+//
+// extractPadding returns, in constant time, the length of the padding to remove
+// from the end of payload. It also returns a byte which is equal to 255 if the
+// padding was valid and 0 otherwise. See RFC 2246, Section 6.2.3.2.
+func extractPadding(payload []byte) (toRemove int, good byte) {
+ if len(payload) < 1 {
+ return 0, 0
+ }
+
+ paddingLen := payload[len(payload)-1]
+ t := uint(len(payload)) - uint(paddingLen)
+ // if len(payload) > paddingLen then the MSB of t is zero
+ good = byte(int32(^t) >> 31)
+
+ // The maximum possible padding length plus the actual length field
+ toCheck := 256
+ // The length of the padded data is public, so we can use an if here
+ if toCheck > len(payload) {
+ toCheck = len(payload)
+ }
+
+ for i := 1; i <= toCheck; i++ {
+ t := uint(paddingLen) - uint(i)
+ // if i <= paddingLen then the MSB of t is zero
+ mask := byte(int32(^t) >> 31)
+ b := payload[len(payload)-i]
+ good &^= mask&paddingLen ^ mask&b
+ }
+
+ // We AND together the bits of good and replicate the result across
+ // all the bits.
+ good &= good << 4
+ good &= good << 2
+ good &= good << 1
+ good = uint8(int8(good) >> 7)
+
+ // Zero the padding length on error. This ensures any unchecked bytes
+ // are included in the MAC. Otherwise, an attacker that could
+ // distinguish MAC failures from padding failures could mount an attack
+ // similar to POODLE in SSL 3.0: given a good ciphertext that uses a
+ // full block's worth of padding, replace the final block with another
+ // block. If the MAC check passed but the padding check failed, the
+ // last byte of that block decrypted to the block size.
+ //
+ // See also macAndPaddingGood logic below.
+ paddingLen &= good
+
+ toRemove = int(paddingLen)
+ return
+}
+
+type Hmac struct {
+ blockCipher cipher.Block
+ hash func() hash.Hash
+ keysize int
+ tagsize int
+ integrityKey []byte
+}
+
+type BlockCipherFunc func([]byte) (cipher.Block, error)
+
+func New(key []byte, f BlockCipherFunc) (hmac *Hmac, err error) {
+ keysize := len(key) / 2
+ ikey := key[:keysize]
+ ekey := key[keysize:]
+
+ bc, ciphererr := f(ekey)
+ if ciphererr != nil {
+ err = fmt.Errorf(`failed to execute block cipher function: %w`, ciphererr)
+ return
+ }
+
+ var hfunc func() hash.Hash
+ switch keysize {
+ case 16:
+ hfunc = sha256.New
+ case 24:
+ hfunc = sha512.New384
+ case 32:
+ hfunc = sha512.New
+ default:
+ return nil, fmt.Errorf("unsupported key size %d", keysize)
+ }
+
+ return &Hmac{
+ blockCipher: bc,
+ hash: hfunc,
+ integrityKey: ikey,
+ keysize: keysize,
+ tagsize: keysize, // NonceSize,
+ // While investigating GH #207, I stumbled upon another problem where
+ // the computed tags don't match on decrypt. After poking through the
+ // code using a bunch of debug statements, I've finally found out that
+ // tagsize = keysize makes the whole thing work.
+ }, nil
+}
+
+// NonceSize fulfills the crypto.AEAD interface
+func (c Hmac) NonceSize() int {
+ return NonceSize
+}
+
+// Overhead fulfills the crypto.AEAD interface
+func (c Hmac) Overhead() int {
+ return c.blockCipher.BlockSize() + c.tagsize
+}
+
+func (c Hmac) ComputeAuthTag(aad, nonce, ciphertext []byte) ([]byte, error) {
+ var buf [8]byte
+ binary.BigEndian.PutUint64(buf[:], uint64(len(aad)*8))
+
+ h := hmac.New(c.hash, c.integrityKey)
+
+ // compute the tag
+ // no need to check errors because Write never returns an error: https://pkg.go.dev/hash#Hash
+ //
+ // > Write (via the embedded io.Writer interface) adds more data to the running hash.
+ // > It never returns an error.
+ h.Write(aad)
+ h.Write(nonce)
+ h.Write(ciphertext)
+ h.Write(buf[:])
+ s := h.Sum(nil)
+ return s[:c.tagsize], nil
+}
+
+func ensureSize(dst []byte, n int) []byte {
+ // if the dst buffer has enough length just copy the relevant parts to it.
+ // Otherwise create a new slice that's big enough, and operate on that
+ // Note: I think go-jose has a bug in that it checks for cap(), but not len().
+ ret := dst
+ if diff := n - len(dst); diff > 0 {
+ // dst is not big enough
+ ret = make([]byte, n)
+ copy(ret, dst)
+ }
+ return ret
+}
+
+// Seal fulfills the crypto.AEAD interface
+func (c Hmac) Seal(dst, nonce, plaintext, data []byte) []byte {
+ ctlen := len(plaintext)
+ bufsiz := ctlen + c.Overhead()
+ mbs := maxBufSize.Load()
+
+ if int64(bufsiz) > mbs {
+ panic(fmt.Errorf("failed to allocate buffer"))
+ }
+ ciphertext := make([]byte, bufsiz)[:ctlen]
+ copy(ciphertext, plaintext)
+ ciphertext = pad(ciphertext, c.blockCipher.BlockSize())
+
+ cbc := cipher.NewCBCEncrypter(c.blockCipher, nonce)
+ cbc.CryptBlocks(ciphertext, ciphertext)
+
+ authtag, err := c.ComputeAuthTag(data, nonce, ciphertext)
+ if err != nil {
+ // Hmac implements cipher.AEAD interface. Seal can't return error.
+ // But currently it never reach here because of Hmac.ComputeAuthTag doesn't return error.
+ panic(fmt.Errorf("failed to seal on hmac: %v", err))
+ }
+
+ retlen := len(dst) + len(ciphertext) + len(authtag)
+
+ ret := ensureSize(dst, retlen)
+ out := ret[len(dst):]
+ n := copy(out, ciphertext)
+ copy(out[n:], authtag)
+
+ return ret
+}
+
+// Open fulfills the crypto.AEAD interface
+func (c Hmac) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
+ if len(ciphertext) < c.keysize {
+ return nil, fmt.Errorf(`invalid ciphertext (too short)`)
+ }
+
+ tagOffset := len(ciphertext) - c.tagsize
+ if tagOffset%c.blockCipher.BlockSize() != 0 {
+ return nil, fmt.Errorf(
+ "invalid ciphertext (invalid length: %d %% %d != 0)",
+ tagOffset,
+ c.blockCipher.BlockSize(),
+ )
+ }
+ tag := ciphertext[tagOffset:]
+ ciphertext = ciphertext[:tagOffset]
+
+ expectedTag, err := c.ComputeAuthTag(data, nonce, ciphertext[:tagOffset])
+ if err != nil {
+ return nil, fmt.Errorf(`failed to compute auth tag: %w`, err)
+ }
+
+ cbc := cipher.NewCBCDecrypter(c.blockCipher, nonce)
+ buf := pool.ByteSlice().GetCapacity(tagOffset)
+ defer pool.ByteSlice().Put(buf)
+ buf = buf[:tagOffset]
+
+ cbc.CryptBlocks(buf, ciphertext)
+
+ toRemove, good := extractPadding(buf)
+ cmp := subtle.ConstantTimeCompare(expectedTag, tag) & int(good)
+ if cmp != 1 {
+ return nil, errors.New(`invalid ciphertext`)
+ }
+
+ plaintext := buf[:len(buf)-toRemove]
+ ret := ensureSize(dst, len(plaintext))
+ out := ret[len(dst):]
+ copy(out, plaintext)
+ return ret, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/cipher/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/cipher/BUILD.bazel
new file mode 100644
index 0000000000..cf642c744d
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/cipher/BUILD.bazel
@@ -0,0 +1,34 @@
+load("@rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "cipher",
+ srcs = [
+ "cipher.go",
+ "interface.go",
+ ],
+ importpath = "github.com/lestrrat-go/jwx/v3/jwe/internal/cipher",
+ visibility = ["//:__subpackages__"],
+ deps = [
+ "//jwa",
+ "//jwe/internal/aescbc",
+ "//jwe/internal/keygen",
+ "//internal/tokens",
+ ],
+)
+
+go_test(
+ name = "cipher_test",
+ srcs = ["cipher_test.go"],
+ deps = [
+ ":cipher",
+ "//jwa",
+ "//internal/tokens",
+ "@com_github_stretchr_testify//require",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":cipher",
+ visibility = ["//jwe:__subpackages__"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/cipher/cipher.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/cipher/cipher.go
new file mode 100644
index 0000000000..9b9a40d00d
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/cipher/cipher.go
@@ -0,0 +1,169 @@
+package cipher
+
+import (
+ "crypto/aes"
+ "crypto/cipher"
+ "fmt"
+
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+ "github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc"
+ "github.com/lestrrat-go/jwx/v3/jwe/internal/keygen"
+)
+
+var gcm = &gcmFetcher{}
+var cbc = &cbcFetcher{}
+
+func (f gcmFetcher) Fetch(key []byte, size int) (cipher.AEAD, error) {
+ if len(key) != size {
+ return nil, fmt.Errorf(`key size (%d) does not match expected key size (%d)`, len(key), size)
+ }
+ aescipher, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, fmt.Errorf(`cipher: failed to create AES cipher for GCM: %w`, err)
+ }
+
+ aead, err := cipher.NewGCM(aescipher)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to create GCM for cipher: %w`, err)
+ }
+ return aead, nil
+}
+
+func (f cbcFetcher) Fetch(key []byte, size int) (cipher.AEAD, error) {
+ if len(key) != size {
+ return nil, fmt.Errorf(`key size (%d) does not match expected key size (%d)`, len(key), size)
+ }
+ aead, err := aescbc.New(key, aes.NewCipher)
+ if err != nil {
+ return nil, fmt.Errorf(`cipher: failed to create AES cipher for CBC: %w`, err)
+ }
+ return aead, nil
+}
+
+func (c AesContentCipher) KeySize() int {
+ return c.keysize
+}
+
+func (c AesContentCipher) TagSize() int {
+ return c.tagsize
+}
+
+func NewAES(alg string) (*AesContentCipher, error) {
+ var keysize int
+ var tagsize int
+ var fetcher Fetcher
+ switch alg {
+ case tokens.A128GCM:
+ keysize = 16
+ tagsize = 16
+ fetcher = gcm
+ case tokens.A192GCM:
+ keysize = 24
+ tagsize = 16
+ fetcher = gcm
+ case tokens.A256GCM:
+ keysize = 32
+ tagsize = 16
+ fetcher = gcm
+ case tokens.A128CBC_HS256:
+ tagsize = 16
+ keysize = tagsize * 2
+ fetcher = cbc
+ case tokens.A192CBC_HS384:
+ tagsize = 24
+ keysize = tagsize * 2
+ fetcher = cbc
+ case tokens.A256CBC_HS512:
+ tagsize = 32
+ keysize = tagsize * 2
+ fetcher = cbc
+ default:
+ return nil, fmt.Errorf("failed to create AES content cipher: invalid algorithm (%s)", alg)
+ }
+
+ return &AesContentCipher{
+ keysize: keysize,
+ tagsize: tagsize,
+ fetch: fetcher,
+ }, nil
+}
+
+func (c AesContentCipher) Encrypt(cek, plaintext, aad []byte) (iv, ciphertxt, tag []byte, err error) {
+ var aead cipher.AEAD
+ aead, err = c.fetch.Fetch(cek, c.keysize)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf(`failed to fetch AEAD: %w`, err)
+ }
+
+ // Seal may panic (argh!), so protect ourselves from that
+ defer func() {
+ if e := recover(); e != nil {
+ switch e := e.(type) {
+ case error:
+ err = e
+ default:
+ err = fmt.Errorf("%s", e)
+ }
+ err = fmt.Errorf(`failed to encrypt: %w`, err)
+ }
+ }()
+
+ if c.NonceGenerator != nil {
+ iv, err = c.NonceGenerator(aead.NonceSize())
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf(`failed to generate nonce: %w`, err)
+ }
+ } else {
+ bs, err := keygen.Random(aead.NonceSize())
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf(`failed to generate random nonce: %w`, err)
+ }
+ iv = bs.Bytes()
+ }
+
+ combined := aead.Seal(nil, iv, plaintext, aad)
+ tagoffset := len(combined) - c.TagSize()
+
+ if tagoffset < 0 {
+ panic(fmt.Sprintf("tag offset is less than 0 (combined len = %d, tagsize = %d)", len(combined), c.TagSize()))
+ }
+
+ tag = combined[tagoffset:]
+ ciphertxt = make([]byte, tagoffset)
+ copy(ciphertxt, combined[:tagoffset])
+
+ return
+}
+
+func (c AesContentCipher) Decrypt(cek, iv, ciphertxt, tag, aad []byte) (plaintext []byte, err error) {
+ aead, err := c.fetch.Fetch(cek, c.keysize)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to fetch AEAD data: %w`, err)
+ }
+
+ // Open may panic (argh!), so protect ourselves from that
+ defer func() {
+ if e := recover(); e != nil {
+ switch e := e.(type) {
+ case error:
+ err = e
+ default:
+ err = fmt.Errorf(`%s`, e)
+ }
+ err = fmt.Errorf(`failed to decrypt: %w`, err)
+ return
+ }
+ }()
+
+ combined := make([]byte, len(ciphertxt)+len(tag))
+ copy(combined, ciphertxt)
+ copy(combined[len(ciphertxt):], tag)
+
+ buf, aeaderr := aead.Open(nil, iv, combined, aad)
+ if aeaderr != nil {
+ err = fmt.Errorf(`aead.Open failed: %w`, aeaderr)
+ return
+ }
+ plaintext = buf
+ return
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/cipher/interface.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/cipher/interface.go
new file mode 100644
index 0000000000..a03e15b159
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/cipher/interface.go
@@ -0,0 +1,32 @@
+package cipher
+
+import (
+ "crypto/cipher"
+)
+
+const (
+ TagSize = 16
+)
+
+// ContentCipher knows how to encrypt/decrypt the content given a content
+// encryption key and other data
+type ContentCipher interface {
+ KeySize() int
+ Encrypt(cek, aad, plaintext []byte) ([]byte, []byte, []byte, error)
+ Decrypt(cek, iv, aad, ciphertext, tag []byte) ([]byte, error)
+}
+
+type Fetcher interface {
+ Fetch([]byte, int) (cipher.AEAD, error)
+}
+
+type gcmFetcher struct{}
+type cbcFetcher struct{}
+
+// AesContentCipher represents a cipher based on AES
+type AesContentCipher struct {
+ NonceGenerator func(int) ([]byte, error)
+ fetch Fetcher
+ keysize int
+ tagsize int
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/concatkdf/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/concatkdf/BUILD.bazel
new file mode 100644
index 0000000000..59aeb2cd27
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/concatkdf/BUILD.bazel
@@ -0,0 +1,24 @@
+load("@rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "concatkdf",
+ srcs = ["concatkdf.go"],
+ importpath = "github.com/lestrrat-go/jwx/v3/jwe/internal/concatkdf",
+ visibility = ["//:__subpackages__"],
+)
+
+go_test(
+ name = "concatkdf_test",
+ srcs = ["concatkdf_test.go"],
+ embed = [":concatkdf"],
+ deps = [
+ "//jwa",
+ "@com_github_stretchr_testify//require",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":concatkdf",
+ visibility = ["//jwe:__subpackages__"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/concatkdf/concatkdf.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/concatkdf/concatkdf.go
new file mode 100644
index 0000000000..3691830a63
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/concatkdf/concatkdf.go
@@ -0,0 +1,66 @@
+package concatkdf
+
+import (
+ "crypto"
+ "encoding/binary"
+ "fmt"
+)
+
+type KDF struct {
+ buf []byte
+ otherinfo []byte
+ z []byte
+ hash crypto.Hash
+}
+
+func ndata(src []byte) []byte {
+ buf := make([]byte, 4+len(src))
+ binary.BigEndian.PutUint32(buf, uint32(len(src)))
+ copy(buf[4:], src)
+ return buf
+}
+
+func New(hash crypto.Hash, alg, Z, apu, apv, pubinfo, privinfo []byte) *KDF {
+ algbuf := ndata(alg)
+ apubuf := ndata(apu)
+ apvbuf := ndata(apv)
+
+ concat := make([]byte, len(algbuf)+len(apubuf)+len(apvbuf)+len(pubinfo)+len(privinfo))
+ n := copy(concat, algbuf)
+ n += copy(concat[n:], apubuf)
+ n += copy(concat[n:], apvbuf)
+ n += copy(concat[n:], pubinfo)
+ copy(concat[n:], privinfo)
+
+ return &KDF{
+ hash: hash,
+ otherinfo: concat,
+ z: Z,
+ }
+}
+
+func (k *KDF) Read(out []byte) (int, error) {
+ var round uint32 = 1
+ h := k.hash.New()
+
+ for len(out) > len(k.buf) {
+ h.Reset()
+
+ if err := binary.Write(h, binary.BigEndian, round); err != nil {
+ return 0, fmt.Errorf(`failed to write round using kdf: %w`, err)
+ }
+ if _, err := h.Write(k.z); err != nil {
+ return 0, fmt.Errorf(`failed to write z using kdf: %w`, err)
+ }
+ if _, err := h.Write(k.otherinfo); err != nil {
+ return 0, fmt.Errorf(`failed to write other info using kdf: %w`, err)
+ }
+
+ k.buf = append(k.buf, h.Sum(nil)...)
+ round++
+ }
+
+ n := copy(out, k.buf[:len(out)])
+ k.buf = k.buf[len(out):]
+ return n, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt/BUILD.bazel
new file mode 100644
index 0000000000..bb395200f8
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt/BUILD.bazel
@@ -0,0 +1,21 @@
+load("@rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "content_crypt",
+ srcs = [
+ "content_crypt.go",
+ "interface.go",
+ ],
+ importpath = "github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt",
+ visibility = ["//:__subpackages__"],
+ deps = [
+ "//jwa",
+ "//jwe/internal/cipher",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":content_crypt",
+ visibility = ["//jwe:__subpackages__"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt/content_crypt.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt/content_crypt.go
new file mode 100644
index 0000000000..0ef45ed953
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt/content_crypt.go
@@ -0,0 +1,43 @@
+package content_crypt //nolint:golint
+
+import (
+ "fmt"
+
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jwe/internal/cipher"
+)
+
+func (c Generic) Algorithm() jwa.ContentEncryptionAlgorithm {
+ return c.alg
+}
+
+func (c Generic) Encrypt(cek, plaintext, aad []byte) ([]byte, []byte, []byte, error) {
+ iv, encrypted, tag, err := c.cipher.Encrypt(cek, plaintext, aad)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf(`failed to crypt content: %w`, err)
+ }
+
+ return iv, encrypted, tag, nil
+}
+
+func (c Generic) Decrypt(cek, iv, ciphertext, tag, aad []byte) ([]byte, error) {
+ return c.cipher.Decrypt(cek, iv, ciphertext, tag, aad)
+}
+
+func NewGeneric(alg jwa.ContentEncryptionAlgorithm) (*Generic, error) {
+ c, err := cipher.NewAES(alg.String())
+ if err != nil {
+ return nil, fmt.Errorf(`aes crypt: failed to create content cipher: %w`, err)
+ }
+
+ return &Generic{
+ alg: alg,
+ cipher: c,
+ keysize: c.KeySize(),
+ tagsize: 16,
+ }, nil
+}
+
+func (c Generic) KeySize() int {
+ return c.keysize
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt/interface.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt/interface.go
new file mode 100644
index 0000000000..84b42fe07d
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt/interface.go
@@ -0,0 +1,20 @@
+package content_crypt //nolint:golint
+
+import (
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jwe/internal/cipher"
+)
+
+// Generic encrypts a message by applying all the necessary
+// modifications to the keys and the contents
+type Generic struct {
+ alg jwa.ContentEncryptionAlgorithm
+ keysize int
+ tagsize int
+ cipher cipher.ContentCipher
+}
+
+type Cipher interface {
+ Decrypt([]byte, []byte, []byte, []byte, []byte) ([]byte, error)
+ KeySize() int
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/keygen/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/keygen/BUILD.bazel
new file mode 100644
index 0000000000..bde8eb68f7
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/keygen/BUILD.bazel
@@ -0,0 +1,24 @@
+load("@rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "keygen",
+ srcs = [
+ "interface.go",
+ "keygen.go",
+ ],
+ importpath = "github.com/lestrrat-go/jwx/v3/jwe/internal/keygen",
+ visibility = ["//:__subpackages__"],
+ deps = [
+ "//internal/ecutil",
+ "//jwa",
+ "//jwe/internal/concatkdf",
+ "//internal/tokens",
+ "//jwk",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":keygen",
+ visibility = ["//jwe:__subpackages__"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/keygen/interface.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/keygen/interface.go
new file mode 100644
index 0000000000..7f8fb961a2
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/keygen/interface.go
@@ -0,0 +1,40 @@
+package keygen
+
+// ByteKey is a generated key that only has the key's byte buffer
+// as its instance data. If a key needs to do more, such as providing
+// values to be set in a JWE header, that key type wraps a ByteKey
+type ByteKey []byte
+
+// ByteWithECPublicKey holds the EC private key that generated
+// the key along with the key itself. This is required to set the
+// proper values in the JWE headers
+type ByteWithECPublicKey struct {
+ ByteKey
+
+ PublicKey any
+}
+
+type ByteWithIVAndTag struct {
+ ByteKey
+
+ IV []byte
+ Tag []byte
+}
+
+type ByteWithSaltAndCount struct {
+ ByteKey
+
+ Salt []byte
+ Count int
+}
+
+// ByteSource is an interface for things that return a byte sequence.
+// This is used for KeyGenerator so that the result of computations can
+// carry more than just the generate byte sequence.
+type ByteSource interface {
+ Bytes() []byte
+}
+
+type Setter interface {
+ Set(string, any) error
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/keygen/keygen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/keygen/keygen.go
new file mode 100644
index 0000000000..daa7599d9f
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/keygen/keygen.go
@@ -0,0 +1,139 @@
+package keygen
+
+import (
+ "crypto"
+ "crypto/ecdh"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "encoding/binary"
+ "fmt"
+ "io"
+
+ "github.com/lestrrat-go/jwx/v3/internal/ecutil"
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+ "github.com/lestrrat-go/jwx/v3/jwe/internal/concatkdf"
+ "github.com/lestrrat-go/jwx/v3/jwk"
+)
+
+// Bytes returns the byte from this ByteKey
+func (k ByteKey) Bytes() []byte {
+ return []byte(k)
+}
+
+func Random(n int) (ByteSource, error) {
+ buf := make([]byte, n)
+ if _, err := io.ReadFull(rand.Reader, buf); err != nil {
+ return nil, fmt.Errorf(`failed to read from rand.Reader: %w`, err)
+ }
+ return ByteKey(buf), nil
+}
+
+// Ecdhes generates a new key using ECDH-ES
+func Ecdhes(alg string, enc string, keysize int, pubkey *ecdsa.PublicKey, apu, apv []byte) (ByteSource, error) {
+ priv, err := ecdsa.GenerateKey(pubkey.Curve, rand.Reader)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to generate key for ECDH-ES: %w`, err)
+ }
+
+ var algorithm string
+ if alg == tokens.ECDH_ES {
+ algorithm = enc
+ } else {
+ algorithm = alg
+ }
+
+ pubinfo := make([]byte, 4)
+ binary.BigEndian.PutUint32(pubinfo, uint32(keysize)*8)
+
+ if !priv.PublicKey.Curve.IsOnCurve(pubkey.X, pubkey.Y) {
+ return nil, fmt.Errorf(`public key used does not contain a point (X,Y) on the curve`)
+ }
+ z, _ := priv.PublicKey.Curve.ScalarMult(pubkey.X, pubkey.Y, priv.D.Bytes())
+ zBytes := ecutil.AllocECPointBuffer(z, priv.PublicKey.Curve)
+ defer ecutil.ReleaseECPointBuffer(zBytes)
+ kdf := concatkdf.New(crypto.SHA256, []byte(algorithm), zBytes, apu, apv, pubinfo, []byte{})
+ kek := make([]byte, keysize)
+ if _, err := kdf.Read(kek); err != nil {
+ return nil, fmt.Errorf(`failed to read kdf: %w`, err)
+ }
+
+ return ByteWithECPublicKey{
+ PublicKey: &priv.PublicKey,
+ ByteKey: ByteKey(kek),
+ }, nil
+}
+
+// X25519 generates a new key using ECDH-ES with X25519
+func X25519(alg string, enc string, keysize int, pubkey *ecdh.PublicKey) (ByteSource, error) {
+ priv, err := ecdh.X25519().GenerateKey(rand.Reader)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to generate key for X25519: %w`, err)
+ }
+
+ var algorithm string
+ if alg == tokens.ECDH_ES {
+ algorithm = enc
+ } else {
+ algorithm = alg
+ }
+
+ pubinfo := make([]byte, 4)
+ binary.BigEndian.PutUint32(pubinfo, uint32(keysize)*8)
+
+ zBytes, err := priv.ECDH(pubkey)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to compute Z: %w`, err)
+ }
+ kdf := concatkdf.New(crypto.SHA256, []byte(algorithm), zBytes, []byte{}, []byte{}, pubinfo, []byte{})
+ kek := make([]byte, keysize)
+ if _, err := kdf.Read(kek); err != nil {
+ return nil, fmt.Errorf(`failed to read kdf: %w`, err)
+ }
+
+ return ByteWithECPublicKey{
+ PublicKey: priv.PublicKey(),
+ ByteKey: ByteKey(kek),
+ }, nil
+}
+
+// HeaderPopulate populates the header with the required EC-DSA public key
+// information ('epk' key)
+func (k ByteWithECPublicKey) Populate(h Setter) error {
+ key, err := jwk.Import(k.PublicKey)
+ if err != nil {
+ return fmt.Errorf(`failed to create JWK: %w`, err)
+ }
+
+ if err := h.Set("epk", key); err != nil {
+ return fmt.Errorf(`failed to write header: %w`, err)
+ }
+ return nil
+}
+
+// HeaderPopulate populates the header with the required AES GCM
+// parameters ('iv' and 'tag')
+func (k ByteWithIVAndTag) Populate(h Setter) error {
+ if err := h.Set("iv", k.IV); err != nil {
+ return fmt.Errorf(`failed to write header: %w`, err)
+ }
+
+ if err := h.Set("tag", k.Tag); err != nil {
+ return fmt.Errorf(`failed to write header: %w`, err)
+ }
+
+ return nil
+}
+
+// HeaderPopulate populates the header with the required PBES2
+// parameters ('p2s' and 'p2c')
+func (k ByteWithSaltAndCount) Populate(h Setter) error {
+ if err := h.Set("p2c", k.Count); err != nil {
+ return fmt.Errorf(`failed to write header: %w`, err)
+ }
+
+ if err := h.Set("p2s", k.Salt); err != nil {
+ return fmt.Errorf(`failed to write header: %w`, err)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/io.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/io.go
new file mode 100644
index 0000000000..a5d6aca8a3
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/io.go
@@ -0,0 +1,36 @@
+// Code generated by tools/cmd/genreadfile/main.go. DO NOT EDIT.
+
+package jwe
+
+import (
+ "fmt"
+ "io/fs"
+ "os"
+)
+
+type sysFS struct{}
+
+func (sysFS) Open(path string) (fs.File, error) {
+ return os.Open(path)
+}
+
+func ReadFile(path string, options ...ReadFileOption) (*Message, error) {
+
+ var srcFS fs.FS = sysFS{}
+ for _, option := range options {
+ switch option.Ident() {
+ case identFS{}:
+ if err := option.Value(&srcFS); err != nil {
+ return nil, fmt.Errorf("failed to set fs.FS: %w", err)
+ }
+ }
+ }
+
+ f, err := srcFS.Open(path)
+ if err != nil {
+ return nil, err
+ }
+
+ defer f.Close()
+ return ParseReader(f)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwe.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwe.go
new file mode 100644
index 0000000000..5728021ec7
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwe.go
@@ -0,0 +1,1036 @@
+//go:generate ../tools/cmd/genjwe.sh
+
+// Package jwe implements JWE as described in https://tools.ietf.org/html/rfc7516
+package jwe
+
+// #region imports
+import (
+ "bytes"
+ "context"
+ "crypto/ecdsa"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/lestrrat-go/blackmagic"
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+ "github.com/lestrrat-go/jwx/v3/internal/pool"
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+ "github.com/lestrrat-go/jwx/v3/jwk"
+
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc"
+ "github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt"
+ "github.com/lestrrat-go/jwx/v3/jwe/internal/keygen"
+)
+
+// #region globals
+
+var muSettings sync.RWMutex
+var maxPBES2Count = 10000
+var maxDecompressBufferSize int64 = 10 * 1024 * 1024 // 10MB
+
+func Settings(options ...GlobalOption) {
+ muSettings.Lock()
+ defer muSettings.Unlock()
+ for _, option := range options {
+ switch option.Ident() {
+ case identMaxPBES2Count{}:
+ if err := option.Value(&maxPBES2Count); err != nil {
+ panic(fmt.Sprintf("jwe.Settings: value for option WithMaxPBES2Count must be an int: %s", err))
+ }
+ case identMaxDecompressBufferSize{}:
+ if err := option.Value(&maxDecompressBufferSize); err != nil {
+ panic(fmt.Sprintf("jwe.Settings: value for option WithMaxDecompressBufferSize must be an int64: %s", err))
+ }
+ case identCBCBufferSize{}:
+ var v int64
+ if err := option.Value(&v); err != nil {
+ panic(fmt.Sprintf("jwe.Settings: value for option WithCBCBufferSize must be an int64: %s", err))
+ }
+ aescbc.SetMaxBufferSize(v)
+ }
+ }
+}
+
+const (
+ fmtInvalid = iota
+ fmtCompact
+ fmtJSON
+ fmtJSONPretty
+ fmtMax
+)
+
+var _ = fmtInvalid
+var _ = fmtMax
+
+var registry = json.NewRegistry()
+
+type recipientBuilder struct {
+ alg jwa.KeyEncryptionAlgorithm
+ key any
+ headers Headers
+}
+
+func (b *recipientBuilder) Build(r Recipient, cek []byte, calg jwa.ContentEncryptionAlgorithm, _ *content_crypt.Generic) ([]byte, error) {
+ // we need the raw key for later use
+ rawKey := b.key
+
+ var keyID string
+ if ke, ok := b.key.(KeyEncrypter); ok {
+ if kider, ok := ke.(KeyIDer); ok {
+ if v, ok := kider.KeyID(); ok {
+ keyID = v
+ }
+ }
+ } else if jwkKey, ok := b.key.(jwk.Key); ok {
+ // Meanwhile, grab the kid as well
+ if v, ok := jwkKey.KeyID(); ok {
+ keyID = v
+ }
+
+ var raw any
+ if err := jwk.Export(jwkKey, &raw); err != nil {
+ return nil, fmt.Errorf(`jwe.Encrypt: recipientBuilder: failed to retrieve raw key out of %T: %w`, b.key, err)
+ }
+
+ rawKey = raw
+ }
+
+ // Extract ECDH-ES specific parameters if needed
+ var apu, apv []byte
+ if b.headers != nil {
+ if val, ok := b.headers.AgreementPartyUInfo(); ok {
+ apu = val
+ }
+ if val, ok := b.headers.AgreementPartyVInfo(); ok {
+ apv = val
+ }
+ }
+
+ // Create the encrypter using the new jwebb pattern
+ enc, err := newEncrypter(b.alg, calg, b.key, rawKey, apu, apv)
+ if err != nil {
+ return nil, fmt.Errorf(`jwe.Encrypt: recipientBuilder: failed to create encrypter: %w`, err)
+ }
+
+ if hdrs := b.headers; hdrs != nil {
+ _ = r.SetHeaders(hdrs)
+ }
+
+ if err := r.Headers().Set(AlgorithmKey, b.alg); err != nil {
+ return nil, fmt.Errorf(`failed to set header: %w`, err)
+ }
+
+ if keyID != "" {
+ if err := r.Headers().Set(KeyIDKey, keyID); err != nil {
+ return nil, fmt.Errorf(`failed to set header: %w`, err)
+ }
+ }
+
+ var rawCEK []byte
+ enckey, err := enc.EncryptKey(cek)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to encrypt key: %w`, err)
+ }
+ if b.alg == jwa.ECDH_ES() || b.alg == jwa.DIRECT() {
+ rawCEK = enckey.Bytes()
+ } else {
+ if err := r.SetEncryptedKey(enckey.Bytes()); err != nil {
+ return nil, fmt.Errorf(`failed to set encrypted key: %w`, err)
+ }
+ }
+
+ if hp, ok := enckey.(populater); ok {
+ if err := hp.Populate(r.Headers()); err != nil {
+ return nil, fmt.Errorf(`failed to populate: %w`, err)
+ }
+ }
+
+ return rawCEK, nil
+}
+
+// Encrypt generates a JWE message for the given payload and returns
+// it in serialized form, which can be in either compact or
+// JSON format. Default is compact.
+//
+// You must pass at least one key to `jwe.Encrypt()` by using `jwe.WithKey()`
+// option.
+//
+// jwe.Encrypt(payload, jwe.WithKey(alg, key))
+// jwe.Encrypt(payload, jws.WithJSON(), jws.WithKey(alg1, key1), jws.WithKey(alg2, key2))
+//
+// Note that in the second example the `jws.WithJSON()` option is
+// specified as well. This is because the compact serialization
+// format does not support multiple recipients, and users must
+// specifically ask for the JSON serialization format.
+//
+// Read the documentation for `jwe.WithKey()` to learn more about the
+// possible values that can be used for `alg` and `key`.
+//
+// Look for options that return `jwe.EncryptOption` or `jws.EncryptDecryptOption`
+// for a complete list of options that can be passed to this function.
+func Encrypt(payload []byte, options ...EncryptOption) ([]byte, error) {
+ ec := encryptContextPool.Get()
+ defer encryptContextPool.Put(ec)
+ if err := ec.ProcessOptions(options); err != nil {
+ return nil, encryptError{fmt.Errorf(`jwe.Encrypt: failed to process options: %w`, err)}
+ }
+ ret, err := ec.EncryptMessage(payload, nil)
+ if err != nil {
+ return nil, encryptError{fmt.Errorf(`jwe.Encrypt: %w`, err)}
+ }
+ return ret, nil
+}
+
+// EncryptStatic is exactly like Encrypt, except it accepts a static
+// content encryption key (CEK). It is separated out from the main
+// Encrypt function such that the latter does not accidentally use a static
+// CEK.
+//
+// DO NOT attempt to use this function unless you completely understand the
+// security implications to using static CEKs. You have been warned.
+//
+// This function is currently considered EXPERIMENTAL, and is subject to
+// future changes across minor/micro versions.
+func EncryptStatic(payload, cek []byte, options ...EncryptOption) ([]byte, error) {
+ if len(cek) <= 0 {
+ return nil, encryptError{fmt.Errorf(`jwe.EncryptStatic: empty CEK`)}
+ }
+ ec := encryptContextPool.Get()
+ defer encryptContextPool.Put(ec)
+ if err := ec.ProcessOptions(options); err != nil {
+ return nil, encryptError{fmt.Errorf(`jwe.EncryptStatic: failed to process options: %w`, err)}
+ }
+ ret, err := ec.EncryptMessage(payload, cek)
+ if err != nil {
+ return nil, encryptError{fmt.Errorf(`jwe.EncryptStatic: %w`, err)}
+ }
+ return ret, nil
+}
+
+// decryptContext holds the state during JWE decryption, similar to JWS verifyContext
+type decryptContext struct {
+ keyProviders []KeyProvider
+ keyUsed any
+ cek *[]byte
+ dst *Message
+ maxDecompressBufferSize int64
+ //nolint:containedctx
+ ctx context.Context
+}
+
+var decryptContextPool = pool.New(allocDecryptContext, freeDecryptContext)
+
+func allocDecryptContext() *decryptContext {
+ return &decryptContext{
+ ctx: context.Background(),
+ }
+}
+
+func freeDecryptContext(dc *decryptContext) *decryptContext {
+ dc.keyProviders = dc.keyProviders[:0]
+ dc.keyUsed = nil
+ dc.cek = nil
+ dc.dst = nil
+ dc.maxDecompressBufferSize = 0
+ dc.ctx = context.Background()
+ return dc
+}
+
+func (dc *decryptContext) ProcessOptions(options []DecryptOption) error {
+ // Set default max decompress buffer size
+ muSettings.RLock()
+ dc.maxDecompressBufferSize = maxDecompressBufferSize
+ muSettings.RUnlock()
+
+ for _, option := range options {
+ switch option.Ident() {
+ case identMessage{}:
+ if err := option.Value(&dc.dst); err != nil {
+ return fmt.Errorf("jwe.decrypt: WithMessage must be a *jwe.Message: %w", err)
+ }
+ case identKeyProvider{}:
+ var kp KeyProvider
+ if err := option.Value(&kp); err != nil {
+ return fmt.Errorf("jwe.decrypt: WithKeyProvider must be a KeyProvider: %w", err)
+ }
+ dc.keyProviders = append(dc.keyProviders, kp)
+ case identKeyUsed{}:
+ if err := option.Value(&dc.keyUsed); err != nil {
+ return fmt.Errorf("jwe.decrypt: WithKeyUsed must be an any: %w", err)
+ }
+ case identKey{}:
+ var pair *withKey
+ if err := option.Value(&pair); err != nil {
+ return fmt.Errorf("jwe.decrypt: WithKey must be a *withKey: %w", err)
+ }
+ alg, ok := pair.alg.(jwa.KeyEncryptionAlgorithm)
+ if !ok {
+ return fmt.Errorf("jwe.decrypt: WithKey() option must be specified using jwa.KeyEncryptionAlgorithm (got %T)", pair.alg)
+ }
+ dc.keyProviders = append(dc.keyProviders, &staticKeyProvider{alg: alg, key: pair.key})
+ case identCEK{}:
+ if err := option.Value(&dc.cek); err != nil {
+ return fmt.Errorf("jwe.decrypt: WithCEK must be a *[]byte: %w", err)
+ }
+ case identMaxDecompressBufferSize{}:
+ if err := option.Value(&dc.maxDecompressBufferSize); err != nil {
+ return fmt.Errorf("jwe.decrypt: WithMaxDecompressBufferSize must be int64: %w", err)
+ }
+ case identContext{}:
+ if err := option.Value(&dc.ctx); err != nil {
+ return fmt.Errorf("jwe.decrypt: WithContext must be a context.Context: %w", err)
+ }
+ }
+ }
+
+ if len(dc.keyProviders) < 1 {
+ return fmt.Errorf(`jwe.Decrypt: no key providers have been provided (see jwe.WithKey(), jwe.WithKeySet(), and jwe.WithKeyProvider()`)
+ }
+
+ return nil
+}
+
+func (dc *decryptContext) DecryptMessage(buf []byte) ([]byte, error) {
+ msg, err := parseJSONOrCompact(buf, true)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to parse buffer for Decrypt: %w`, err)
+ }
+
+ // Process things that are common to the message
+ h, err := msg.protectedHeaders.Clone()
+ if err != nil {
+ return nil, fmt.Errorf(`failed to copy protected headers: %w`, err)
+ }
+ h, err = h.Merge(msg.unprotectedHeaders)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to merge headers for message decryption: %w`, err)
+ }
+
+ var aad []byte
+ if aadContainer := msg.authenticatedData; aadContainer != nil {
+ aad = base64.Encode(aadContainer)
+ }
+
+ var computedAad []byte
+ if len(msg.rawProtectedHeaders) > 0 {
+ computedAad = msg.rawProtectedHeaders
+ } else {
+ // this is probably not required once msg.Decrypt is deprecated
+ var err error
+ computedAad, err = msg.protectedHeaders.Encode()
+ if err != nil {
+ return nil, fmt.Errorf(`failed to encode protected headers: %w`, err)
+ }
+ }
+
+ // for each recipient, attempt to match the key providers
+ // if we have no recipients, pretend like we only have one
+ recipients := msg.recipients
+ if len(recipients) == 0 {
+ r := NewRecipient()
+ if err := r.SetHeaders(msg.protectedHeaders); err != nil {
+ return nil, fmt.Errorf(`failed to set headers to recipient: %w`, err)
+ }
+ recipients = append(recipients, r)
+ }
+
+ errs := make([]error, 0, len(recipients))
+ for _, recipient := range recipients {
+ decrypted, err := dc.tryRecipient(msg, recipient, h, aad, computedAad)
+ if err != nil {
+ errs = append(errs, recipientError{err})
+ continue
+ }
+ if dc.dst != nil {
+ *dc.dst = *msg
+ dc.dst.rawProtectedHeaders = nil
+ dc.dst.storeProtectedHeaders = false
+ }
+ return decrypted, nil
+ }
+ return nil, fmt.Errorf(`failed to decrypt any of the recipients: %w`, errors.Join(errs...))
+}
+
+func (dc *decryptContext) tryRecipient(msg *Message, recipient Recipient, protectedHeaders Headers, aad, computedAad []byte) ([]byte, error) {
+ var tried int
+ var lastError error
+ for i, kp := range dc.keyProviders {
+ var sink algKeySink
+ if err := kp.FetchKeys(dc.ctx, &sink, recipient, msg); err != nil {
+ return nil, fmt.Errorf(`key provider %d failed: %w`, i, err)
+ }
+
+ for _, pair := range sink.list {
+ tried++
+ // alg is converted here because pair.alg is of type jwa.KeyAlgorithm.
+ // this may seem ugly, but we're trying to avoid declaring separate
+ // structs for `alg jwa.KeyEncryptionAlgorithm` and `alg jwa.SignatureAlgorithm`
+ //nolint:forcetypeassert
+ alg := pair.alg.(jwa.KeyEncryptionAlgorithm)
+ key := pair.key
+
+ decrypted, err := dc.decryptContent(msg, alg, key, recipient, protectedHeaders, aad, computedAad)
+ if err != nil {
+ lastError = err
+ continue
+ }
+
+ if dc.keyUsed != nil {
+ if err := blackmagic.AssignIfCompatible(dc.keyUsed, key); err != nil {
+ return nil, fmt.Errorf(`failed to assign used key (%T) to %T: %w`, key, dc.keyUsed, err)
+ }
+ }
+ return decrypted, nil
+ }
+ }
+ return nil, fmt.Errorf(`jwe.Decrypt: tried %d keys, but failed to match any of the keys with recipient (last error = %s)`, tried, lastError)
+}
+
+func (dc *decryptContext) decryptContent(msg *Message, alg jwa.KeyEncryptionAlgorithm, key any, recipient Recipient, protectedHeaders Headers, aad, computedAad []byte) ([]byte, error) {
+ if jwkKey, ok := key.(jwk.Key); ok {
+ var raw any
+ if err := jwk.Export(jwkKey, &raw); err != nil {
+ return nil, fmt.Errorf(`failed to retrieve raw key from %T: %w`, key, err)
+ }
+ key = raw
+ }
+
+ ce, ok := msg.protectedHeaders.ContentEncryption()
+ if !ok {
+ return nil, fmt.Errorf(`jwe.Decrypt: failed to retrieve content encryption algorithm from protected headers`)
+ }
+ dec := newDecrypter(alg, ce, key).
+ AuthenticatedData(aad).
+ ComputedAuthenticatedData(computedAad).
+ InitializationVector(msg.initializationVector).
+ Tag(msg.tag).
+ CEK(dc.cek)
+
+ if v, ok := recipient.Headers().Algorithm(); !ok || v != alg {
+ // algorithms don't match
+ return nil, fmt.Errorf(`jwe.Decrypt: key (%q) and recipient (%q) algorithms do not match`, alg, v)
+ }
+
+ h2, err := protectedHeaders.Clone()
+ if err != nil {
+ return nil, fmt.Errorf(`jwe.Decrypt: failed to copy headers (1): %w`, err)
+ }
+
+ h2, err = h2.Merge(recipient.Headers())
+ if err != nil {
+ return nil, fmt.Errorf(`failed to copy headers (2): %w`, err)
+ }
+
+ switch alg {
+ case jwa.ECDH_ES(), jwa.ECDH_ES_A128KW(), jwa.ECDH_ES_A192KW(), jwa.ECDH_ES_A256KW():
+ var epk any
+ if err := h2.Get(EphemeralPublicKeyKey, &epk); err != nil {
+ return nil, fmt.Errorf(`failed to get 'epk' field: %w`, err)
+ }
+ switch epk := epk.(type) {
+ case jwk.ECDSAPublicKey:
+ var pubkey ecdsa.PublicKey
+ if err := jwk.Export(epk, &pubkey); err != nil {
+ return nil, fmt.Errorf(`failed to get public key: %w`, err)
+ }
+ dec.PublicKey(&pubkey)
+ case jwk.OKPPublicKey:
+ var pubkey any
+ if err := jwk.Export(epk, &pubkey); err != nil {
+ return nil, fmt.Errorf(`failed to get public key: %w`, err)
+ }
+ dec.PublicKey(pubkey)
+ default:
+ return nil, fmt.Errorf("unexpected 'epk' type %T for alg %s", epk, alg)
+ }
+
+ if apu, ok := h2.AgreementPartyUInfo(); ok && len(apu) > 0 {
+ dec.AgreementPartyUInfo(apu)
+ }
+ if apv, ok := h2.AgreementPartyVInfo(); ok && len(apv) > 0 {
+ dec.AgreementPartyVInfo(apv)
+ }
+ case jwa.A128GCMKW(), jwa.A192GCMKW(), jwa.A256GCMKW():
+ var ivB64 string
+ if err := h2.Get(InitializationVectorKey, &ivB64); err == nil {
+ iv, err := base64.DecodeString(ivB64)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to b64-decode 'iv': %w`, err)
+ }
+ dec.KeyInitializationVector(iv)
+ }
+ var tagB64 string
+ if err := h2.Get(TagKey, &tagB64); err == nil {
+ tag, err := base64.DecodeString(tagB64)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to b64-decode 'tag': %w`, err)
+ }
+ dec.KeyTag(tag)
+ }
+ case jwa.PBES2_HS256_A128KW(), jwa.PBES2_HS384_A192KW(), jwa.PBES2_HS512_A256KW():
+ var saltB64 string
+ if err := h2.Get(SaltKey, &saltB64); err != nil {
+ return nil, fmt.Errorf(`failed to get %q field`, SaltKey)
+ }
+
+ // check if WithUseNumber is effective, because it will change the
+ // type of the underlying value (#1140)
+ var countFlt float64
+ if json.UseNumber() {
+ var count json.Number
+ if err := h2.Get(CountKey, &count); err != nil {
+ return nil, fmt.Errorf(`failed to get %q field`, CountKey)
+ }
+ v, err := count.Float64()
+ if err != nil {
+ return nil, fmt.Errorf("failed to convert 'p2c' to float64: %w", err)
+ }
+ countFlt = v
+ } else {
+ var count float64
+ if err := h2.Get(CountKey, &count); err != nil {
+ return nil, fmt.Errorf(`failed to get %q field`, CountKey)
+ }
+ countFlt = count
+ }
+
+ muSettings.RLock()
+ maxCount := maxPBES2Count
+ muSettings.RUnlock()
+ if countFlt > float64(maxCount) {
+ return nil, fmt.Errorf("invalid 'p2c' value")
+ }
+ salt, err := base64.DecodeString(saltB64)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to b64-decode 'salt': %w`, err)
+ }
+ dec.KeySalt(salt)
+ dec.KeyCount(int(countFlt))
+ }
+
+ plaintext, err := dec.Decrypt(recipient, msg.cipherText, msg)
+ if err != nil {
+ return nil, fmt.Errorf(`jwe.Decrypt: decryption failed: %w`, err)
+ }
+
+ if v, ok := h2.Compression(); ok && v == jwa.Deflate() {
+ buf, err := uncompress(plaintext, dc.maxDecompressBufferSize)
+ if err != nil {
+ return nil, fmt.Errorf(`jwe.Derypt: failed to uncompress payload: %w`, err)
+ }
+ plaintext = buf
+ }
+
+ if plaintext == nil {
+ return nil, fmt.Errorf(`failed to find matching recipient`)
+ }
+
+ return plaintext, nil
+}
+
+// encryptContext holds the state during JWE encryption, similar to JWS signContext
+type encryptContext struct {
+ calg jwa.ContentEncryptionAlgorithm
+ compression jwa.CompressionAlgorithm
+ format int
+ builders []*recipientBuilder
+ protected Headers
+}
+
+var encryptContextPool = pool.New(allocEncryptContext, freeEncryptContext)
+
+func allocEncryptContext() *encryptContext {
+ return &encryptContext{
+ calg: jwa.A256GCM(),
+ compression: jwa.NoCompress(),
+ format: fmtCompact,
+ }
+}
+
+func freeEncryptContext(ec *encryptContext) *encryptContext {
+ ec.calg = jwa.A256GCM()
+ ec.compression = jwa.NoCompress()
+ ec.format = fmtCompact
+ ec.builders = ec.builders[:0]
+ ec.protected = nil
+ return ec
+}
+
+func (ec *encryptContext) ProcessOptions(options []EncryptOption) error {
+ var mergeProtected bool
+ var useRawCEK bool
+ for _, option := range options {
+ switch option.Ident() {
+ case identKey{}:
+ var wk *withKey
+ if err := option.Value(&wk); err != nil {
+ return fmt.Errorf("jwe.encrypt: WithKey must be a *withKey: %w", err)
+ }
+ v, ok := wk.alg.(jwa.KeyEncryptionAlgorithm)
+ if !ok {
+ return fmt.Errorf("jwe.encrypt: WithKey() option must be specified using jwa.KeyEncryptionAlgorithm (got %T)", wk.alg)
+ }
+ if v == jwa.DIRECT() || v == jwa.ECDH_ES() {
+ useRawCEK = true
+ }
+ ec.builders = append(ec.builders, &recipientBuilder{alg: v, key: wk.key, headers: wk.headers})
+ case identContentEncryptionAlgorithm{}:
+ var c jwa.ContentEncryptionAlgorithm
+ if err := option.Value(&c); err != nil {
+ return err
+ }
+ ec.calg = c
+ case identCompress{}:
+ var comp jwa.CompressionAlgorithm
+ if err := option.Value(&comp); err != nil {
+ return err
+ }
+ ec.compression = comp
+ case identMergeProtectedHeaders{}:
+ var mp bool
+ if err := option.Value(&mp); err != nil {
+ return err
+ }
+ mergeProtected = mp
+ case identProtectedHeaders{}:
+ var hdrs Headers
+ if err := option.Value(&hdrs); err != nil {
+ return err
+ }
+ if !mergeProtected || ec.protected == nil {
+ ec.protected = hdrs
+ } else {
+ merged, err := ec.protected.Merge(hdrs)
+ if err != nil {
+ return fmt.Errorf(`failed to merge headers: %w`, err)
+ }
+ ec.protected = merged
+ }
+ case identSerialization{}:
+ var fmtOpt int
+ if err := option.Value(&fmtOpt); err != nil {
+ return err
+ }
+ ec.format = fmtOpt
+ }
+ }
+
+ // We need to have at least one builder
+ switch l := len(ec.builders); {
+ case l == 0:
+ return fmt.Errorf(`missing key encryption builders: use jwe.WithKey() to specify one`)
+ case l > 1:
+ if ec.format == fmtCompact {
+ return fmt.Errorf(`cannot use compact serialization when multiple recipients exist (check the number of WithKey() argument, or use WithJSON())`)
+ }
+ }
+
+ if useRawCEK {
+ if len(ec.builders) != 1 {
+ return fmt.Errorf(`multiple recipients for ECDH-ES/DIRECT mode supported`)
+ }
+ }
+
+ return nil
+}
+
+var msgPool = pool.New(allocMessage, freeMessage)
+
+func allocMessage() *Message {
+ return &Message{
+ recipients: make([]Recipient, 0, 1),
+ }
+}
+
+func freeMessage(msg *Message) *Message {
+ msg.cipherText = nil
+ msg.initializationVector = nil
+ if hdr := msg.protectedHeaders; hdr != nil {
+ headerPool.Put(hdr)
+ }
+ msg.protectedHeaders = nil
+ msg.unprotectedHeaders = nil
+ msg.recipients = nil // reuse should be done elsewhere
+ msg.authenticatedData = nil
+ msg.tag = nil
+ msg.rawProtectedHeaders = nil
+ msg.storeProtectedHeaders = false
+ return msg
+}
+
+var headerPool = pool.New(NewHeaders, freeHeaders)
+
+func freeHeaders(h Headers) Headers {
+ if c, ok := h.(interface{ clear() }); ok {
+ c.clear()
+ }
+ return h
+}
+
+var recipientPool = pool.New(NewRecipient, freeRecipient)
+
+func freeRecipient(r Recipient) Recipient {
+ if h := r.Headers(); h != nil {
+ if c, ok := h.(interface{ clear() }); ok {
+ c.clear()
+ }
+ }
+
+ if sr, ok := r.(*stdRecipient); ok {
+ sr.encryptedKey = nil
+ }
+ return r
+}
+
+var recipientSlicePool = pool.NewSlicePool(allocRecipientSlice, freeRecipientSlice)
+
+func allocRecipientSlice() []Recipient {
+ return make([]Recipient, 0, 1)
+}
+
+func freeRecipientSlice(rs []Recipient) []Recipient {
+ for _, r := range rs {
+ recipientPool.Put(r)
+ }
+ return rs[:0]
+}
+
+func (ec *encryptContext) EncryptMessage(payload []byte, cek []byte) ([]byte, error) {
+ // Get protected headers from pool and copy contents from context
+ protected := headerPool.Get()
+ if userSupplied := ec.protected; userSupplied != nil {
+ ec.protected = nil // Clear from context
+ if err := userSupplied.Copy(protected); err != nil {
+ return nil, fmt.Errorf(`failed to copy protected headers: %w`, err)
+ }
+ }
+
+ // There is exactly one content encrypter.
+ contentcrypt, err := content_crypt.NewGeneric(ec.calg)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to create AES encrypter: %w`, err)
+ }
+
+ // Generate CEK if not provided
+ if len(cek) <= 0 {
+ bk, err := keygen.Random(contentcrypt.KeySize())
+ if err != nil {
+ return nil, fmt.Errorf(`failed to generate key: %w`, err)
+ }
+ cek = bk.Bytes()
+ }
+
+ var useRawCEK bool
+ for _, builder := range ec.builders {
+ if builder.alg == jwa.DIRECT() || builder.alg == jwa.ECDH_ES() {
+ useRawCEK = true
+ break
+ }
+ }
+
+ recipients := recipientSlicePool.GetCapacity(len(ec.builders))
+ defer recipientSlicePool.Put(recipients)
+
+ for i, builder := range ec.builders {
+ r := recipientPool.Get()
+ defer recipientPool.Put(r)
+
+ // some builders require hint from the contentcrypt object
+ rawCEK, err := builder.Build(r, cek, ec.calg, contentcrypt)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to create recipient #%d: %w`, i, err)
+ }
+ recipients = append(recipients, r)
+
+ // Kinda feels weird, but if useRawCEK == true, we asserted earlier
+ // that len(builders) == 1, so this is OK
+ if useRawCEK {
+ cek = rawCEK
+ }
+ }
+
+ if err := protected.Set(ContentEncryptionKey, ec.calg); err != nil {
+ return nil, fmt.Errorf(`failed to set "enc" in protected header: %w`, err)
+ }
+
+ if ec.compression != jwa.NoCompress() {
+ payload, err = compress(payload)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to compress payload before encryption: %w`, err)
+ }
+ if err := protected.Set(CompressionKey, ec.compression); err != nil {
+ return nil, fmt.Errorf(`failed to set "zip" in protected header: %w`, err)
+ }
+ }
+
+ // If there's only one recipient, you want to include that in the
+ // protected header
+ if len(recipients) == 1 {
+ h, err := protected.Merge(recipients[0].Headers())
+ if err != nil {
+ return nil, fmt.Errorf(`failed to merge protected headers: %w`, err)
+ }
+ protected = h
+ }
+
+ aad, err := protected.Encode()
+ if err != nil {
+ return nil, fmt.Errorf(`failed to base64 encode protected headers: %w`, err)
+ }
+
+ iv, ciphertext, tag, err := contentcrypt.Encrypt(cek, payload, aad)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to encrypt payload: %w`, err)
+ }
+
+ msg := msgPool.Get()
+ defer msgPool.Put(msg)
+
+ if err := msg.Set(CipherTextKey, ciphertext); err != nil {
+ return nil, fmt.Errorf(`failed to set %s: %w`, CipherTextKey, err)
+ }
+ if err := msg.Set(InitializationVectorKey, iv); err != nil {
+ return nil, fmt.Errorf(`failed to set %s: %w`, InitializationVectorKey, err)
+ }
+ if err := msg.Set(ProtectedHeadersKey, protected); err != nil {
+ return nil, fmt.Errorf(`failed to set %s: %w`, ProtectedHeadersKey, err)
+ }
+ if err := msg.Set(RecipientsKey, recipients); err != nil {
+ return nil, fmt.Errorf(`failed to set %s: %w`, RecipientsKey, err)
+ }
+ if err := msg.Set(TagKey, tag); err != nil {
+ return nil, fmt.Errorf(`failed to set %s: %w`, TagKey, err)
+ }
+
+ switch ec.format {
+ case fmtCompact:
+ return Compact(msg)
+ case fmtJSON:
+ return json.Marshal(msg)
+ case fmtJSONPretty:
+ return json.MarshalIndent(msg, "", " ")
+ default:
+ return nil, fmt.Errorf(`invalid serialization`)
+ }
+}
+
+// Decrypt takes encrypted payload, and information required to decrypt the
+// payload (e.g. the key encryption algorithm and the corresponding
+// key to decrypt the JWE message) in its optional arguments. See
+// the examples and list of options that return a DecryptOption for possible
+// values. Upon successful decryptiond returns the decrypted payload.
+//
+// The JWE message can be either compact or full JSON format.
+//
+// When using `jwe.WithKeyEncryptionAlgorithm()`, you can pass a `jwa.KeyAlgorithm`
+// for convenience: this is mainly to allow you to directly pass the result of `(jwk.Key).Algorithm()`.
+// However, do note that while `(jwk.Key).Algorithm()` could very well contain key encryption
+// algorithms, it could also contain other types of values, such as _signature algorithms_.
+// In order for `jwe.Decrypt` to work properly, the `alg` parameter must be of type
+// `jwa.KeyEncryptionAlgorithm` or otherwise it will cause an error.
+//
+// When using `jwe.WithKey()`, the value must be a private key.
+// It can be either in its raw format (e.g. *rsa.PrivateKey) or a jwk.Key
+//
+// When the encrypted message is also compressed, the decompressed payload must be
+// smaller than the size specified by the `jwe.WithMaxDecompressBufferSize` setting,
+// which defaults to 10MB. If the decompressed payload is larger than this size,
+// an error is returned.
+//
+// You can opt to change the MaxDecompressBufferSize setting globally, or on a
+// per-call basis by passing the `jwe.WithMaxDecompressBufferSize` option to
+// either `jwe.Settings()` or `jwe.Decrypt()`:
+//
+// jwe.Settings(jwe.WithMaxDecompressBufferSize(10*1024*1024)) // changes value globally
+// jwe.Decrypt(..., jwe.WithMaxDecompressBufferSize(250*1024)) // changes just for this call
+func Decrypt(buf []byte, options ...DecryptOption) ([]byte, error) {
+ dc := decryptContextPool.Get()
+ defer decryptContextPool.Put(dc)
+
+ if err := dc.ProcessOptions(options); err != nil {
+ return nil, decryptError{fmt.Errorf(`jwe.Decrypt: failed to process options: %w`, err)}
+ }
+
+ ret, err := dc.DecryptMessage(buf)
+ if err != nil {
+ return nil, decryptError{fmt.Errorf(`jwe.Decrypt: %w`, err)}
+ }
+ return ret, nil
+}
+
+// Parse parses the JWE message into a Message object. The JWE message
+// can be either compact or full JSON format.
+//
+// Parse() currently does not take any options, but the API accepts it
+// in anticipation of future addition.
+func Parse(buf []byte, _ ...ParseOption) (*Message, error) {
+ return parseJSONOrCompact(buf, false)
+}
+
+// errors are wrapped within this function, because we call it directly
+// from Decrypt as well.
+func parseJSONOrCompact(buf []byte, storeProtectedHeaders bool) (*Message, error) {
+ buf = bytes.TrimSpace(buf)
+ if len(buf) == 0 {
+ return nil, parseError{fmt.Errorf(`jwe.Parse: empty buffer`)}
+ }
+
+ var msg *Message
+ var err error
+ if buf[0] == tokens.OpenCurlyBracket {
+ msg, err = parseJSON(buf, storeProtectedHeaders)
+ } else {
+ msg, err = parseCompact(buf, storeProtectedHeaders)
+ }
+
+ if err != nil {
+ return nil, parseError{fmt.Errorf(`jwe.Parse: %w`, err)}
+ }
+ return msg, nil
+}
+
+// ParseString is the same as Parse, but takes a string.
+func ParseString(s string) (*Message, error) {
+ msg, err := Parse([]byte(s))
+ if err != nil {
+ return nil, parseError{fmt.Errorf(`jwe.ParseString: %w`, err)}
+ }
+ return msg, nil
+}
+
+// ParseReader is the same as Parse, but takes an io.Reader.
+func ParseReader(src io.Reader) (*Message, error) {
+ buf, err := io.ReadAll(src)
+ if err != nil {
+ return nil, parseError{fmt.Errorf(`jwe.ParseReader: failed to read from io.Reader: %w`, err)}
+ }
+ msg, err := Parse(buf)
+ if err != nil {
+ return nil, parseError{fmt.Errorf(`jwe.ParseReader: %w`, err)}
+ }
+ return msg, nil
+}
+
+func parseJSON(buf []byte, storeProtectedHeaders bool) (*Message, error) {
+ m := NewMessage()
+ m.storeProtectedHeaders = storeProtectedHeaders
+ if err := json.Unmarshal(buf, &m); err != nil {
+ return nil, fmt.Errorf(`failed to parse JSON: %w`, err)
+ }
+ return m, nil
+}
+
+func parseCompact(buf []byte, storeProtectedHeaders bool) (*Message, error) {
+ var parts [5][]byte
+ var ok bool
+
+ for i := range 4 {
+ parts[i], buf, ok = bytes.Cut(buf, []byte{tokens.Period})
+ if !ok {
+ return nil, fmt.Errorf(`compact JWE format must have five parts (%d)`, i+1)
+ }
+ }
+ // Validate that the last part does not contain more dots
+ if bytes.ContainsRune(buf, tokens.Period) {
+ return nil, errors.New(`compact JWE format must have five parts, not more`)
+ }
+ parts[4] = buf
+
+ hdrbuf, err := base64.Decode(parts[0])
+ if err != nil {
+ return nil, fmt.Errorf(`failed to parse first part of compact form: %w`, err)
+ }
+
+ protected := NewHeaders()
+ if err := json.Unmarshal(hdrbuf, protected); err != nil {
+ return nil, fmt.Errorf(`failed to parse header JSON: %w`, err)
+ }
+
+ ivbuf, err := base64.Decode(parts[2])
+ if err != nil {
+ return nil, fmt.Errorf(`failed to base64 decode iv: %w`, err)
+ }
+
+ ctbuf, err := base64.Decode(parts[3])
+ if err != nil {
+ return nil, fmt.Errorf(`failed to base64 decode content: %w`, err)
+ }
+
+ tagbuf, err := base64.Decode(parts[4])
+ if err != nil {
+ return nil, fmt.Errorf(`failed to base64 decode tag: %w`, err)
+ }
+
+ m := NewMessage()
+ if err := m.Set(CipherTextKey, ctbuf); err != nil {
+ return nil, fmt.Errorf(`failed to set %s: %w`, CipherTextKey, err)
+ }
+ if err := m.Set(InitializationVectorKey, ivbuf); err != nil {
+ return nil, fmt.Errorf(`failed to set %s: %w`, InitializationVectorKey, err)
+ }
+ if err := m.Set(ProtectedHeadersKey, protected); err != nil {
+ return nil, fmt.Errorf(`failed to set %s: %w`, ProtectedHeadersKey, err)
+ }
+
+ if err := m.makeDummyRecipient(string(parts[1]), protected); err != nil {
+ return nil, fmt.Errorf(`failed to setup recipient: %w`, err)
+ }
+
+ if err := m.Set(TagKey, tagbuf); err != nil {
+ return nil, fmt.Errorf(`failed to set %s: %w`, TagKey, err)
+ }
+
+ if storeProtectedHeaders {
+ // This is later used for decryption.
+ m.rawProtectedHeaders = parts[0]
+ }
+
+ return m, nil
+}
+
+type CustomDecoder = json.CustomDecoder
+type CustomDecodeFunc = json.CustomDecodeFunc
+
+// RegisterCustomField allows users to specify that a private field
+// be decoded as an instance of the specified type. This option has
+// a global effect.
+//
+// For example, suppose you have a custom field `x-birthday`, which
+// you want to represent as a string formatted in RFC3339 in JSON,
+// but want it back as `time.Time`.
+//
+// In such case you would register a custom field as follows
+//
+// jws.RegisterCustomField(`x-birthday`, time.Time{})
+//
+// Then you can use a `time.Time` variable to extract the value
+// of `x-birthday` field, instead of having to use `any`
+// and later convert it to `time.Time`
+//
+// var bday time.Time
+// _ = hdr.Get(`x-birthday`, &bday)
+//
+// If you need a more fine-tuned control over the decoding process,
+// you can register a `CustomDecoder`. For example, below shows
+// how to register a decoder that can parse RFC1123 format string:
+//
+// jwe.RegisterCustomField(`x-birthday`, jwe.CustomDecodeFunc(func(data []byte) (any, error) {
+// return time.Parse(time.RFC1123, string(data))
+// }))
+//
+// Please note that use of custom fields can be problematic if you
+// are using a library that does not implement MarshalJSON/UnmarshalJSON
+// and you try to roundtrip from an object to JSON, and then back to an object.
+// For example, in the above example, you can _parse_ time values formatted
+// in the format specified in RFC822, but when you convert an object into
+// JSON, it will be formatted in RFC3339, because that's what `time.Time`
+// likes to do. To avoid this, it's always better to use a custom type
+// that wraps your desired type (in this case `time.Time`) and implement
+// MarshalJSON and UnmashalJSON.
+func RegisterCustomField(name string, object any) {
+ registry.Register(name, object)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/BUILD.bazel
new file mode 100644
index 0000000000..c410a05cdf
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/BUILD.bazel
@@ -0,0 +1,43 @@
+load("@rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "jwebb",
+ srcs = [
+ "content_cipher.go",
+ "key_decrypt_asymmetric.go",
+ "key_decrypt_symmetric.go",
+ "key_encrypt_asymmetric.go",
+ "key_encrypt_symmetric.go",
+ "key_encryption.go",
+ "keywrap.go",
+ ],
+ importpath = "github.com/lestrrat-go/jwx/v3/jwe/jwebb",
+ visibility = ["//jwe:__subpackages__"],
+ deps = [
+ "//internal/keyconv",
+ "//internal/pool",
+ "//jwe/internal/cipher",
+ "//jwe/internal/concatkdf",
+ "//jwe/internal/content_crypt",
+ "//jwe/internal/keygen",
+ "//internal/tokens",
+ "@org_golang_x_crypto//pbkdf2",
+ ],
+)
+
+go_test(
+ name = "jwebb_test",
+ srcs = [
+ "decrypt_test.go",
+ "jwebb_test.go",
+ "keywrap_test.go",
+ ],
+ embed = [":jwebb"],
+ deps = [
+ "//internal/jwxtest",
+ "//jwa",
+ "//jwe/internal/keygen",
+ "//internal/tokens",
+ "@com_github_stretchr_testify//require",
+ ],
+)
\ No newline at end of file
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/content_cipher.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/content_cipher.go
new file mode 100644
index 0000000000..9078789d8d
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/content_cipher.go
@@ -0,0 +1,34 @@
+package jwebb
+
+import (
+ "fmt"
+
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+ "github.com/lestrrat-go/jwx/v3/jwe/internal/cipher"
+ "github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt"
+)
+
+// ContentEncryptionIsSupported checks if the content encryption algorithm is supported
+func ContentEncryptionIsSupported(alg string) bool {
+ switch alg {
+ case tokens.A128GCM, tokens.A192GCM, tokens.A256GCM,
+ tokens.A128CBC_HS256, tokens.A192CBC_HS384, tokens.A256CBC_HS512:
+ return true
+ default:
+ return false
+ }
+}
+
+// CreateContentCipher creates a content encryption cipher for the given algorithm string
+func CreateContentCipher(alg string) (content_crypt.Cipher, error) {
+ if !ContentEncryptionIsSupported(alg) {
+ return nil, fmt.Errorf(`invalid content cipher algorithm (%s)`, alg)
+ }
+
+ cipher, err := cipher.NewAES(alg)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to build content cipher for %s: %w`, alg, err)
+ }
+
+ return cipher, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/jwebb.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/jwebb.go
new file mode 100644
index 0000000000..3768acef8b
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/jwebb.go
@@ -0,0 +1,15 @@
+// Package jwebb provides the building blocks (hence the name "bb") for JWE operations.
+// It should be thought of as a low-level API, almost akin to internal packages
+// that should not be used directly by users of the jwx package. However, these exist
+// to provide a more efficient way to perform JWE operations without the overhead of
+// the higher-level jwe package to power-users who know what they are doing.
+//
+// This package is currently considered EXPERIMENTAL, and the API may change
+// without notice. It is not recommended to use this package unless you are
+// fully aware of the implications of using it.
+//
+// All bb packages in jwx follow the same design principles:
+// 1. Does minimal checking of input parameters (for performance); callers need to ensure that the parameters are valid.
+// 2. All exported functions are stringly typed (i.e. they do not take any parameters unless they absolutely have to).
+// 3. Does not rely on other public jwx packages (they are standalone, except for internal packages).
+package jwebb
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_decrypt_asymmetric.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_decrypt_asymmetric.go
new file mode 100644
index 0000000000..ac07993176
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_decrypt_asymmetric.go
@@ -0,0 +1,177 @@
+package jwebb
+
+import (
+ "crypto"
+ "crypto/aes"
+ "crypto/ecdh"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/binary"
+ "fmt"
+ "hash"
+
+ "github.com/lestrrat-go/jwx/v3/internal/keyconv"
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+ "github.com/lestrrat-go/jwx/v3/jwe/internal/concatkdf"
+ "github.com/lestrrat-go/jwx/v3/jwe/internal/keygen"
+)
+
+func contentEncryptionKeySize(ctalg string) (uint32, error) {
+ switch ctalg {
+ case tokens.A128GCM:
+ return tokens.KeySize16, nil
+ case tokens.A192GCM:
+ return tokens.KeySize24, nil
+ case tokens.A256GCM:
+ return tokens.KeySize32, nil
+ case tokens.A128CBC_HS256:
+ return tokens.KeySize32, nil
+ case tokens.A192CBC_HS384:
+ return tokens.KeySize48, nil
+ case tokens.A256CBC_HS512:
+ return tokens.KeySize64, nil
+ default:
+ return 0, fmt.Errorf(`unsupported content encryption algorithm %s`, ctalg)
+ }
+}
+
+func KeyEncryptionECDHESKeySize(alg, ctalg string) (string, uint32, bool, error) {
+ switch alg {
+ case tokens.ECDH_ES:
+ keysize, err := contentEncryptionKeySize(ctalg)
+ if err != nil {
+ return "", 0, false, err
+ }
+ return ctalg, keysize, false, nil
+ case tokens.ECDH_ES_A128KW:
+ return alg, tokens.KeySize16, true, nil
+ case tokens.ECDH_ES_A192KW:
+ return alg, tokens.KeySize24, true, nil
+ case tokens.ECDH_ES_A256KW:
+ return alg, tokens.KeySize32, true, nil
+ default:
+ return "", 0, false, fmt.Errorf(`unsupported key encryption algorithm %s`, alg)
+ }
+}
+
+func DeriveECDHES(alg string, apu, apv []byte, privkeyif, pubkeyif any, keysize uint32) ([]byte, error) {
+ pubinfo := make([]byte, 4)
+ binary.BigEndian.PutUint32(pubinfo, keysize*tokens.BitsPerByte)
+
+ var privkey *ecdh.PrivateKey
+ var pubkey *ecdh.PublicKey
+ if err := keyconv.ECDHPrivateKey(&privkey, privkeyif); err != nil {
+ return nil, fmt.Errorf(`jwebb.DeriveECDHES: %w`, err)
+ }
+ if err := keyconv.ECDHPublicKey(&pubkey, pubkeyif); err != nil {
+ return nil, fmt.Errorf(`jwebb.DeriveECDHES: %w`, err)
+ }
+
+ zBytes, err := privkey.ECDH(pubkey)
+ if err != nil {
+ return nil, fmt.Errorf(`jwebb.DeriveECDHES: unable to determine Z: %w`, err)
+ }
+ kdf := concatkdf.New(crypto.SHA256, []byte(alg), zBytes, apu, apv, pubinfo, []byte{})
+ key := make([]byte, keysize)
+ if _, err := kdf.Read(key); err != nil {
+ return nil, fmt.Errorf(`jwebb.DeriveECDHES: failed to read kdf: %w`, err)
+ }
+
+ return key, nil
+}
+
+func KeyDecryptECDHESKeyWrap(_, enckey []byte, alg string, apu, apv []byte, privkey, pubkey any, keysize uint32) ([]byte, error) {
+ key, err := DeriveECDHES(alg, apu, apv, privkey, pubkey, keysize)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to derive ECDHES encryption key: %w`, err)
+ }
+
+ block, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to create cipher for ECDH-ES key wrap: %w`, err)
+ }
+
+ return Unwrap(block, enckey)
+}
+
+func KeyDecryptECDHES(_, _ []byte, alg string, apu, apv []byte, privkey, pubkey any, keysize uint32) ([]byte, error) {
+ key, err := DeriveECDHES(alg, apu, apv, privkey, pubkey, keysize)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to derive ECDHES encryption key: %w`, err)
+ }
+ return key, nil
+}
+
+// RSA key decryption functions
+
+func KeyDecryptRSA15(_, enckey []byte, privkeyif any, keysize int) ([]byte, error) {
+ var privkey *rsa.PrivateKey
+ if err := keyconv.RSAPrivateKey(&privkey, privkeyif); err != nil {
+ return nil, fmt.Errorf(`jwebb.KeyDecryptRSA15: %w`, err)
+ }
+
+ // Perform some input validation.
+ expectedlen := privkey.PublicKey.N.BitLen() / tokens.BitsPerByte
+ if expectedlen != len(enckey) {
+ // Input size is incorrect, the encrypted payload should always match
+ // the size of the public modulus (e.g. using a 2048 bit key will
+ // produce 256 bytes of output). Reject this since it's invalid input.
+ return nil, fmt.Errorf(
+ "input size for key decrypt is incorrect (expected %d, got %d)",
+ expectedlen,
+ len(enckey),
+ )
+ }
+
+ // Generate a random CEK of the required size
+ bk, err := keygen.Random(keysize * tokens.RSAKeyGenMultiplier)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to generate key`)
+ }
+ cek := bk.Bytes()
+
+ // Use a defer/recover pattern to handle potential panics from DecryptPKCS1v15SessionKey
+ defer func() {
+ // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload
+ // because of an index out of bounds error, which we want to ignore.
+ // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover()
+ // only exists for preventing crashes with unpatched versions.
+ // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k
+ // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33
+ _ = recover()
+ }()
+
+ // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to
+ // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing
+ // the Million Message Attack on Cryptographic Message Syntax". We are
+ // therefore deliberately ignoring errors here.
+ _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, privkey, enckey, cek)
+
+ return cek, nil
+}
+
+func KeyDecryptRSAOAEP(_, enckey []byte, alg string, privkeyif any) ([]byte, error) {
+ var privkey *rsa.PrivateKey
+ if err := keyconv.RSAPrivateKey(&privkey, privkeyif); err != nil {
+ return nil, fmt.Errorf(`jwebb.KeyDecryptRSAOAEP: %w`, err)
+ }
+
+ var hash hash.Hash
+ switch alg {
+ case tokens.RSA_OAEP:
+ hash = sha1.New()
+ case tokens.RSA_OAEP_256:
+ hash = sha256.New()
+ case tokens.RSA_OAEP_384:
+ hash = sha512.New384()
+ case tokens.RSA_OAEP_512:
+ hash = sha512.New()
+ default:
+ return nil, fmt.Errorf(`failed to generate key encrypter for RSA-OAEP: RSA_OAEP/RSA_OAEP_256/RSA_OAEP_384/RSA_OAEP_512 required`)
+ }
+
+ return rsa.DecryptOAEP(hash, rand.Reader, privkey, enckey, []byte{})
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_decrypt_symmetric.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_decrypt_symmetric.go
new file mode 100644
index 0000000000..c09e30a34e
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_decrypt_symmetric.go
@@ -0,0 +1,91 @@
+package jwebb
+
+import (
+ "crypto/aes"
+ cryptocipher "crypto/cipher"
+ "crypto/sha256"
+ "crypto/sha512"
+ "fmt"
+ "hash"
+
+ "golang.org/x/crypto/pbkdf2"
+
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+)
+
+// AES key wrap decryption functions
+
+// Use constants from tokens package
+// No need to redefine them here
+
+func KeyDecryptAESKW(_, enckey []byte, _ string, sharedkey []byte) ([]byte, error) {
+ block, err := aes.NewCipher(sharedkey)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to create cipher from shared key: %w`, err)
+ }
+
+ cek, err := Unwrap(block, enckey)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to unwrap data: %w`, err)
+ }
+ return cek, nil
+}
+
+func KeyDecryptDirect(_, _ []byte, _ string, cek []byte) ([]byte, error) {
+ return cek, nil
+}
+
+func KeyDecryptPBES2(_, enckey []byte, alg string, password []byte, salt []byte, count int) ([]byte, error) {
+ var hashFunc func() hash.Hash
+ var keylen int
+
+ switch alg {
+ case tokens.PBES2_HS256_A128KW:
+ hashFunc = sha256.New
+ keylen = tokens.KeySize16
+ case tokens.PBES2_HS384_A192KW:
+ hashFunc = sha512.New384
+ keylen = tokens.KeySize24
+ case tokens.PBES2_HS512_A256KW:
+ hashFunc = sha512.New
+ keylen = tokens.KeySize32
+ default:
+ return nil, fmt.Errorf(`unsupported PBES2 algorithm: %s`, alg)
+ }
+
+ // Derive key using PBKDF2
+ derivedKey := pbkdf2.Key(password, salt, count, keylen, hashFunc)
+
+ // Use the derived key for AES key wrap
+ return KeyDecryptAESKW(nil, enckey, alg, derivedKey)
+}
+
+func KeyDecryptAESGCMKW(recipientKey, _ []byte, _ string, sharedkey []byte, iv []byte, tag []byte) ([]byte, error) {
+ if len(iv) != tokens.GCMIVSize {
+ return nil, fmt.Errorf("GCM requires 96-bit iv, got %d", len(iv)*tokens.BitsPerByte)
+ }
+ if len(tag) != tokens.GCMTagSize {
+ return nil, fmt.Errorf("GCM requires 128-bit tag, got %d", len(tag)*tokens.BitsPerByte)
+ }
+
+ block, err := aes.NewCipher(sharedkey)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to create new AES cipher: %w`, err)
+ }
+
+ aesgcm, err := cryptocipher.NewGCM(block)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to create new GCM wrap: %w`, err)
+ }
+
+ // Combine recipient key and tag for GCM decryption
+ ciphertext := recipientKey[:]
+ ciphertext = append(ciphertext, tag...)
+
+ jek, err := aesgcm.Open(nil, iv, ciphertext, nil)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to decode key: %w`, err)
+ }
+
+ return jek, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_encrypt_asymmetric.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_encrypt_asymmetric.go
new file mode 100644
index 0000000000..6f008173c8
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_encrypt_asymmetric.go
@@ -0,0 +1,147 @@
+package jwebb
+
+import (
+ "crypto/aes"
+ "crypto/ecdh"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/sha512"
+ "fmt"
+ "hash"
+
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+ "github.com/lestrrat-go/jwx/v3/jwe/internal/keygen"
+)
+
+// KeyEncryptRSA15 encrypts the CEK using RSA PKCS#1 v1.5
+func KeyEncryptRSA15(cek []byte, _ string, pubkey *rsa.PublicKey) (keygen.ByteSource, error) {
+ encrypted, err := rsa.EncryptPKCS1v15(rand.Reader, pubkey, cek)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to encrypt using PKCS1v15: %w`, err)
+ }
+ return keygen.ByteKey(encrypted), nil
+}
+
+// KeyEncryptRSAOAEP encrypts the CEK using RSA OAEP
+func KeyEncryptRSAOAEP(cek []byte, alg string, pubkey *rsa.PublicKey) (keygen.ByteSource, error) {
+ var hash hash.Hash
+ switch alg {
+ case tokens.RSA_OAEP:
+ hash = sha1.New()
+ case tokens.RSA_OAEP_256:
+ hash = sha256.New()
+ case tokens.RSA_OAEP_384:
+ hash = sha512.New384()
+ case tokens.RSA_OAEP_512:
+ hash = sha512.New()
+ default:
+ return nil, fmt.Errorf(`failed to generate key encrypter for RSA-OAEP: RSA_OAEP/RSA_OAEP_256/RSA_OAEP_384/RSA_OAEP_512 required`)
+ }
+
+ encrypted, err := rsa.EncryptOAEP(hash, rand.Reader, pubkey, cek, []byte{})
+ if err != nil {
+ return nil, fmt.Errorf(`failed to OAEP encrypt: %w`, err)
+ }
+ return keygen.ByteKey(encrypted), nil
+}
+
+// generateECDHESKeyECDSA generates the key material for ECDSA keys using ECDH-ES
+func generateECDHESKeyECDSA(alg string, calg string, keysize uint32, pubkey *ecdsa.PublicKey, apu, apv []byte) (keygen.ByteWithECPublicKey, error) {
+ // Generate the key directly
+ kg, err := keygen.Ecdhes(alg, calg, int(keysize), pubkey, apu, apv)
+ if err != nil {
+ return keygen.ByteWithECPublicKey{}, fmt.Errorf(`failed to generate ECDSA key: %w`, err)
+ }
+
+ bwpk, ok := kg.(keygen.ByteWithECPublicKey)
+ if !ok {
+ return keygen.ByteWithECPublicKey{}, fmt.Errorf(`key generator generated invalid key (expected ByteWithECPublicKey)`)
+ }
+
+ return bwpk, nil
+}
+
+// generateECDHESKeyX25519 generates the key material for X25519 keys using ECDH-ES
+func generateECDHESKeyX25519(alg string, calg string, keysize uint32, pubkey *ecdh.PublicKey) (keygen.ByteWithECPublicKey, error) {
+ // Generate the key directly
+ kg, err := keygen.X25519(alg, calg, int(keysize), pubkey)
+ if err != nil {
+ return keygen.ByteWithECPublicKey{}, fmt.Errorf(`failed to generate X25519 key: %w`, err)
+ }
+
+ bwpk, ok := kg.(keygen.ByteWithECPublicKey)
+ if !ok {
+ return keygen.ByteWithECPublicKey{}, fmt.Errorf(`key generator generated invalid key (expected ByteWithECPublicKey)`)
+ }
+
+ return bwpk, nil
+}
+
+// KeyEncryptECDHESKeyWrapECDSA encrypts the CEK using ECDH-ES with key wrapping for ECDSA keys
+func KeyEncryptECDHESKeyWrapECDSA(cek []byte, alg string, apu, apv []byte, pubkey *ecdsa.PublicKey, keysize uint32, calg string) (keygen.ByteSource, error) {
+ bwpk, err := generateECDHESKeyECDSA(alg, calg, keysize, pubkey, apu, apv)
+ if err != nil {
+ return nil, err
+ }
+
+ // For key wrapping algorithms, wrap the CEK with the generated key
+ block, err := aes.NewCipher(bwpk.Bytes())
+ if err != nil {
+ return nil, fmt.Errorf(`failed to generate cipher from generated key: %w`, err)
+ }
+
+ jek, err := Wrap(block, cek)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to wrap data: %w`, err)
+ }
+
+ bwpk.ByteKey = keygen.ByteKey(jek)
+ return bwpk, nil
+}
+
+// KeyEncryptECDHESKeyWrapX25519 encrypts the CEK using ECDH-ES with key wrapping for X25519 keys
+func KeyEncryptECDHESKeyWrapX25519(cek []byte, alg string, _ []byte, _ []byte, pubkey *ecdh.PublicKey, keysize uint32, calg string) (keygen.ByteSource, error) {
+ bwpk, err := generateECDHESKeyX25519(alg, calg, keysize, pubkey)
+ if err != nil {
+ return nil, err
+ }
+
+ // For key wrapping algorithms, wrap the CEK with the generated key
+ block, err := aes.NewCipher(bwpk.Bytes())
+ if err != nil {
+ return nil, fmt.Errorf(`failed to generate cipher from generated key: %w`, err)
+ }
+
+ jek, err := Wrap(block, cek)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to wrap data: %w`, err)
+ }
+
+ bwpk.ByteKey = keygen.ByteKey(jek)
+ return bwpk, nil
+}
+
+// KeyEncryptECDHESECDSA encrypts using ECDH-ES direct (no key wrapping) for ECDSA keys
+func KeyEncryptECDHESECDSA(_ []byte, alg string, apu, apv []byte, pubkey *ecdsa.PublicKey, keysize uint32, calg string) (keygen.ByteSource, error) {
+ bwpk, err := generateECDHESKeyECDSA(alg, calg, keysize, pubkey, apu, apv)
+ if err != nil {
+ return nil, err
+ }
+
+ // For direct ECDH-ES, return the generated key directly
+ return bwpk, nil
+}
+
+// KeyEncryptECDHESX25519 encrypts using ECDH-ES direct (no key wrapping) for X25519 keys
+func KeyEncryptECDHESX25519(_ []byte, alg string, _, _ []byte, pubkey *ecdh.PublicKey, keysize uint32, calg string) (keygen.ByteSource, error) {
+ bwpk, err := generateECDHESKeyX25519(alg, calg, keysize, pubkey)
+ if err != nil {
+ return nil, err
+ }
+
+ // For direct ECDH-ES, return the generated key directly
+ return bwpk, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_encrypt_symmetric.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_encrypt_symmetric.go
new file mode 100644
index 0000000000..d489aaba28
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_encrypt_symmetric.go
@@ -0,0 +1,115 @@
+package jwebb
+
+import (
+ "crypto/aes"
+ cryptocipher "crypto/cipher"
+ "crypto/rand"
+ "crypto/sha256"
+ "crypto/sha512"
+ "fmt"
+ "hash"
+ "io"
+
+ "golang.org/x/crypto/pbkdf2"
+
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+ "github.com/lestrrat-go/jwx/v3/jwe/internal/keygen"
+)
+
+// KeyEncryptAESKW encrypts the CEK using AES key wrap
+func KeyEncryptAESKW(cek []byte, _ string, sharedkey []byte) (keygen.ByteSource, error) {
+ block, err := aes.NewCipher(sharedkey)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to create cipher from shared key: %w`, err)
+ }
+
+ encrypted, err := Wrap(block, cek)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to wrap data: %w`, err)
+ }
+ return keygen.ByteKey(encrypted), nil
+}
+
+// KeyEncryptDirect returns the CEK directly for DIRECT algorithm
+func KeyEncryptDirect(_ []byte, _ string, sharedkey []byte) (keygen.ByteSource, error) {
+ return keygen.ByteKey(sharedkey), nil
+}
+
+// KeyEncryptPBES2 encrypts the CEK using PBES2 password-based encryption
+func KeyEncryptPBES2(cek []byte, alg string, password []byte) (keygen.ByteSource, error) {
+ var hashFunc func() hash.Hash
+ var keylen int
+
+ switch alg {
+ case tokens.PBES2_HS256_A128KW:
+ hashFunc = sha256.New
+ keylen = tokens.KeySize16
+ case tokens.PBES2_HS384_A192KW:
+ hashFunc = sha512.New384
+ keylen = tokens.KeySize24
+ case tokens.PBES2_HS512_A256KW:
+ hashFunc = sha512.New
+ keylen = tokens.KeySize32
+ default:
+ return nil, fmt.Errorf(`unsupported PBES2 algorithm: %s`, alg)
+ }
+
+ count := tokens.PBES2DefaultIterations
+ salt := make([]byte, keylen)
+ _, err := io.ReadFull(rand.Reader, salt)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to get random salt: %w`, err)
+ }
+
+ fullsalt := []byte(alg)
+ fullsalt = append(fullsalt, byte(tokens.PBES2NullByteSeparator))
+ fullsalt = append(fullsalt, salt...)
+
+ // Derive key using PBKDF2
+ derivedKey := pbkdf2.Key(password, fullsalt, count, keylen, hashFunc)
+
+ // Use the derived key for AES key wrap
+ block, err := aes.NewCipher(derivedKey)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to create cipher from derived key: %w`, err)
+ }
+ encrypted, err := Wrap(block, cek)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to wrap data: %w`, err)
+ }
+
+ return keygen.ByteWithSaltAndCount{
+ ByteKey: encrypted,
+ Salt: salt,
+ Count: count,
+ }, nil
+}
+
+// KeyEncryptAESGCMKW encrypts the CEK using AES GCM key wrap
+func KeyEncryptAESGCMKW(cek []byte, _ string, sharedkey []byte) (keygen.ByteSource, error) {
+ block, err := aes.NewCipher(sharedkey)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to create new AES cipher: %w`, err)
+ }
+
+ aesgcm, err := cryptocipher.NewGCM(block)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to create new GCM wrap: %w`, err)
+ }
+
+ iv := make([]byte, aesgcm.NonceSize())
+ _, err = io.ReadFull(rand.Reader, iv)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to get random iv: %w`, err)
+ }
+
+ encrypted := aesgcm.Seal(nil, iv, cek, nil)
+ tag := encrypted[len(encrypted)-aesgcm.Overhead():]
+ ciphertext := encrypted[:len(encrypted)-aesgcm.Overhead()]
+
+ return keygen.ByteWithIVAndTag{
+ ByteKey: ciphertext,
+ IV: iv,
+ Tag: tag,
+ }, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_encryption.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_encryption.go
new file mode 100644
index 0000000000..ce39352fc5
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_encryption.go
@@ -0,0 +1,70 @@
+package jwebb
+
+import (
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+)
+
+// IsECDHES checks if the algorithm is an ECDH-ES based algorithm
+func IsECDHES(alg string) bool {
+ switch alg {
+ case tokens.ECDH_ES, tokens.ECDH_ES_A128KW, tokens.ECDH_ES_A192KW, tokens.ECDH_ES_A256KW:
+ return true
+ default:
+ return false
+ }
+}
+
+// IsRSA15 checks if the algorithm is RSA1_5
+func IsRSA15(alg string) bool {
+ return alg == tokens.RSA1_5
+}
+
+// IsRSAOAEP checks if the algorithm is an RSA-OAEP based algorithm
+func IsRSAOAEP(alg string) bool {
+ switch alg {
+ case tokens.RSA_OAEP, tokens.RSA_OAEP_256, tokens.RSA_OAEP_384, tokens.RSA_OAEP_512:
+ return true
+ default:
+ return false
+ }
+}
+
+// IsAESKW checks if the algorithm is an AES key wrap algorithm
+func IsAESKW(alg string) bool {
+ switch alg {
+ case tokens.A128KW, tokens.A192KW, tokens.A256KW:
+ return true
+ default:
+ return false
+ }
+}
+
+// IsAESGCMKW checks if the algorithm is an AES-GCM key wrap algorithm
+func IsAESGCMKW(alg string) bool {
+ switch alg {
+ case tokens.A128GCMKW, tokens.A192GCMKW, tokens.A256GCMKW:
+ return true
+ default:
+ return false
+ }
+}
+
+// IsPBES2 checks if the algorithm is a PBES2 based algorithm
+func IsPBES2(alg string) bool {
+ switch alg {
+ case tokens.PBES2_HS256_A128KW, tokens.PBES2_HS384_A192KW, tokens.PBES2_HS512_A256KW:
+ return true
+ default:
+ return false
+ }
+}
+
+// IsDirect checks if the algorithm is direct encryption
+func IsDirect(alg string) bool {
+ return alg == tokens.DIRECT
+}
+
+// IsSymmetric checks if the algorithm is a symmetric key encryption algorithm
+func IsSymmetric(alg string) bool {
+ return IsAESKW(alg) || IsAESGCMKW(alg) || IsPBES2(alg) || IsDirect(alg)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/keywrap.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/keywrap.go
new file mode 100644
index 0000000000..0792d6cb8e
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/keywrap.go
@@ -0,0 +1,110 @@
+package jwebb
+
+import (
+ "crypto/cipher"
+ "crypto/subtle"
+ "encoding/binary"
+ "fmt"
+
+ "github.com/lestrrat-go/jwx/v3/internal/pool"
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+)
+
+var keywrapDefaultIV = []byte{0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6}
+
+func Wrap(kek cipher.Block, cek []byte) ([]byte, error) {
+ if len(cek)%tokens.KeywrapBlockSize != 0 {
+ return nil, fmt.Errorf(`keywrap input must be %d byte blocks`, tokens.KeywrapBlockSize)
+ }
+
+ n := len(cek) / tokens.KeywrapChunkLen
+ r := make([][]byte, n)
+
+ for i := range n {
+ r[i] = make([]byte, tokens.KeywrapChunkLen)
+ copy(r[i], cek[i*tokens.KeywrapChunkLen:])
+ }
+
+ buffer := pool.ByteSlice().GetCapacity(tokens.KeywrapChunkLen * 2)
+ defer pool.ByteSlice().Put(buffer)
+ // the byte slice has the capacity, but len is 0
+ buffer = buffer[:tokens.KeywrapChunkLen*2]
+
+ tBytes := pool.ByteSlice().GetCapacity(tokens.KeywrapChunkLen)
+ defer pool.ByteSlice().Put(tBytes)
+ // the byte slice has the capacity, but len is 0
+ tBytes = tBytes[:tokens.KeywrapChunkLen]
+
+ copy(buffer, keywrapDefaultIV)
+
+ for t := range tokens.KeywrapRounds * n {
+ copy(buffer[tokens.KeywrapChunkLen:], r[t%n])
+
+ kek.Encrypt(buffer, buffer)
+
+ binary.BigEndian.PutUint64(tBytes, uint64(t+1))
+
+ for i := range tokens.KeywrapChunkLen {
+ buffer[i] = buffer[i] ^ tBytes[i]
+ }
+ copy(r[t%n], buffer[tokens.KeywrapChunkLen:])
+ }
+
+ out := make([]byte, (n+1)*tokens.KeywrapChunkLen)
+ copy(out, buffer[:tokens.KeywrapChunkLen])
+ for i := range r {
+ copy(out[(i+1)*tokens.KeywrapBlockSize:], r[i])
+ }
+
+ return out, nil
+}
+
+func Unwrap(block cipher.Block, ciphertxt []byte) ([]byte, error) {
+ if len(ciphertxt)%tokens.KeywrapChunkLen != 0 {
+ return nil, fmt.Errorf(`keyunwrap input must be %d byte blocks`, tokens.KeywrapChunkLen)
+ }
+
+ n := (len(ciphertxt) / tokens.KeywrapChunkLen) - 1
+ r := make([][]byte, n)
+
+ for i := range r {
+ r[i] = make([]byte, tokens.KeywrapChunkLen)
+ copy(r[i], ciphertxt[(i+1)*tokens.KeywrapChunkLen:])
+ }
+
+ buffer := pool.ByteSlice().GetCapacity(tokens.KeywrapChunkLen * 2)
+ defer pool.ByteSlice().Put(buffer)
+ // the byte slice has the capacity, but len is 0
+ buffer = buffer[:tokens.KeywrapChunkLen*2]
+
+ tBytes := pool.ByteSlice().GetCapacity(tokens.KeywrapChunkLen)
+ defer pool.ByteSlice().Put(tBytes)
+ // the byte slice has the capacity, but len is 0
+ tBytes = tBytes[:tokens.KeywrapChunkLen]
+
+ copy(buffer[:tokens.KeywrapChunkLen], ciphertxt[:tokens.KeywrapChunkLen])
+
+ for t := tokens.KeywrapRounds*n - 1; t >= 0; t-- {
+ binary.BigEndian.PutUint64(tBytes, uint64(t+1))
+
+ for i := range tokens.KeywrapChunkLen {
+ buffer[i] = buffer[i] ^ tBytes[i]
+ }
+ copy(buffer[tokens.KeywrapChunkLen:], r[t%n])
+
+ block.Decrypt(buffer, buffer)
+
+ copy(r[t%n], buffer[tokens.KeywrapChunkLen:])
+ }
+
+ if subtle.ConstantTimeCompare(buffer[:tokens.KeywrapChunkLen], keywrapDefaultIV) == 0 {
+ return nil, fmt.Errorf(`key unwrap: failed to unwrap key`)
+ }
+
+ out := make([]byte, n*tokens.KeywrapChunkLen)
+ for i := range r {
+ copy(out[i*tokens.KeywrapChunkLen:], r[i])
+ }
+
+ return out, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/key_provider.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/key_provider.go
new file mode 100644
index 0000000000..05adc04517
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/key_provider.go
@@ -0,0 +1,163 @@
+package jwe
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jwk"
+)
+
+// KeyProvider is responsible for providing key(s) to encrypt or decrypt a payload.
+// Multiple `jwe.KeyProvider`s can be passed to `jwe.Encrypt()` or `jwe.Decrypt()`
+//
+// `jwe.Encrypt()` can only accept static key providers via `jwe.WithKey()`,
+// while `jwe.Decrypt()` can accept `jwe.WithKey()`, `jwe.WithKeySet()`,
+// and `jwe.WithKeyProvider()`.
+//
+// Understanding how this works is crucial to learn how this package works.
+// Here we will use `jwe.Decrypt()` as an example to show how the `KeyProvider`
+// works.
+//
+// `jwe.Encrypt()` is straightforward: the content encryption key is encrypted
+// using the provided keys, and JWS recipient objects are created for each.
+//
+// `jwe.Decrypt()` is a bit more involved, because there are cases you
+// will want to compute/deduce/guess the keys that you would like to
+// use for decryption.
+//
+// The first thing that `jwe.Decrypt()` needs to do is to collect the
+// KeyProviders from the option list that the user provided (presented in pseudocode):
+//
+// keyProviders := filterKeyProviders(options)
+//
+// Then, remember that a JWE message may contain multiple recipients in the
+// message. For each recipient, we call on the KeyProviders to give us
+// the key(s) to use on this CEK:
+//
+// for r in msg.Recipients {
+// for kp in keyProviders {
+// kp.FetchKeys(ctx, sink, r, msg)
+// ...
+// }
+// }
+//
+// The `sink` argument passed to the KeyProvider is a temporary storage
+// for the keys (either a jwk.Key or a "raw" key). The `KeyProvider`
+// is responsible for sending keys into the `sink`.
+//
+// When called, the `KeyProvider` created by `jwe.WithKey()` sends the same key,
+// `jwe.WithKeySet()` sends keys that matches a particular `kid` and `alg`,
+// and finally `jwe.WithKeyProvider()` allows you to execute arbitrary
+// logic to provide keys. If you are providing a custom `KeyProvider`,
+// you should execute the necessary checks or retrieval of keys, and
+// then send the key(s) to the sink:
+//
+// sink.Key(alg, key)
+//
+// These keys are then retrieved and tried for each recipient, until
+// a match is found:
+//
+// keys := sink.Keys()
+// for key in keys {
+// if decryptJWEKey(recipient.EncryptedKey(), key) {
+// return OK
+// }
+// }
+type KeyProvider interface {
+ FetchKeys(context.Context, KeySink, Recipient, *Message) error
+}
+
+// KeySink is a data storage where `jwe.KeyProvider` objects should
+// send their keys to.
+type KeySink interface {
+ Key(jwa.KeyEncryptionAlgorithm, any)
+}
+
+type algKeyPair struct {
+ alg jwa.KeyAlgorithm
+ key any
+}
+
+type algKeySink struct {
+ mu sync.Mutex
+ list []algKeyPair
+}
+
+func (s *algKeySink) Key(alg jwa.KeyEncryptionAlgorithm, key any) {
+ s.mu.Lock()
+ s.list = append(s.list, algKeyPair{alg, key})
+ s.mu.Unlock()
+}
+
+type staticKeyProvider struct {
+ alg jwa.KeyEncryptionAlgorithm
+ key any
+}
+
+func (kp *staticKeyProvider) FetchKeys(_ context.Context, sink KeySink, _ Recipient, _ *Message) error {
+ sink.Key(kp.alg, kp.key)
+ return nil
+}
+
+type keySetProvider struct {
+ set jwk.Set
+ requireKid bool
+}
+
+func (kp *keySetProvider) selectKey(sink KeySink, key jwk.Key, _ Recipient, _ *Message) error {
+ if usage, ok := key.KeyUsage(); ok {
+ if usage != "" && usage != jwk.ForEncryption.String() {
+ return nil
+ }
+ }
+
+ if v, ok := key.Algorithm(); ok {
+ kalg, ok := jwa.LookupKeyEncryptionAlgorithm(v.String())
+ if !ok {
+ return fmt.Errorf(`invalid key encryption algorithm %s`, v)
+ }
+
+ sink.Key(kalg, key)
+ return nil
+ }
+
+ return nil
+}
+
+func (kp *keySetProvider) FetchKeys(_ context.Context, sink KeySink, r Recipient, msg *Message) error {
+ if kp.requireKid {
+ var key jwk.Key
+
+ wantedKid, ok := r.Headers().KeyID()
+ if !ok || wantedKid == "" {
+ return fmt.Errorf(`failed to find matching key: no key ID ("kid") specified in token but multiple keys available in key set`)
+ }
+ // Otherwise we better be able to look up the key, baby.
+ v, ok := kp.set.LookupKeyID(wantedKid)
+ if !ok {
+ return fmt.Errorf(`failed to find key with key ID %q in key set`, wantedKid)
+ }
+ key = v
+
+ return kp.selectKey(sink, key, r, msg)
+ }
+
+ for i := range kp.set.Len() {
+ key, _ := kp.set.Key(i)
+ if err := kp.selectKey(sink, key, r, msg); err != nil {
+ continue
+ }
+ }
+ return nil
+}
+
+// KeyProviderFunc is a type of KeyProvider that is implemented by
+// a single function. You can use this to create ad-hoc `KeyProvider`
+// instances.
+type KeyProviderFunc func(context.Context, KeySink, Recipient, *Message) error
+
+func (kp KeyProviderFunc) FetchKeys(ctx context.Context, sink KeySink, r Recipient, msg *Message) error {
+ return kp(ctx, sink, r, msg)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/message.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/message.go
new file mode 100644
index 0000000000..13cf3dec83
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/message.go
@@ -0,0 +1,546 @@
+package jwe
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+ "github.com/lestrrat-go/jwx/v3/internal/pool"
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+)
+
+// NewRecipient creates a Recipient object
+func NewRecipient() Recipient {
+ return &stdRecipient{
+ headers: NewHeaders(),
+ }
+}
+
+func (r *stdRecipient) SetHeaders(h Headers) error {
+ r.headers = h
+ return nil
+}
+
+func (r *stdRecipient) SetEncryptedKey(v []byte) error {
+ r.encryptedKey = v
+ return nil
+}
+
+func (r *stdRecipient) Headers() Headers {
+ return r.headers
+}
+
+func (r *stdRecipient) EncryptedKey() []byte {
+ return r.encryptedKey
+}
+
+type recipientMarshalProxy struct {
+ Headers Headers `json:"header"`
+ EncryptedKey string `json:"encrypted_key"`
+}
+
+func (r *stdRecipient) UnmarshalJSON(buf []byte) error {
+ var proxy recipientMarshalProxy
+ proxy.Headers = NewHeaders()
+ if err := json.Unmarshal(buf, &proxy); err != nil {
+ return fmt.Errorf(`failed to unmarshal json into recipient: %w`, err)
+ }
+
+ r.headers = proxy.Headers
+ decoded, err := base64.DecodeString(proxy.EncryptedKey)
+ if err != nil {
+ return fmt.Errorf(`failed to decode "encrypted_key": %w`, err)
+ }
+ r.encryptedKey = decoded
+ return nil
+}
+
+func (r *stdRecipient) MarshalJSON() ([]byte, error) {
+ buf := pool.BytesBuffer().Get()
+ defer pool.BytesBuffer().Put(buf)
+
+ buf.WriteString(`{"header":`)
+ hdrbuf, err := json.Marshal(r.headers)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to marshal recipient header: %w`, err)
+ }
+ buf.Write(hdrbuf)
+ buf.WriteString(`,"encrypted_key":"`)
+ buf.WriteString(base64.EncodeToString(r.encryptedKey))
+ buf.WriteString(`"}`)
+
+ ret := make([]byte, buf.Len())
+ copy(ret, buf.Bytes())
+ return ret, nil
+}
+
+// NewMessage creates a new message
+func NewMessage() *Message {
+ return &Message{}
+}
+
+func (m *Message) AuthenticatedData() []byte {
+ return m.authenticatedData
+}
+
+func (m *Message) CipherText() []byte {
+ return m.cipherText
+}
+
+func (m *Message) InitializationVector() []byte {
+ return m.initializationVector
+}
+
+func (m *Message) Tag() []byte {
+ return m.tag
+}
+
+func (m *Message) ProtectedHeaders() Headers {
+ return m.protectedHeaders
+}
+
+func (m *Message) Recipients() []Recipient {
+ return m.recipients
+}
+
+func (m *Message) UnprotectedHeaders() Headers {
+ return m.unprotectedHeaders
+}
+
+const (
+ AuthenticatedDataKey = "aad"
+ CipherTextKey = "ciphertext"
+ CountKey = "p2c"
+ InitializationVectorKey = "iv"
+ ProtectedHeadersKey = "protected"
+ RecipientsKey = "recipients"
+ SaltKey = "p2s"
+ TagKey = "tag"
+ UnprotectedHeadersKey = "unprotected"
+ HeadersKey = "header"
+ EncryptedKeyKey = "encrypted_key"
+)
+
+func (m *Message) Set(k string, v any) error {
+ switch k {
+ case AuthenticatedDataKey:
+ buf, ok := v.([]byte)
+ if !ok {
+ return fmt.Errorf(`invalid value %T for %s key`, v, AuthenticatedDataKey)
+ }
+ m.authenticatedData = buf
+ case CipherTextKey:
+ buf, ok := v.([]byte)
+ if !ok {
+ return fmt.Errorf(`invalid value %T for %s key`, v, CipherTextKey)
+ }
+ m.cipherText = buf
+ case InitializationVectorKey:
+ buf, ok := v.([]byte)
+ if !ok {
+ return fmt.Errorf(`invalid value %T for %s key`, v, InitializationVectorKey)
+ }
+ m.initializationVector = buf
+ case ProtectedHeadersKey:
+ cv, ok := v.(Headers)
+ if !ok {
+ return fmt.Errorf(`invalid value %T for %s key`, v, ProtectedHeadersKey)
+ }
+ m.protectedHeaders = cv
+ case RecipientsKey:
+ cv, ok := v.([]Recipient)
+ if !ok {
+ return fmt.Errorf(`invalid value %T for %s key`, v, RecipientsKey)
+ }
+ m.recipients = cv
+ case TagKey:
+ buf, ok := v.([]byte)
+ if !ok {
+ return fmt.Errorf(`invalid value %T for %s key`, v, TagKey)
+ }
+ m.tag = buf
+ case UnprotectedHeadersKey:
+ cv, ok := v.(Headers)
+ if !ok {
+ return fmt.Errorf(`invalid value %T for %s key`, v, UnprotectedHeadersKey)
+ }
+ m.unprotectedHeaders = cv
+ default:
+ if m.unprotectedHeaders == nil {
+ m.unprotectedHeaders = NewHeaders()
+ }
+ return m.unprotectedHeaders.Set(k, v)
+ }
+ return nil
+}
+
+type messageMarshalProxy struct {
+ AuthenticatedData string `json:"aad,omitempty"`
+ CipherText string `json:"ciphertext"`
+ InitializationVector string `json:"iv,omitempty"`
+ ProtectedHeaders json.RawMessage `json:"protected"`
+ Recipients []json.RawMessage `json:"recipients,omitempty"`
+ Tag string `json:"tag,omitempty"`
+ UnprotectedHeaders Headers `json:"unprotected,omitempty"`
+
+ // For flattened structure. Headers is NOT a Headers type,
+ // so that we can detect its presence by checking proxy.Headers != nil
+ Headers json.RawMessage `json:"header,omitempty"`
+ EncryptedKey string `json:"encrypted_key,omitempty"`
+}
+
+type jsonKV struct {
+ Key string
+ Value string
+}
+
+func (m *Message) MarshalJSON() ([]byte, error) {
+ // This is slightly convoluted, but we need to encode the
+ // protected headers, so we do it by hand
+ buf := pool.BytesBuffer().Get()
+ defer pool.BytesBuffer().Put(buf)
+ enc := json.NewEncoder(buf)
+
+ var fields []jsonKV
+
+ if cipherText := m.CipherText(); len(cipherText) > 0 {
+ buf.Reset()
+ if err := enc.Encode(base64.EncodeToString(cipherText)); err != nil {
+ return nil, fmt.Errorf(`failed to encode %s field: %w`, CipherTextKey, err)
+ }
+ fields = append(fields, jsonKV{
+ Key: CipherTextKey,
+ Value: strings.TrimSpace(buf.String()),
+ })
+ }
+
+ if iv := m.InitializationVector(); len(iv) > 0 {
+ buf.Reset()
+ if err := enc.Encode(base64.EncodeToString(iv)); err != nil {
+ return nil, fmt.Errorf(`failed to encode %s field: %w`, InitializationVectorKey, err)
+ }
+ fields = append(fields, jsonKV{
+ Key: InitializationVectorKey,
+ Value: strings.TrimSpace(buf.String()),
+ })
+ }
+
+ var encodedProtectedHeaders []byte
+ if h := m.ProtectedHeaders(); h != nil {
+ v, err := h.Encode()
+ if err != nil {
+ return nil, fmt.Errorf(`failed to encode protected headers: %w`, err)
+ }
+
+ encodedProtectedHeaders = v
+ if len(encodedProtectedHeaders) <= 2 { // '{}'
+ encodedProtectedHeaders = nil
+ } else {
+ fields = append(fields, jsonKV{
+ Key: ProtectedHeadersKey,
+ Value: fmt.Sprintf("%q", encodedProtectedHeaders),
+ })
+ }
+ }
+
+ if aad := m.AuthenticatedData(); len(aad) > 0 {
+ aad = base64.Encode(aad)
+ if encodedProtectedHeaders != nil {
+ tmp := append(encodedProtectedHeaders, tokens.Period)
+ aad = append(tmp, aad...)
+ }
+
+ buf.Reset()
+ if err := enc.Encode(aad); err != nil {
+ return nil, fmt.Errorf(`failed to encode %s field: %w`, AuthenticatedDataKey, err)
+ }
+ fields = append(fields, jsonKV{
+ Key: AuthenticatedDataKey,
+ Value: strings.TrimSpace(buf.String()),
+ })
+ }
+
+ if recipients := m.Recipients(); len(recipients) > 0 {
+ if len(recipients) == 1 { // Use flattened format
+ if hdrs := recipients[0].Headers(); hdrs != nil {
+ buf.Reset()
+ if err := enc.Encode(hdrs); err != nil {
+ return nil, fmt.Errorf(`failed to encode %s field: %w`, HeadersKey, err)
+ }
+ fields = append(fields, jsonKV{
+ Key: HeadersKey,
+ Value: strings.TrimSpace(buf.String()),
+ })
+ }
+
+ if ek := recipients[0].EncryptedKey(); len(ek) > 0 {
+ buf.Reset()
+ if err := enc.Encode(base64.EncodeToString(ek)); err != nil {
+ return nil, fmt.Errorf(`failed to encode %s field: %w`, EncryptedKeyKey, err)
+ }
+ fields = append(fields, jsonKV{
+ Key: EncryptedKeyKey,
+ Value: strings.TrimSpace(buf.String()),
+ })
+ }
+ } else {
+ buf.Reset()
+ if err := enc.Encode(recipients); err != nil {
+ return nil, fmt.Errorf(`failed to encode %s field: %w`, RecipientsKey, err)
+ }
+ fields = append(fields, jsonKV{
+ Key: RecipientsKey,
+ Value: strings.TrimSpace(buf.String()),
+ })
+ }
+ }
+
+ if tag := m.Tag(); len(tag) > 0 {
+ buf.Reset()
+ if err := enc.Encode(base64.EncodeToString(tag)); err != nil {
+ return nil, fmt.Errorf(`failed to encode %s field: %w`, TagKey, err)
+ }
+ fields = append(fields, jsonKV{
+ Key: TagKey,
+ Value: strings.TrimSpace(buf.String()),
+ })
+ }
+
+ if h := m.UnprotectedHeaders(); h != nil {
+ unprotected, err := json.Marshal(h)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to encode unprotected headers: %w`, err)
+ }
+
+ if len(unprotected) > 2 {
+ fields = append(fields, jsonKV{
+ Key: UnprotectedHeadersKey,
+ Value: fmt.Sprintf("%q", unprotected),
+ })
+ }
+ }
+
+ sort.Slice(fields, func(i, j int) bool {
+ return fields[i].Key < fields[j].Key
+ })
+ buf.Reset()
+ fmt.Fprintf(buf, `{`)
+ for i, kv := range fields {
+ if i > 0 {
+ fmt.Fprintf(buf, `,`)
+ }
+ fmt.Fprintf(buf, `%q:%s`, kv.Key, kv.Value)
+ }
+ fmt.Fprintf(buf, `}`)
+
+ ret := make([]byte, buf.Len())
+ copy(ret, buf.Bytes())
+ return ret, nil
+}
+
+func (m *Message) UnmarshalJSON(buf []byte) error {
+ var proxy messageMarshalProxy
+ proxy.UnprotectedHeaders = NewHeaders()
+
+ if err := json.Unmarshal(buf, &proxy); err != nil {
+ return fmt.Errorf(`failed to unmashal JSON into message: %w`, err)
+ }
+
+ // Get the string value
+ var protectedHeadersStr string
+ if err := json.Unmarshal(proxy.ProtectedHeaders, &protectedHeadersStr); err != nil {
+ return fmt.Errorf(`failed to decode protected headers (1): %w`, err)
+ }
+
+ // It's now in _quoted_ base64 string. Decode it
+ protectedHeadersRaw, err := base64.DecodeString(protectedHeadersStr)
+ if err != nil {
+ return fmt.Errorf(`failed to base64 decoded protected headers buffer: %w`, err)
+ }
+
+ h := NewHeaders()
+ if err := json.Unmarshal(protectedHeadersRaw, h); err != nil {
+ return fmt.Errorf(`failed to decode protected headers (2): %w`, err)
+ }
+
+ // if this were a flattened message, we would see a "header" and "ciphertext"
+ // field. TODO: do both of these conditions need to meet, or just one?
+ if proxy.Headers != nil || len(proxy.EncryptedKey) > 0 {
+ recipient := NewRecipient()
+ hdrs := NewHeaders()
+ if err := json.Unmarshal(proxy.Headers, hdrs); err != nil {
+ return fmt.Errorf(`failed to decode headers field: %w`, err)
+ }
+
+ if err := recipient.SetHeaders(hdrs); err != nil {
+ return fmt.Errorf(`failed to set new headers: %w`, err)
+ }
+
+ if v := proxy.EncryptedKey; len(v) > 0 {
+ buf, err := base64.DecodeString(v)
+ if err != nil {
+ return fmt.Errorf(`failed to decode encrypted key: %w`, err)
+ }
+ if err := recipient.SetEncryptedKey(buf); err != nil {
+ return fmt.Errorf(`failed to set encrypted key: %w`, err)
+ }
+ }
+
+ m.recipients = append(m.recipients, recipient)
+ } else {
+ for i, recipientbuf := range proxy.Recipients {
+ recipient := NewRecipient()
+ if err := json.Unmarshal(recipientbuf, recipient); err != nil {
+ return fmt.Errorf(`failed to decode recipient at index %d: %w`, i, err)
+ }
+
+ m.recipients = append(m.recipients, recipient)
+ }
+ }
+
+ if src := proxy.AuthenticatedData; len(src) > 0 {
+ v, err := base64.DecodeString(src)
+ if err != nil {
+ return fmt.Errorf(`failed to decode "aad": %w`, err)
+ }
+ m.authenticatedData = v
+ }
+
+ if src := proxy.CipherText; len(src) > 0 {
+ v, err := base64.DecodeString(src)
+ if err != nil {
+ return fmt.Errorf(`failed to decode "ciphertext": %w`, err)
+ }
+ m.cipherText = v
+ }
+
+ if src := proxy.InitializationVector; len(src) > 0 {
+ v, err := base64.DecodeString(src)
+ if err != nil {
+ return fmt.Errorf(`failed to decode "iv": %w`, err)
+ }
+ m.initializationVector = v
+ }
+
+ if src := proxy.Tag; len(src) > 0 {
+ v, err := base64.DecodeString(src)
+ if err != nil {
+ return fmt.Errorf(`failed to decode "tag": %w`, err)
+ }
+ m.tag = v
+ }
+
+ m.protectedHeaders = h
+ if m.storeProtectedHeaders {
+ // this is later used for decryption
+ m.rawProtectedHeaders = base64.Encode(protectedHeadersRaw)
+ }
+
+ if iz, ok := proxy.UnprotectedHeaders.(isZeroer); ok {
+ if !iz.isZero() {
+ m.unprotectedHeaders = proxy.UnprotectedHeaders
+ }
+ }
+
+ if len(m.recipients) == 0 {
+ if err := m.makeDummyRecipient(proxy.EncryptedKey, m.protectedHeaders); err != nil {
+ return fmt.Errorf(`failed to setup recipient: %w`, err)
+ }
+ }
+
+ return nil
+}
+
+func (m *Message) makeDummyRecipient(enckeybuf string, protected Headers) error {
+ // Recipients in this case should not contain the content encryption key,
+ // so move that out
+ hdrs, err := protected.Clone()
+ if err != nil {
+ return fmt.Errorf(`failed to clone headers: %w`, err)
+ }
+
+ if err := hdrs.Remove(ContentEncryptionKey); err != nil {
+ return fmt.Errorf(`failed to remove %#v from public header: %w`, ContentEncryptionKey, err)
+ }
+
+ enckey, err := base64.DecodeString(enckeybuf)
+ if err != nil {
+ return fmt.Errorf(`failed to decode encrypted key: %w`, err)
+ }
+
+ if err := m.Set(RecipientsKey, []Recipient{
+ &stdRecipient{
+ headers: hdrs,
+ encryptedKey: enckey,
+ },
+ }); err != nil {
+ return fmt.Errorf(`failed to set %s: %w`, RecipientsKey, err)
+ }
+ return nil
+}
+
+// Compact generates a JWE message in compact serialization format from a
+// `*jwe.Message` object. The object contain exactly one recipient, or
+// an error is returned.
+//
+// This function currently does not take any options, but the function
+// signature contains `options` for possible future expansion of the API
+func Compact(m *Message, _ ...CompactOption) ([]byte, error) {
+ if len(m.recipients) != 1 {
+ return nil, fmt.Errorf(`wrong number of recipients for compact serialization`)
+ }
+
+ recipient := m.recipients[0]
+
+ // The protected header must be a merge between the message-wide
+ // protected header AND the recipient header
+
+ // There's something wrong if m.protectedHeaders is nil, but
+ // it could happen
+ if m.protectedHeaders == nil {
+ return nil, fmt.Errorf(`invalid protected header`)
+ }
+
+ hcopy, err := m.protectedHeaders.Clone()
+ if err != nil {
+ return nil, fmt.Errorf(`failed to copy protected header: %w`, err)
+ }
+ hcopy, err = hcopy.Merge(m.unprotectedHeaders)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to merge unprotected header: %w`, err)
+ }
+ hcopy, err = hcopy.Merge(recipient.Headers())
+ if err != nil {
+ return nil, fmt.Errorf(`failed to merge recipient header: %w`, err)
+ }
+
+ protected, err := hcopy.Encode()
+ if err != nil {
+ return nil, fmt.Errorf(`failed to encode header: %w`, err)
+ }
+
+ encryptedKey := base64.Encode(recipient.EncryptedKey())
+ iv := base64.Encode(m.initializationVector)
+ cipher := base64.Encode(m.cipherText)
+ tag := base64.Encode(m.tag)
+
+ buf := pool.BytesBuffer().Get()
+ defer pool.BytesBuffer().Put(buf)
+
+ buf.Grow(len(protected) + len(encryptedKey) + len(iv) + len(cipher) + len(tag) + 4)
+ buf.Write(protected)
+ buf.WriteByte(tokens.Period)
+ buf.Write(encryptedKey)
+ buf.WriteByte(tokens.Period)
+ buf.Write(iv)
+ buf.WriteByte(tokens.Period)
+ buf.Write(cipher)
+ buf.WriteByte(tokens.Period)
+ buf.Write(tag)
+
+ result := make([]byte, buf.Len())
+ copy(result, buf.Bytes())
+ return result, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/options.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/options.go
new file mode 100644
index 0000000000..c9137eecf4
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/options.go
@@ -0,0 +1,108 @@
+package jwe
+
+import (
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jwk"
+ "github.com/lestrrat-go/option/v2"
+)
+
+// Specify contents of the protected header. Some fields such as
+// "enc" and "zip" will be overwritten when encryption is performed.
+//
+// There is no equivalent for unprotected headers in this implementation
+func WithProtectedHeaders(h Headers) EncryptOption {
+ cloned, _ := h.Clone()
+ return &encryptOption{option.New(identProtectedHeaders{}, cloned)}
+}
+
+type withKey struct {
+ alg jwa.KeyAlgorithm
+ key any
+ headers Headers
+}
+
+type WithKeySuboption interface {
+ Option
+ withKeySuboption()
+}
+
+type withKeySuboption struct {
+ Option
+}
+
+func (*withKeySuboption) withKeySuboption() {}
+
+// WithPerRecipientHeaders is used to pass header values for each recipient.
+// Note that these headers are by definition _unprotected_.
+func WithPerRecipientHeaders(hdr Headers) WithKeySuboption {
+ return &withKeySuboption{option.New(identPerRecipientHeaders{}, hdr)}
+}
+
+// WithKey is used to pass a static algorithm/key pair to either `jwe.Encrypt()` or `jwe.Decrypt()`.
+// either a raw key or `jwk.Key` may be passed as `key`.
+//
+// The `alg` parameter is the identifier for the key encryption algorithm that should be used.
+// It is of type `jwa.KeyAlgorithm` but in reality you can only pass `jwa.KeyEncryptionAlgorithm`
+// types. It is this way so that the value in `(jwk.Key).Algorithm()` can be directly
+// passed to the option. If you specify other algorithm types such as `jwa.SignatureAlgorithm`,
+// then you will get an error when `jwe.Encrypt()` or `jwe.Decrypt()` is executed.
+//
+// Unlike `jwe.WithKeySet()`, the `kid` field does not need to match for the key
+// to be tried.
+func WithKey(alg jwa.KeyAlgorithm, key any, options ...WithKeySuboption) EncryptDecryptOption {
+ var hdr Headers
+ for _, option := range options {
+ switch option.Ident() {
+ case identPerRecipientHeaders{}:
+ if err := option.Value(&hdr); err != nil {
+ panic(`jwe.WithKey() requires Headers value for WithPerRecipientHeaders option`)
+ }
+ }
+ }
+
+ return &encryptDecryptOption{option.New(identKey{}, &withKey{
+ alg: alg,
+ key: key,
+ headers: hdr,
+ })}
+}
+
+func WithKeySet(set jwk.Set, options ...WithKeySetSuboption) DecryptOption {
+ requireKid := true
+ for _, option := range options {
+ switch option.Ident() {
+ case identRequireKid{}:
+ if err := option.Value(&requireKid); err != nil {
+ panic(`jwe.WithKeySet() requires bool value for WithRequireKid option`)
+ }
+ }
+ }
+
+ return WithKeyProvider(&keySetProvider{
+ set: set,
+ requireKid: requireKid,
+ })
+}
+
+// WithJSON specifies that the result of `jwe.Encrypt()` is serialized in
+// JSON format.
+//
+// If you pass multiple keys to `jwe.Encrypt()`, it will fail unless
+// you also pass this option.
+func WithJSON(options ...WithJSONSuboption) EncryptOption {
+ var pretty bool
+ for _, option := range options {
+ switch option.Ident() {
+ case identPretty{}:
+ if err := option.Value(&pretty); err != nil {
+ panic(`jwe.WithJSON() requires bool value for WithPretty option`)
+ }
+ }
+ }
+
+ format := fmtJSON
+ if pretty {
+ format = fmtJSONPretty
+ }
+ return &encryptOption{option.New(identSerialization{}, format)}
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/options.yaml b/vendor/github.com/lestrrat-go/jwx/v3/jwe/options.yaml
new file mode 100644
index 0000000000..b7fb0262de
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/options.yaml
@@ -0,0 +1,172 @@
+package_name: jwe
+output: jwe/options_gen.go
+interfaces:
+ - name: GlobalOption
+ comment: |
+ GlobalOption describes options that changes global settings for this package
+ - name: GlobalDecryptOption
+ comment: |
+ GlobalDecryptOption describes options that changes global settings and for each call of the `jwe.Decrypt` function
+ methods:
+ - globalOption
+ - decryptOption
+ - name: CompactOption
+ comment: |
+ CompactOption describes options that can be passed to `jwe.Compact`
+ - name: DecryptOption
+ comment: |
+ DecryptOption describes options that can be passed to `jwe.Decrypt`
+ - name: EncryptOption
+ comment: |
+ EncryptOption describes options that can be passed to `jwe.Encrypt`
+ - name: EncryptDecryptOption
+ methods:
+ - encryptOption
+ - decryptOption
+ comment: |
+ EncryptDecryptOption describes options that can be passed to either `jwe.Encrypt` or `jwe.Decrypt`
+ - name: WithJSONSuboption
+ concrete_type: withJSONSuboption
+ comment: |
+ JSONSuboption describes suboptions that can be passed to `jwe.WithJSON()` option
+ - name: WithKeySetSuboption
+ comment: |
+ WithKeySetSuboption is a suboption passed to the WithKeySet() option
+ - name: ParseOption
+ methods:
+ - readFileOption
+ comment: |
+ ReadFileOption is a type of `Option` that can be passed to `jwe.Parse`
+ - name: ReadFileOption
+ comment: |
+ ReadFileOption is a type of `Option` that can be passed to `jwe.ReadFile`
+options:
+ - ident: Key
+ skip_option: true
+ - ident: Pretty
+ skip_option: true
+ - ident: ProtectedHeaders
+ skip_option: true
+ - ident: PerRecipientHeaders
+ skip_option: true
+ - ident: KeyProvider
+ interface: DecryptOption
+ argument_type: KeyProvider
+ - ident: Context
+ interface: DecryptOption
+ argument_type: context.Context
+ comment: |
+ WithContext specifies the context.Context object to use when decrypting a JWE message.
+ If not provided, context.Background() will be used.
+ - ident: Serialization
+ option_name: WithCompact
+ interface: EncryptOption
+ constant_value: fmtCompact
+ comment: |
+ WithCompact specifies that the result of `jwe.Encrypt()` is serialized in
+ compact format.
+
+ By default `jwe.Encrypt()` will opt to use compact format, so you usually
+ do not need to specify this option other than to be explicit about it
+ - ident: Compress
+ interface: EncryptOption
+ argument_type: jwa.CompressionAlgorithm
+ comment: |
+ WithCompress specifies the compression algorithm to use when encrypting
+ a payload using `jwe.Encrypt` (Yes, we know it can only be "" or "DEF",
+ but the way the specification is written it could allow for more options,
+ and therefore this option takes an argument)
+ - ident: ContentEncryptionAlgorithm
+ interface: EncryptOption
+ option_name: WithContentEncryption
+ argument_type: jwa.ContentEncryptionAlgorithm
+ comment: |
+ WithContentEncryptionAlgorithm specifies the algorithm to encrypt the
+ JWE message content with. If not provided, `jwa.A256GCM` is used.
+ - ident: Message
+ interface: DecryptOption
+ argument_type: '*Message'
+ comment: |
+ WithMessage provides a message object to be populated by `jwe.Decrypt`
+ Using this option allows you to decrypt AND obtain the `jwe.Message`
+ in one go.
+ - ident: RequireKid
+ interface: WithKeySetSuboption
+ argument_type: bool
+ comment: |
+ WithRequiredKid specifies whether the keys in the jwk.Set should
+ only be matched if the target JWE message's Key ID and the Key ID
+ in the given key matches.
+ - ident: Pretty
+ interface: WithJSONSuboption
+ argument_type: bool
+ comment: |
+ WithPretty specifies whether the JSON output should be formatted and
+ indented
+ - ident: MergeProtectedHeaders
+ interface: EncryptOption
+ argument_type: bool
+ comment: |
+ WithMergeProtectedHeaders specify that when given multiple headers
+ as options to `jwe.Encrypt`, these headers should be merged instead
+ of overwritten
+ - ident: FS
+ interface: ReadFileOption
+ argument_type: fs.FS
+ comment: |
+ WithFS specifies the source `fs.FS` object to read the file from.
+ - ident: KeyUsed
+ interface: DecryptOption
+ argument_type: 'any'
+ comment: |
+ WithKeyUsed allows you to specify the `jwe.Decrypt()` function to
+ return the key used for decryption. This may be useful when
+ you specify multiple key sources or if you pass a `jwk.Set`
+ and you want to know which key was successful at decrypting the
+ CEK.
+
+ `v` must be a pointer to an empty `any`. Do not use
+ `jwk.Key` here unless you are 100% sure that all keys that you
+ have provided are instances of `jwk.Key` (remember that the
+ jwx API allows users to specify a raw key such as *rsa.PublicKey)
+ - ident: CEK
+ interface: DecryptOption
+ argument_type: '*[]byte'
+ comment: |
+ WithCEK allows users to specify a variable to store the CEK used in the
+ message upon successful decryption. The variable must be a pointer to
+ a byte slice, and it will only be populated if the decryption is successful.
+
+ This option is currently considered EXPERIMENTAL, and is subject to
+ future changes across minor/micro versions.
+ - ident: MaxPBES2Count
+ interface: GlobalOption
+ argument_type: int
+ comment: |
+ WithMaxPBES2Count specifies the maximum number of PBES2 iterations
+ to use when decrypting a message. If not specified, the default
+ value of 10,000 is used.
+
+ This option has a global effect.
+ - ident: MaxDecompressBufferSize
+ interface: GlobalDecryptOption
+ argument_type: int64
+ comment: |
+ WithMaxDecompressBufferSize specifies the maximum buffer size for used when
+ decompressing the payload of a JWE message. If a compressed JWE payload
+ exceeds this amount when decompressed, jwe.Decrypt will return an error.
+ The default value is 10MB.
+
+ This option can be used for `jwe.Settings()`, which changes the behavior
+ globally, or for `jwe.Decrypt()`, which changes the behavior for that
+ specific call.
+ - ident: CBCBufferSize
+ interface: GlobalOption
+ argument_type: int64
+ comment: |
+ WithCBCBufferSize specifies the maximum buffer size for internal
+ calculations, such as when AES-CBC is performed. The default value is 256MB.
+ If set to an invalid value, the default value is used.
+ In v2, this option was called MaxBufferSize.
+
+ This option has a global effect.
\ No newline at end of file
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/options_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/options_gen.go
new file mode 100644
index 0000000000..2a15c141b4
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/options_gen.go
@@ -0,0 +1,350 @@
+// Code generated by tools/cmd/genoptions/main.go. DO NOT EDIT.
+
+package jwe
+
+import (
+ "context"
+ "io/fs"
+
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/option/v2"
+)
+
+type Option = option.Interface
+
+// CompactOption describes options that can be passed to `jwe.Compact`
+type CompactOption interface {
+ Option
+ compactOption()
+}
+
+type compactOption struct {
+ Option
+}
+
+func (*compactOption) compactOption() {}
+
+// DecryptOption describes options that can be passed to `jwe.Decrypt`
+type DecryptOption interface {
+ Option
+ decryptOption()
+}
+
+type decryptOption struct {
+ Option
+}
+
+func (*decryptOption) decryptOption() {}
+
+// EncryptDecryptOption describes options that can be passed to either `jwe.Encrypt` or `jwe.Decrypt`
+type EncryptDecryptOption interface {
+ Option
+ encryptOption()
+ decryptOption()
+}
+
+type encryptDecryptOption struct {
+ Option
+}
+
+func (*encryptDecryptOption) encryptOption() {}
+
+func (*encryptDecryptOption) decryptOption() {}
+
+// EncryptOption describes options that can be passed to `jwe.Encrypt`
+type EncryptOption interface {
+ Option
+ encryptOption()
+}
+
+type encryptOption struct {
+ Option
+}
+
+func (*encryptOption) encryptOption() {}
+
+// GlobalDecryptOption describes options that changes global settings and for each call of the `jwe.Decrypt` function
+type GlobalDecryptOption interface {
+ Option
+ globalOption()
+ decryptOption()
+}
+
+type globalDecryptOption struct {
+ Option
+}
+
+func (*globalDecryptOption) globalOption() {}
+
+func (*globalDecryptOption) decryptOption() {}
+
+// GlobalOption describes options that changes global settings for this package
+type GlobalOption interface {
+ Option
+ globalOption()
+}
+
+type globalOption struct {
+ Option
+}
+
+func (*globalOption) globalOption() {}
+
+// ReadFileOption is a type of `Option` that can be passed to `jwe.Parse`
+type ParseOption interface {
+ Option
+ readFileOption()
+}
+
+type parseOption struct {
+ Option
+}
+
+func (*parseOption) readFileOption() {}
+
+// ReadFileOption is a type of `Option` that can be passed to `jwe.ReadFile`
+type ReadFileOption interface {
+ Option
+ readFileOption()
+}
+
+type readFileOption struct {
+ Option
+}
+
+func (*readFileOption) readFileOption() {}
+
+// JSONSuboption describes suboptions that can be passed to `jwe.WithJSON()` option
+type WithJSONSuboption interface {
+ Option
+ withJSONSuboption()
+}
+
+type withJSONSuboption struct {
+ Option
+}
+
+func (*withJSONSuboption) withJSONSuboption() {}
+
+// WithKeySetSuboption is a suboption passed to the WithKeySet() option
+type WithKeySetSuboption interface {
+ Option
+ withKeySetSuboption()
+}
+
+type withKeySetSuboption struct {
+ Option
+}
+
+func (*withKeySetSuboption) withKeySetSuboption() {}
+
+type identCBCBufferSize struct{}
+type identCEK struct{}
+type identCompress struct{}
+type identContentEncryptionAlgorithm struct{}
+type identContext struct{}
+type identFS struct{}
+type identKey struct{}
+type identKeyProvider struct{}
+type identKeyUsed struct{}
+type identMaxDecompressBufferSize struct{}
+type identMaxPBES2Count struct{}
+type identMergeProtectedHeaders struct{}
+type identMessage struct{}
+type identPerRecipientHeaders struct{}
+type identPretty struct{}
+type identProtectedHeaders struct{}
+type identRequireKid struct{}
+type identSerialization struct{}
+
+func (identCBCBufferSize) String() string {
+ return "WithCBCBufferSize"
+}
+
+func (identCEK) String() string {
+ return "WithCEK"
+}
+
+func (identCompress) String() string {
+ return "WithCompress"
+}
+
+func (identContentEncryptionAlgorithm) String() string {
+ return "WithContentEncryption"
+}
+
+func (identContext) String() string {
+ return "WithContext"
+}
+
+func (identFS) String() string {
+ return "WithFS"
+}
+
+func (identKey) String() string {
+ return "WithKey"
+}
+
+func (identKeyProvider) String() string {
+ return "WithKeyProvider"
+}
+
+func (identKeyUsed) String() string {
+ return "WithKeyUsed"
+}
+
+func (identMaxDecompressBufferSize) String() string {
+ return "WithMaxDecompressBufferSize"
+}
+
+func (identMaxPBES2Count) String() string {
+ return "WithMaxPBES2Count"
+}
+
+func (identMergeProtectedHeaders) String() string {
+ return "WithMergeProtectedHeaders"
+}
+
+func (identMessage) String() string {
+ return "WithMessage"
+}
+
+func (identPerRecipientHeaders) String() string {
+ return "WithPerRecipientHeaders"
+}
+
+func (identPretty) String() string {
+ return "WithPretty"
+}
+
+func (identProtectedHeaders) String() string {
+ return "WithProtectedHeaders"
+}
+
+func (identRequireKid) String() string {
+ return "WithRequireKid"
+}
+
+func (identSerialization) String() string {
+ return "WithSerialization"
+}
+
+// WithCBCBufferSize specifies the maximum buffer size for internal
+// calculations, such as when AES-CBC is performed. The default value is 256MB.
+// If set to an invalid value, the default value is used.
+// In v2, this option was called MaxBufferSize.
+//
+// This option has a global effect.
+func WithCBCBufferSize(v int64) GlobalOption {
+ return &globalOption{option.New(identCBCBufferSize{}, v)}
+}
+
+// WithCEK allows users to specify a variable to store the CEK used in the
+// message upon successful decryption. The variable must be a pointer to
+// a byte slice, and it will only be populated if the decryption is successful.
+//
+// This option is currently considered EXPERIMENTAL, and is subject to
+// future changes across minor/micro versions.
+func WithCEK(v *[]byte) DecryptOption {
+ return &decryptOption{option.New(identCEK{}, v)}
+}
+
+// WithCompress specifies the compression algorithm to use when encrypting
+// a payload using `jwe.Encrypt` (Yes, we know it can only be "" or "DEF",
+// but the way the specification is written it could allow for more options,
+// and therefore this option takes an argument)
+func WithCompress(v jwa.CompressionAlgorithm) EncryptOption {
+ return &encryptOption{option.New(identCompress{}, v)}
+}
+
+// WithContentEncryptionAlgorithm specifies the algorithm to encrypt the
+// JWE message content with. If not provided, `jwa.A256GCM` is used.
+func WithContentEncryption(v jwa.ContentEncryptionAlgorithm) EncryptOption {
+ return &encryptOption{option.New(identContentEncryptionAlgorithm{}, v)}
+}
+
+// WithContext specifies the context.Context object to use when decrypting a JWE message.
+// If not provided, context.Background() will be used.
+func WithContext(v context.Context) DecryptOption {
+ return &decryptOption{option.New(identContext{}, v)}
+}
+
+// WithFS specifies the source `fs.FS` object to read the file from.
+func WithFS(v fs.FS) ReadFileOption {
+ return &readFileOption{option.New(identFS{}, v)}
+}
+
+func WithKeyProvider(v KeyProvider) DecryptOption {
+ return &decryptOption{option.New(identKeyProvider{}, v)}
+}
+
+// WithKeyUsed allows you to specify the `jwe.Decrypt()` function to
+// return the key used for decryption. This may be useful when
+// you specify multiple key sources or if you pass a `jwk.Set`
+// and you want to know which key was successful at decrypting the
+// CEK.
+//
+// `v` must be a pointer to an empty `any`. Do not use
+// `jwk.Key` here unless you are 100% sure that all keys that you
+// have provided are instances of `jwk.Key` (remember that the
+// jwx API allows users to specify a raw key such as *rsa.PublicKey)
+func WithKeyUsed(v any) DecryptOption {
+ return &decryptOption{option.New(identKeyUsed{}, v)}
+}
+
+// WithMaxDecompressBufferSize specifies the maximum buffer size for used when
+// decompressing the payload of a JWE message. If a compressed JWE payload
+// exceeds this amount when decompressed, jwe.Decrypt will return an error.
+// The default value is 10MB.
+//
+// This option can be used for `jwe.Settings()`, which changes the behavior
+// globally, or for `jwe.Decrypt()`, which changes the behavior for that
+// specific call.
+func WithMaxDecompressBufferSize(v int64) GlobalDecryptOption {
+ return &globalDecryptOption{option.New(identMaxDecompressBufferSize{}, v)}
+}
+
+// WithMaxPBES2Count specifies the maximum number of PBES2 iterations
+// to use when decrypting a message. If not specified, the default
+// value of 10,000 is used.
+//
+// This option has a global effect.
+func WithMaxPBES2Count(v int) GlobalOption {
+ return &globalOption{option.New(identMaxPBES2Count{}, v)}
+}
+
+// WithMergeProtectedHeaders specify that when given multiple headers
+// as options to `jwe.Encrypt`, these headers should be merged instead
+// of overwritten
+func WithMergeProtectedHeaders(v bool) EncryptOption {
+ return &encryptOption{option.New(identMergeProtectedHeaders{}, v)}
+}
+
+// WithMessage provides a message object to be populated by `jwe.Decrypt`
+// Using this option allows you to decrypt AND obtain the `jwe.Message`
+// in one go.
+func WithMessage(v *Message) DecryptOption {
+ return &decryptOption{option.New(identMessage{}, v)}
+}
+
+// WithPretty specifies whether the JSON output should be formatted and
+// indented
+func WithPretty(v bool) WithJSONSuboption {
+ return &withJSONSuboption{option.New(identPretty{}, v)}
+}
+
+// WithRequiredKid specifies whether the keys in the jwk.Set should
+// only be matched if the target JWE message's Key ID and the Key ID
+// in the given key matches.
+func WithRequireKid(v bool) WithKeySetSuboption {
+ return &withKeySetSuboption{option.New(identRequireKid{}, v)}
+}
+
+// WithCompact specifies that the result of `jwe.Encrypt()` is serialized in
+// compact format.
+//
+// By default `jwe.Encrypt()` will opt to use compact format, so you usually
+// do not need to specify this option other than to be explicit about it
+func WithCompact() EncryptOption {
+ return &encryptOption{option.New(identSerialization{}, fmtCompact)}
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwk/BUILD.bazel
new file mode 100644
index 0000000000..8e82e1f009
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/BUILD.bazel
@@ -0,0 +1,87 @@
+load("@rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "jwk",
+ srcs = [
+ "cache.go",
+ "convert.go",
+ "ecdsa.go",
+ "ecdsa_gen.go",
+ "errors.go",
+ "fetch.go",
+ "filter.go",
+ "interface.go",
+ "interface_gen.go",
+ "io.go",
+ "jwk.go",
+ "key_ops.go",
+ "okp.go",
+ "okp_gen.go",
+ "options.go",
+ "options_gen.go",
+ "parser.go",
+ "rsa.go",
+ "rsa_gen.go",
+ "set.go",
+ "symmetric.go",
+ "symmetric_gen.go",
+ "usage.go",
+ "whitelist.go",
+ "x509.go",
+ ],
+ importpath = "github.com/lestrrat-go/jwx/v3/jwk",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//cert",
+ "//internal/base64",
+ "//internal/ecutil",
+ "//transform",
+ "//internal/json",
+ "//internal/pool",
+ "//internal/tokens",
+ "//jwa",
+ "//jwk/ecdsa",
+ "//jwk/jwkbb",
+ "@com_github_lestrrat_go_blackmagic//:blackmagic",
+ "@com_github_lestrrat_go_httprc_v3//:httprc",
+ "@com_github_lestrrat_go_option_v2//:option",
+ ],
+)
+
+go_test(
+ name = "jwk_test",
+ srcs = [
+ "filter_test.go",
+ "headers_test.go",
+ "jwk_internal_test.go",
+ "jwk_test.go",
+ "options_gen_test.go",
+ "refresh_test.go",
+ "set_test.go",
+ "x5c_test.go",
+ ],
+ data = glob(["testdata/**"]),
+ embed = [":jwk"],
+ deps = [
+ "//cert",
+ "//internal/base64",
+ "//internal/jose",
+ "//internal/json",
+ "//internal/jwxtest",
+ "//internal/tokens",
+ "//jwa",
+ "//jwk/ecdsa",
+ "//jws",
+ "@com_github_lestrrat_go_blackmagic//:blackmagic",
+ "@com_github_lestrrat_go_httprc_v3//:httprc",
+ "@com_github_lestrrat_go_httprc_v3//tracesink",
+ "@com_github_stretchr_testify//assert",
+ "@com_github_stretchr_testify//require",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":jwk",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/README.md b/vendor/github.com/lestrrat-go/jwx/v3/jwk/README.md
new file mode 100644
index 0000000000..741dd4647d
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/README.md
@@ -0,0 +1,215 @@
+# JWK [](https://pkg.go.dev/github.com/lestrrat-go/jwx/v3/jwk)
+
+Package jwk implements JWK as described in [RFC7517](https://tools.ietf.org/html/rfc7517).
+If you are looking to use JWT wit JWKs, look no further than [github.com/lestrrat-go/jwx](../jwt).
+
+* Parse and work with RSA/EC/Symmetric/OKP JWK types
+ * Convert to and from JSON
+ * Convert to and from raw key types (e.g. *rsa.PrivateKey)
+* Ability to keep a JWKS fresh using *jwk.AutoRefresh
+
+## Supported key types:
+
+| kty | Curve | Go Key Type |
+|:----|:------------------------|:----------------------------------------------|
+| RSA | N/A | rsa.PrivateKey / rsa.PublicKey (2) |
+| EC | P-256
P-384
P-521
secp256k1 (1) | ecdsa.PrivateKey / ecdsa.PublicKey (2) |
+| oct | N/A | []byte |
+| OKP | Ed25519 (1) | ed25519.PrivateKey / ed25519.PublicKey (2) |
+| | X25519 (1) | (jwx/)x25519.PrivateKey / x25519.PublicKey (2)|
+
+* Note 1: Experimental
+* Note 2: Either value or pointers accepted (e.g. rsa.PrivateKey or *rsa.PrivateKey)
+
+# Documentation
+
+Please read the [API reference](https://pkg.go.dev/github.com/lestrrat-go/jwx/v3/jwk), or
+the how-to style documentation on how to use JWK can be found in the [docs directory](../docs/04-jwk.md).
+
+# Auto-Refresh a key during a long-running process
+
+
+```go
+package examples_test
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/lestrrat-go/httprc/v3"
+ "github.com/lestrrat-go/jwx/v3/jwk"
+)
+
+func Example_jwk_cache() {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ const googleCerts = `https://www.googleapis.com/oauth2/v3/certs`
+
+ // First, set up the `jwk.Cache` object. You need to pass it a
+ // `context.Context` object to control the lifecycle of the background fetching goroutine.
+ c, err := jwk.NewCache(ctx, httprc.NewClient())
+ if err != nil {
+ fmt.Printf("failed to create cache: %s\n", err)
+ return
+ }
+
+ // Tell *jwk.Cache that we only want to refresh this JWKS periodically.
+ if err := c.Register(ctx, googleCerts); err != nil {
+ fmt.Printf("failed to register google JWKS: %s\n", err)
+ return
+ }
+
+ // Pretend that this is your program's main loop
+MAIN:
+ for {
+ select {
+ case <-ctx.Done():
+ break MAIN
+ default:
+ }
+ keyset, err := c.Lookup(ctx, googleCerts)
+ if err != nil {
+ fmt.Printf("failed to fetch google JWKS: %s\n", err)
+ return
+ }
+ _ = keyset
+ // The returned `keyset` will always be "reasonably" new.
+ //
+ // By "reasonably" we mean that we cannot guarantee that the keys will be refreshed
+ // immediately after it has been rotated in the remote source. But it should be close\
+ // enough, and should you need to forcefully refresh the token using the `(jwk.Cache).Refresh()` method.
+ //
+ // If refetching the keyset fails, a cached version will be returned from the previous
+ // successful sync
+
+ // Do interesting stuff with the keyset... but here, we just
+ // sleep for a bit
+ time.Sleep(time.Second)
+
+ // Because we're a dummy program, we just cancel the loop now.
+ // If this were a real program, you presumably loop forever
+ cancel()
+ }
+ // OUTPUT:
+}
+```
+source: [examples/jwk_cache_example_test.go](https://github.com/lestrrat-go/jwx/blob/v3/examples/jwk_cache_example_test.go)
+
+
+Parse and use a JWK key:
+
+
+```go
+package examples_test
+
+import (
+ "context"
+ "fmt"
+ "log"
+
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+ "github.com/lestrrat-go/jwx/v3/jwk"
+)
+
+func Example_jwk_usage() {
+ // Use jwk.Cache if you intend to keep reuse the JWKS over and over
+ set, err := jwk.Fetch(context.Background(), "https://www.googleapis.com/oauth2/v3/certs")
+ if err != nil {
+ log.Printf("failed to parse JWK: %s", err)
+ return
+ }
+
+ // Key sets can be serialized back to JSON
+ {
+ jsonbuf, err := json.Marshal(set)
+ if err != nil {
+ log.Printf("failed to marshal key set into JSON: %s", err)
+ return
+ }
+ log.Printf("%s", jsonbuf)
+ }
+
+ for i := 0; i < set.Len(); i++ {
+ var rawkey any // This is where we would like to store the raw key, like *rsa.PrivateKey or *ecdsa.PrivateKey
+ key, ok := set.Key(i) // This retrieves the corresponding jwk.Key
+ if !ok {
+ log.Printf("failed to get key at index %d", i)
+ return
+ }
+
+ // jws and jwe operations can be performed using jwk.Key, but you could also
+ // covert it to their "raw" forms, such as *rsa.PrivateKey or *ecdsa.PrivateKey
+ if err := jwk.Export(key, &rawkey); err != nil {
+ log.Printf("failed to create public key: %s", err)
+ return
+ }
+ _ = rawkey
+
+ // You can create jwk.Key from a raw key, too
+ fromRawKey, err := jwk.Import(rawkey)
+ if err != nil {
+ log.Printf("failed to acquire raw key from jwk.Key: %s", err)
+ return
+ }
+
+ // Keys can be serialized back to JSON
+ jsonbuf, err := json.Marshal(key)
+ if err != nil {
+ log.Printf("failed to marshal key into JSON: %s", err)
+ return
+ }
+
+ fromJSONKey, err := jwk.Parse(jsonbuf)
+ if err != nil {
+ log.Printf("failed to parse json: %s", err)
+ return
+ }
+ _ = fromJSONKey
+ _ = fromRawKey
+ }
+ // OUTPUT:
+}
+
+//nolint:govet
+func Example_jwk_marshal_json() {
+ // JWKs that inherently involve randomness such as RSA and EC keys are
+ // not used in this example, because they may produce different results
+ // depending on the environment.
+ //
+ // (In fact, even if you use a static source of randomness, tests may fail
+ // because of internal changes in the Go runtime).
+
+ raw := []byte("01234567890123456789012345678901234567890123456789ABCDEF")
+
+ // This would create a symmetric key
+ key, err := jwk.Import(raw)
+ if err != nil {
+ fmt.Printf("failed to create symmetric key: %s\n", err)
+ return
+ }
+ if _, ok := key.(jwk.SymmetricKey); !ok {
+ fmt.Printf("expected jwk.SymmetricKey, got %T\n", key)
+ return
+ }
+
+ key.Set(jwk.KeyIDKey, "mykey")
+
+ buf, err := json.MarshalIndent(key, "", " ")
+ if err != nil {
+ fmt.Printf("failed to marshal key into JSON: %s\n", err)
+ return
+ }
+ fmt.Printf("%s\n", buf)
+
+ // OUTPUT:
+ // {
+ // "k": "MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODlBQkNERUY",
+ // "kid": "mykey",
+ // "kty": "oct"
+ // }
+}
+```
+source: [examples/jwk_example_test.go](https://github.com/lestrrat-go/jwx/blob/v3/examples/jwk_example_test.go)
+
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/cache.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/cache.go
new file mode 100644
index 0000000000..b83b56c790
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/cache.go
@@ -0,0 +1,362 @@
+package jwk
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/lestrrat-go/httprc/v3"
+)
+
+type HTTPClient = httprc.HTTPClient
+type ErrorSink = httprc.ErrorSink
+type TraceSink = httprc.TraceSink
+
+// Cache is a container built on top of github.com/lestrrat-go/httprc/v3
+// that keeps track of Set object by their source URLs.
+// The Set objects are stored in memory, and are refreshed automatically
+// behind the scenes.
+//
+// Before retrieving the Set objects, the user must pre-register the
+// URLs they intend to use by calling `Register()`
+//
+// c := jwk.NewCache(ctx, httprc.NewClient())
+// c.Register(ctx, url, options...)
+//
+// Once registered, you can call `Get()` to retrieve the Set object.
+//
+// All JWKS objects that are retrieved via this mechanism should be
+// treated read-only, as they are shared among all consumers, as well
+// as the `jwk.Cache` object.
+//
+// There are cases where `jwk.Cache` and `jwk.CachedSet` should and
+// should not be used.
+//
+// First and foremost, do NOT use a cache for those JWKS objects that
+// need constant checking. For example, unreliable or user-provided JWKS (i.e. those
+// JWKS that are not from a well-known provider) should not be fetched
+// through a `jwk.Cache` or `jwk.CachedSet`.
+//
+// For example, if you have a flaky JWKS server for development
+// that can go down often, you should consider alternatives such as
+// providing `http.Client` with a caching `http.RoundTripper` configured
+// (see `jwk.WithHTTPClient`), setting up a reverse proxy, etc.
+// These techniques allow you to set up a more robust way to both cache
+// and report precise causes of the problems than using `jwk.Cache` or
+// `jwk.CachedSet`. If you handle the caching at the HTTP level like this,
+// you will be able to use a simple `jwk.Fetch` call and not worry about the cache.
+//
+// User-provided JWKS objects may also be problematic, as it may go down
+// unexpectedly (and frequently!), and it will be hard to detect when
+// the URLs or its contents are swapped.
+//
+// A good use-case for `jwk.Cache` and `jwk.CachedSet` are for "stable"
+// JWKS objects.
+//
+// When we say "stable", we are thinking of JWKS that should mostly be
+// ALWAYS available. A good example are those JWKS objects provided by
+// major cloud providers such as Google Cloud, AWS, or Azure.
+// Stable JWKS may still experience intermittent network connectivity problems,
+// but you can expect that they will eventually recover in relatively
+// short period of time. They rarely change URLs, and the contents are
+// expected to be valid or otherwise it would cause havoc to those providers
+//
+// We also know that these stable JWKS objects are rotated periodically,
+// which is a perfect use for `jwk.Cache` and `jwk.CachedSet`. The caches
+// can be configured to periodically refresh the JWKS thereby keeping them
+// fresh without extra intervention from the developer.
+//
+// Notice that for these recommended use-cases the requirement to check
+// the validity or the availability of the JWKS objects are non-existent,
+// as it is expected that they will be available and will be valid. The
+// caching mechanism can hide intermittent connectivity problems as well
+// as keep the objects mostly fresh.
+type Cache struct {
+ ctrl httprc.Controller
+}
+
+// Transformer is a specialized version of `httprc.Transformer` that implements
+// conversion from a `http.Response` object to a `jwk.Set` object. Use this in
+// conjection with `httprc.NewResource` to create a `httprc.Resource` object
+// to auto-update `jwk.Set` objects.
+type Transformer struct {
+ parseOptions []ParseOption
+}
+
+func (t Transformer) Transform(_ context.Context, res *http.Response) (Set, error) {
+ buf, err := io.ReadAll(res.Body)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to read response body status: %w`, err)
+ }
+
+ set, err := Parse(buf, t.parseOptions...)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to parse JWK set at %q: %w`, res.Request.URL.String(), err)
+ }
+
+ return set, nil
+}
+
+// NewCache creates a new `jwk.Cache` object.
+//
+// Under the hood, `jwk.Cache` uses `httprc.Client` manage the
+// fetching and caching of JWKS objects, and thus spawns multiple goroutines
+// per `jwk.Cache` object.
+//
+// The provided `httprc.Client` object must NOT be started prior to
+// passing it to `jwk.NewCache`. The `jwk.Cache` object will start
+// the `httprc.Client` object on its own.
+func NewCache(ctx context.Context, client *httprc.Client) (*Cache, error) {
+ ctrl, err := client.Start(ctx)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to start httprc.Client: %w`, err)
+ }
+
+ return &Cache{
+ ctrl: ctrl,
+ }, nil
+}
+
+// Register registers a URL to be managed by the cache. URLs must
+// be registered before issuing `Get`
+//
+// The `Register` method is a thin wrapper around `(httprc.Controller).Add`
+func (c *Cache) Register(ctx context.Context, u string, options ...RegisterOption) error {
+ var parseOptions []ParseOption
+ var resourceOptions []httprc.NewResourceOption
+ waitReady := true
+ for _, option := range options {
+ switch option := option.(type) {
+ case ParseOption:
+ parseOptions = append(parseOptions, option)
+ case ResourceOption:
+ var v httprc.NewResourceOption
+ if err := option.Value(&v); err != nil {
+ return fmt.Errorf(`failed to retrieve NewResourceOption option value: %w`, err)
+ }
+ resourceOptions = append(resourceOptions, v)
+ default:
+ switch option.Ident() {
+ case identHTTPClient{}:
+ var cli HTTPClient
+ if err := option.Value(&cli); err != nil {
+ return fmt.Errorf(`failed to retrieve HTTPClient option value: %w`, err)
+ }
+ resourceOptions = append(resourceOptions, httprc.WithHTTPClient(cli))
+ case identWaitReady{}:
+ if err := option.Value(&waitReady); err != nil {
+ return fmt.Errorf(`failed to retrieve WaitReady option value: %w`, err)
+ }
+ }
+ }
+ }
+
+ r, err := httprc.NewResource[Set](u, &Transformer{
+ parseOptions: parseOptions,
+ }, resourceOptions...)
+ if err != nil {
+ return fmt.Errorf(`failed to create httprc.Resource: %w`, err)
+ }
+ if err := c.ctrl.Add(ctx, r, httprc.WithWaitReady(waitReady)); err != nil {
+ return fmt.Errorf(`failed to add resource to httprc.Client: %w`, err)
+ }
+
+ return nil
+}
+
+// LookupResource returns the `httprc.Resource` object associated with the
+// given URL `u`. If the URL has not been registered, an error is returned.
+func (c *Cache) LookupResource(ctx context.Context, u string) (*httprc.ResourceBase[Set], error) {
+ r, err := c.ctrl.Lookup(ctx, u)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to lookup resource %q: %w`, u, err)
+ }
+ //nolint:forcetypeassert
+ return r.(*httprc.ResourceBase[Set]), nil
+}
+
+func (c *Cache) Lookup(ctx context.Context, u string) (Set, error) {
+ r, err := c.LookupResource(ctx, u)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to lookup resource %q: %w`, u, err)
+ }
+ set := r.Resource()
+ if set == nil {
+ return nil, fmt.Errorf(`resource %q is not ready`, u)
+ }
+ return set, nil
+}
+
+func (c *Cache) Ready(ctx context.Context, u string) bool {
+ r, err := c.LookupResource(ctx, u)
+ if err != nil {
+ return false
+ }
+ if err := r.Ready(ctx); err != nil {
+ return false
+ }
+ return true
+}
+
+// Refresh is identical to Get(), except it always fetches the
+// specified resource anew, and updates the cached content
+//
+// Please refer to the documentation for `(httprc.Cache).Refresh` for
+// more details
+func (c *Cache) Refresh(ctx context.Context, u string) (Set, error) {
+ if err := c.ctrl.Refresh(ctx, u); err != nil {
+ return nil, fmt.Errorf(`failed to refresh resource %q: %w`, u, err)
+ }
+ return c.Lookup(ctx, u)
+}
+
+// IsRegistered returns true if the given URL `u` has already been registered
+// in the cache.
+func (c *Cache) IsRegistered(ctx context.Context, u string) bool {
+ _, err := c.LookupResource(ctx, u)
+ return err == nil
+}
+
+// Unregister removes the given URL `u` from the cache.
+func (c *Cache) Unregister(ctx context.Context, u string) error {
+ return c.ctrl.Remove(ctx, u)
+}
+
+func (c *Cache) Shutdown(ctx context.Context) error {
+ return c.ctrl.ShutdownContext(ctx)
+}
+
+// CachedSet is a thin shim over jwk.Cache that allows the user to cloak
+// jwk.Cache as if it's a `jwk.Set`. Behind the scenes, the `jwk.Set` is
+// retrieved from the `jwk.Cache` for every operation.
+//
+// Since `jwk.CachedSet` always deals with a cached version of the `jwk.Set`,
+// all operations that mutate the object (such as AddKey(), RemoveKey(), et. al)
+// are no-ops and return an error.
+//
+// Note that since this is a utility shim over `jwk.Cache`, you _will_ lose
+// the ability to control the finer details (such as controlling how long to
+// wait for in case of a fetch failure using `context.Context`)
+//
+// Make sure that you read the documentation for `jwk.Cache` as well.
+type CachedSet interface {
+ Set
+ cached() (Set, error) // used as a marker
+}
+
+type cachedSet struct {
+ r *httprc.ResourceBase[Set]
+}
+
+func (c *Cache) CachedSet(u string) (CachedSet, error) {
+ r, err := c.LookupResource(context.Background(), u)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to lookup resource %q: %w`, u, err)
+ }
+ return NewCachedSet(r), nil
+}
+
+func NewCachedSet(r *httprc.ResourceBase[Set]) CachedSet {
+ return &cachedSet{
+ r: r,
+ }
+}
+
+func (cs *cachedSet) cached() (Set, error) {
+ if err := cs.r.Ready(context.Background()); err != nil {
+ return nil, fmt.Errorf(`failed to fetch resource: %w`, err)
+ }
+ return cs.r.Resource(), nil
+}
+
+// Add is a no-op for `jwk.CachedSet`, as the `jwk.Set` should be treated read-only
+func (*cachedSet) AddKey(_ Key) error {
+ return fmt.Errorf(`(jwk.Cachedset).AddKey: jwk.CachedSet is immutable`)
+}
+
+// Clear is a no-op for `jwk.CachedSet`, as the `jwk.Set` should be treated read-only
+func (*cachedSet) Clear() error {
+ return fmt.Errorf(`(jwk.cachedSet).Clear: jwk.CachedSet is immutable`)
+}
+
+// Set is a no-op for `jwk.CachedSet`, as the `jwk.Set` should be treated read-only
+func (*cachedSet) Set(_ string, _ any) error {
+ return fmt.Errorf(`(jwk.cachedSet).Set: jwk.CachedSet is immutable`)
+}
+
+// Remove is a no-op for `jwk.CachedSet`, as the `jwk.Set` should be treated read-only
+func (*cachedSet) Remove(_ string) error {
+ // TODO: Remove() should be renamed to Remove(string) error
+ return fmt.Errorf(`(jwk.cachedSet).Remove: jwk.CachedSet is immutable`)
+}
+
+// RemoveKey is a no-op for `jwk.CachedSet`, as the `jwk.Set` should be treated read-only
+func (*cachedSet) RemoveKey(_ Key) error {
+ return fmt.Errorf(`(jwk.cachedSet).RemoveKey: jwk.CachedSet is immutable`)
+}
+
+func (cs *cachedSet) Clone() (Set, error) {
+ set, err := cs.cached()
+ if err != nil {
+ return nil, fmt.Errorf(`failed to get cached jwk.Set: %w`, err)
+ }
+
+ return set.Clone()
+}
+
+// Get returns the value of non-Key field stored in the jwk.Set
+func (cs *cachedSet) Get(name string, dst any) error {
+ set, err := cs.cached()
+ if err != nil {
+ return err
+ }
+
+ return set.Get(name, dst)
+}
+
+// Key returns the Key at the specified index
+func (cs *cachedSet) Key(idx int) (Key, bool) {
+ set, err := cs.cached()
+ if err != nil {
+ return nil, false
+ }
+
+ return set.Key(idx)
+}
+
+func (cs *cachedSet) Index(key Key) int {
+ set, err := cs.cached()
+ if err != nil {
+ return -1
+ }
+
+ return set.Index(key)
+}
+
+func (cs *cachedSet) Keys() []string {
+ set, err := cs.cached()
+ if err != nil {
+ return nil
+ }
+
+ return set.Keys()
+}
+
+func (cs *cachedSet) Len() int {
+ set, err := cs.cached()
+ if err != nil {
+ return -1
+ }
+
+ return set.Len()
+}
+
+func (cs *cachedSet) LookupKeyID(kid string) (Key, bool) {
+ set, err := cs.cached()
+ if err != nil {
+ return nil, false
+ }
+
+ return set.LookupKeyID(kid)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/convert.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/convert.go
new file mode 100644
index 0000000000..057f4b02a0
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/convert.go
@@ -0,0 +1,399 @@
+package jwk
+
+import (
+ "crypto/ecdh"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "errors"
+ "fmt"
+ "math/big"
+ "reflect"
+ "sync"
+
+ "github.com/lestrrat-go/blackmagic"
+ "github.com/lestrrat-go/jwx/v3/internal/ecutil"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+)
+
+// # Converting between Raw Keys and `jwk.Key`s
+//
+// A converter that converts from a raw key to a `jwk.Key` is called a KeyImporter.
+// A converter that converts from a `jwk.Key` to a raw key is called a KeyExporter.
+
+var keyImporters = make(map[reflect.Type]KeyImporter)
+var keyExporters = make(map[jwa.KeyType][]KeyExporter)
+
+var muKeyImporters sync.RWMutex
+var muKeyExporters sync.RWMutex
+
+// RegisterKeyImporter registers a KeyImporter for the given raw key. When `jwk.Import()` is called,
+// the library will look up the appropriate KeyImporter for the given raw key type (via `reflect`)
+// and execute the KeyImporters in succession until either one of them succeeds, or all of them fail.
+func RegisterKeyImporter(from any, conv KeyImporter) {
+ muKeyImporters.Lock()
+ defer muKeyImporters.Unlock()
+ keyImporters[reflect.TypeOf(from)] = conv
+}
+
+// RegisterKeyExporter registers a KeyExporter for the given key type. When `key.Raw()` is called,
+// the library will look up the appropriate KeyExporter for the given key type and execute the
+// KeyExporters in succession until either one of them succeeds, or all of them fail.
+func RegisterKeyExporter(kty jwa.KeyType, conv KeyExporter) {
+ muKeyExporters.Lock()
+ defer muKeyExporters.Unlock()
+ convs, ok := keyExporters[kty]
+ if !ok {
+ convs = []KeyExporter{conv}
+ } else {
+ convs = append([]KeyExporter{conv}, convs...)
+ }
+ keyExporters[kty] = convs
+}
+
+// KeyImporter is used to convert from a raw key to a `jwk.Key`. mneumonic: from the PoV of the `jwk.Key`,
+// we're _importing_ a raw key.
+type KeyImporter interface {
+ // Import takes the raw key to be converted, and returns a `jwk.Key` or an error if the conversion fails.
+ Import(any) (Key, error)
+}
+
+// KeyImportFunc is a convenience type to implement KeyImporter as a function.
+type KeyImportFunc func(any) (Key, error)
+
+func (f KeyImportFunc) Import(raw any) (Key, error) {
+ return f(raw)
+}
+
+// KeyExporter is used to convert from a `jwk.Key` to a raw key. mneumonic: from the PoV of the `jwk.Key`,
+// we're _exporting_ it to a raw key.
+type KeyExporter interface {
+ // Export takes the `jwk.Key` to be converted, and a hint (the raw key to be converted to).
+ // The hint is the object that the user requested the result to be assigned to.
+ // The method should return the converted raw key, or an error if the conversion fails.
+ //
+ // Third party modules MUST NOT modifiy the hint object.
+ //
+ // When the user calls `key.Export(dst)`, the `dst` object is a _pointer_ to the
+ // object that the user wants the result to be assigned to, but the converter
+ // receives the _value_ that this pointer points to, to make it easier to
+ // detect the type of the result.
+ //
+ // Note that the second argument may be an `any` (which means that the
+ // user has delegated the type detection to the converter).
+ //
+ // Export must NOT modify the hint object, and should return jwk.ContinueError
+ // if the hint object is not compatible with the converter.
+ Export(Key, any) (any, error)
+}
+
+// KeyExportFunc is a convenience type to implement KeyExporter as a function.
+type KeyExportFunc func(Key, any) (any, error)
+
+func (f KeyExportFunc) Export(key Key, hint any) (any, error) {
+ return f(key, hint)
+}
+
+func init() {
+ {
+ f := KeyImportFunc(rsaPrivateKeyToJWK)
+ k := rsa.PrivateKey{}
+ RegisterKeyImporter(k, f)
+ RegisterKeyImporter(&k, f)
+ }
+ {
+ f := KeyImportFunc(rsaPublicKeyToJWK)
+ k := rsa.PublicKey{}
+ RegisterKeyImporter(k, f)
+ RegisterKeyImporter(&k, f)
+ }
+ {
+ f := KeyImportFunc(ecdsaPrivateKeyToJWK)
+ k := ecdsa.PrivateKey{}
+ RegisterKeyImporter(k, f)
+ RegisterKeyImporter(&k, f)
+ }
+ {
+ f := KeyImportFunc(ecdsaPublicKeyToJWK)
+ k := ecdsa.PublicKey{}
+ RegisterKeyImporter(k, f)
+ RegisterKeyImporter(&k, f)
+ }
+ {
+ f := KeyImportFunc(okpPrivateKeyToJWK)
+ for _, k := range []any{ed25519.PrivateKey(nil)} {
+ RegisterKeyImporter(k, f)
+ }
+ }
+ {
+ f := KeyImportFunc(ecdhPrivateKeyToJWK)
+ for _, k := range []any{ecdh.PrivateKey{}, &ecdh.PrivateKey{}} {
+ RegisterKeyImporter(k, f)
+ }
+ }
+ {
+ f := KeyImportFunc(okpPublicKeyToJWK)
+ for _, k := range []any{ed25519.PublicKey(nil)} {
+ RegisterKeyImporter(k, f)
+ }
+ }
+ {
+ f := KeyImportFunc(ecdhPublicKeyToJWK)
+ for _, k := range []any{ecdh.PublicKey{}, &ecdh.PublicKey{}} {
+ RegisterKeyImporter(k, f)
+ }
+ }
+ RegisterKeyImporter([]byte(nil), KeyImportFunc(bytesToKey))
+}
+
+func ecdhPrivateKeyToJWK(src any) (Key, error) {
+ var raw *ecdh.PrivateKey
+ switch src := src.(type) {
+ case *ecdh.PrivateKey:
+ raw = src
+ case ecdh.PrivateKey:
+ raw = &src
+ default:
+ return nil, fmt.Errorf(`cannot convert key type '%T' to ECDH jwk.Key`, src)
+ }
+
+ switch raw.Curve() {
+ case ecdh.X25519():
+ return okpPrivateKeyToJWK(raw)
+ case ecdh.P256():
+ return ecdhPrivateKeyToECJWK(raw, elliptic.P256())
+ case ecdh.P384():
+ return ecdhPrivateKeyToECJWK(raw, elliptic.P384())
+ case ecdh.P521():
+ return ecdhPrivateKeyToECJWK(raw, elliptic.P521())
+ default:
+ return nil, fmt.Errorf(`unsupported curve %s`, raw.Curve())
+ }
+}
+
+func ecdhPrivateKeyToECJWK(raw *ecdh.PrivateKey, crv elliptic.Curve) (Key, error) {
+ pub := raw.PublicKey()
+ rawpub := pub.Bytes()
+
+ size := ecutil.CalculateKeySize(crv)
+ var x, y, d big.Int
+ x.SetBytes(rawpub[1 : 1+size])
+ y.SetBytes(rawpub[1+size:])
+ d.SetBytes(raw.Bytes())
+
+ var ecdsaPriv ecdsa.PrivateKey
+ ecdsaPriv.Curve = crv
+ ecdsaPriv.D = &d
+ ecdsaPriv.X = &x
+ ecdsaPriv.Y = &y
+ return ecdsaPrivateKeyToJWK(&ecdsaPriv)
+}
+
+func ecdhPublicKeyToJWK(src any) (Key, error) {
+ var raw *ecdh.PublicKey
+ switch src := src.(type) {
+ case *ecdh.PublicKey:
+ raw = src
+ case ecdh.PublicKey:
+ raw = &src
+ default:
+ return nil, fmt.Errorf(`cannot convert key type '%T' to ECDH jwk.Key`, src)
+ }
+
+ switch raw.Curve() {
+ case ecdh.X25519():
+ return okpPublicKeyToJWK(raw)
+ case ecdh.P256():
+ return ecdhPublicKeyToECJWK(raw, elliptic.P256())
+ case ecdh.P384():
+ return ecdhPublicKeyToECJWK(raw, elliptic.P384())
+ case ecdh.P521():
+ return ecdhPublicKeyToECJWK(raw, elliptic.P521())
+ default:
+ return nil, fmt.Errorf(`unsupported curve %s`, raw.Curve())
+ }
+}
+
+func ecdhPublicKeyToECJWK(raw *ecdh.PublicKey, crv elliptic.Curve) (Key, error) {
+ rawbytes := raw.Bytes()
+ size := ecutil.CalculateKeySize(crv)
+ var x, y big.Int
+
+ x.SetBytes(rawbytes[1 : 1+size])
+ y.SetBytes(rawbytes[1+size:])
+ var ecdsaPriv ecdsa.PublicKey
+ ecdsaPriv.Curve = crv
+ ecdsaPriv.X = &x
+ ecdsaPriv.Y = &y
+ return ecdsaPublicKeyToJWK(&ecdsaPriv)
+}
+
+// These may seem a bit repetitive and redandunt, but the problem is that
+// each key type has its own Import method -- for example, Import(*ecdsa.PrivateKey)
+// vs Import(*rsa.PrivateKey), and therefore they can't just be bundled into
+// a single function.
+func rsaPrivateKeyToJWK(src any) (Key, error) {
+ var raw *rsa.PrivateKey
+ switch src := src.(type) {
+ case *rsa.PrivateKey:
+ raw = src
+ case rsa.PrivateKey:
+ raw = &src
+ default:
+ return nil, fmt.Errorf(`cannot convert key type '%T' to RSA jwk.Key`, src)
+ }
+ k := newRSAPrivateKey()
+ if err := k.Import(raw); err != nil {
+ return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, raw, err)
+ }
+ return k, nil
+}
+
+func rsaPublicKeyToJWK(src any) (Key, error) {
+ var raw *rsa.PublicKey
+ switch src := src.(type) {
+ case *rsa.PublicKey:
+ raw = src
+ case rsa.PublicKey:
+ raw = &src
+ default:
+ return nil, fmt.Errorf(`cannot convert key type '%T' to RSA jwk.Key`, src)
+ }
+ k := newRSAPublicKey()
+ if err := k.Import(raw); err != nil {
+ return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, raw, err)
+ }
+ return k, nil
+}
+
+func ecdsaPrivateKeyToJWK(src any) (Key, error) {
+ var raw *ecdsa.PrivateKey
+ switch src := src.(type) {
+ case *ecdsa.PrivateKey:
+ raw = src
+ case ecdsa.PrivateKey:
+ raw = &src
+ default:
+ return nil, fmt.Errorf(`cannot convert key type '%T' to ECDSA jwk.Key`, src)
+ }
+ k := newECDSAPrivateKey()
+ if err := k.Import(raw); err != nil {
+ return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, raw, err)
+ }
+ return k, nil
+}
+
+func ecdsaPublicKeyToJWK(src any) (Key, error) {
+ var raw *ecdsa.PublicKey
+ switch src := src.(type) {
+ case *ecdsa.PublicKey:
+ raw = src
+ case ecdsa.PublicKey:
+ raw = &src
+ default:
+ return nil, fmt.Errorf(`cannot convert key type '%T' to ECDSA jwk.Key`, src)
+ }
+ k := newECDSAPublicKey()
+ if err := k.Import(raw); err != nil {
+ return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, raw, err)
+ }
+ return k, nil
+}
+
+func okpPrivateKeyToJWK(src any) (Key, error) {
+ var raw any
+ switch src.(type) {
+ case ed25519.PrivateKey, *ecdh.PrivateKey:
+ raw = src
+ case ecdh.PrivateKey:
+ raw = &src
+ default:
+ return nil, fmt.Errorf(`cannot convert key type '%T' to OKP jwk.Key`, src)
+ }
+ k := newOKPPrivateKey()
+ if err := k.Import(raw); err != nil {
+ return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, raw, err)
+ }
+ return k, nil
+}
+
+func okpPublicKeyToJWK(src any) (Key, error) {
+ var raw any
+ switch src.(type) {
+ case ed25519.PublicKey, *ecdh.PublicKey:
+ raw = src
+ case ecdh.PublicKey:
+ raw = &src
+ default:
+ return nil, fmt.Errorf(`jwk: convert raw to OKP jwk.Key: cannot convert key type '%T' to OKP jwk.Key`, src)
+ }
+ k := newOKPPublicKey()
+ if err := k.Import(raw); err != nil {
+ return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, raw, err)
+ }
+ return k, nil
+}
+
+func bytesToKey(src any) (Key, error) {
+ var raw []byte
+ switch src := src.(type) {
+ case []byte:
+ raw = src
+ default:
+ return nil, fmt.Errorf(`cannot convert key type '%T' to symmetric jwk.Key`, src)
+ }
+
+ k := newSymmetricKey()
+ if err := k.Import(raw); err != nil {
+ return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, raw, err)
+ }
+ return k, nil
+}
+
+// Export converts a `jwk.Key` to a Export key. The dst argument must be a pointer to the
+// object that the user wants the result to be assigned to.
+//
+// Normally you would pass a pointer to the zero value of the raw key type
+// such as &(*rsa.PrivateKey) or &(*ecdsa.PublicKey), which gets assigned
+// the converted key.
+//
+// If you do not know the exact type of a jwk.Key before attempting
+// to obtain the raw key, you can simply pass a pointer to an
+// empty interface as the second argument
+//
+// If you already know the exact type, it is recommended that you
+// pass a pointer to the zero value of the actual key type for efficiency.
+//
+// Be careful when/if you are using a third party key type that implements
+// the `jwk.Key` interface, as the first argument. This function tries hard
+// to Do The Right Thing, but it is not guaranteed to work in all cases,
+// especially when the object implements the `jwk.Key` interface via
+// embedding.
+func Export(key Key, dst any) error {
+ // dst better be a pointer
+ rv := reflect.ValueOf(dst)
+ if rv.Kind() != reflect.Ptr {
+ return fmt.Errorf(`jwk.Export: destination object must be a pointer`)
+ }
+ muKeyExporters.RLock()
+ exporters, ok := keyExporters[key.KeyType()]
+ muKeyExporters.RUnlock()
+ if !ok {
+ return fmt.Errorf(`jwk.Export: no exporters registered for key type '%T'`, key)
+ }
+ for _, conv := range exporters {
+ v, err := conv.Export(key, dst)
+ if err != nil {
+ if errors.Is(err, ContinueError()) {
+ continue
+ }
+ return fmt.Errorf(`jwk.Export: failed to export jwk.Key to raw format: %w`, err)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, v); err != nil {
+ return fmt.Errorf(`jwk.Export: failed to assign key: %w`, err)
+ }
+ return nil
+ }
+ return fmt.Errorf(`jwk.Export: no suitable exporter found for key type '%T'`, key)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/doc.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/doc.go
new file mode 100644
index 0000000000..7df707521b
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/doc.go
@@ -0,0 +1,294 @@
+// Package jwk implements JWK as described in https://tools.ietf.org/html/rfc7517
+//
+// This package implements jwk.Key to represent a single JWK, and jwk.Set to represent
+// a set of JWKs.
+//
+// The `jwk.Key` type is an interface, which hides the underlying implementation for
+// each key type. Each key type can further be converted to interfaces for known
+// types, such as `jwk.ECDSAPrivateKey`, `jwk.RSAPublicKey`, etc. This may not necessarily
+// work for third party key types (see section on "Registering a key type" below).
+//
+// Users can create a JWK in two ways. One is to unmarshal a JSON representation of a
+// key. The second one is to use `jwk.Import()` to import a raw key and convert it to
+// a jwk.Key.
+//
+// # Simple Usage
+//
+// You can parse a JWK from a JSON payload:
+//
+// jwk.ParseKey([]byte(`{"kty":"EC",...}`))
+//
+// You can go back and forth between raw key types and JWKs:
+//
+// jwkKey, _ := jwk.Import(rsaPrivateKey)
+// var rawKey *rsa.PRrivateKey
+// jwkKey.Raw(&rawKey)
+//
+// You can use them to sign/verify/encrypt/decrypt:
+//
+// jws.Sign([]byte(`...`), jws.WithKey(jwa.RS256, jwkKey))
+// jwe.Encrypt([]byte(`...`), jwe.WithKey(jwa.RSA_OAEP, jwkKey))
+//
+// See examples/jwk_parse_example_test.go and other files in the exmaples/ directory for more.
+//
+// # Advanced Usage: Registering a custom key type and conversion routines
+//
+// Caveat Emptor: Functionality around registering keys
+// (KeyProbe/KeyParser/KeyImporter/KeyExporter) should be considered experimental.
+// While we expect that the functionality itself will remain, the API may
+// change in backward incompatible ways, even during minor version
+// releases.
+//
+// ## tl;dr
+//
+// * KeyProbe: Used for parsing JWKs in JSON format. Probes hint fields to be used for later parsing by KeyParser
+// * KeyParser: Used for parsing JWKs in JSON format. Parses the JSON payload into a jwk.Key using the KeyProbe as hint
+// * KeyImporter: Used for converting raw key into jwk.Key.
+// * KeyExporter: Used for converting jwk.Key into raw key.
+//
+// ## Overview
+//
+// You can add the ability to use a JWK type that this library does not
+// implement out of the box. You can do this by registering your own
+// KeyParser, KeyImporter, and KeyExporter instances.
+//
+// func init() {
+// jwk.RegiserProbeField(reflect.StructField{Name: "SomeHint", Type: reflect.TypeOf(""), Tag: `json:"some_hint"`})
+// jwk.RegisterKeyParser(&MyKeyParser{})
+// jwk.RegisterKeyImporter(&MyKeyImporter{})
+// jwk.RegisterKeyExporter(&MyKeyExporter{})
+// }
+//
+// The KeyParser is used to parse JSON payloads and conver them into a jwk.Key.
+// The KeyImporter is used to convert a raw key (e.g. *rsa.PrivateKey, *ecdsa.PrivateKey, etc) into a jwk.Key.
+// The KeyExporter is used to convert a jwk.Key into a raw key.
+//
+// Although we believe the mechanism has been streamline quite a lot, it is also true
+// that the entire process of parsing and converting keys are much more convoluted than you might
+// think. Please know before hand that if you intend to add support for a new key type,
+// it _WILL_ require you to learn this module pretty much in-and-out.
+//
+// Read on for more explanation.
+//
+// ## Registering a KeyParser
+//
+// In order to understand how parsing works, we need to explain how the `jwk.ParseKey()` works.
+//
+// The first thing that occurs when parsing a key is a partial
+// unmarshaling of the payload into a hint / probe object.
+//
+// Because the `json.Unmarshal` works by calling the `UnmarshalJSON`
+// method on a concrete object, we need to create a concrete object first.
+// In order/ to create the appropriate Go object, we need to know which concrete
+// object to create from the JSON payload, meaning we need to peek into the
+// payload and figure out what type of key it is.
+//
+// In order to do this, we effectively need to parse the JSON payload twice.
+// First, we "probe" the payload to figure out what kind of key it is, then
+// we parse it again to create the actual key object.
+//
+// For probing, we create a new "probe" object (KeyProbe, which is not
+// directly available to end users) to populate the object with hints from the payload.
+// For example, a JWK representing an RSA key would look like:
+//
+// { "kty": "RSA", "n": ..., "e": ..., ... }
+//
+// The default KeyProbe is constructed to unmarshal "kty" and "d" fields,
+// because that is enough information to determine what kind of key to
+// construct.
+//
+// For example, if the payload contains "kty" field with the value "RSA",
+// we know that it's an RSA key. If it contains "EC", we know that it's
+// an EC key. Furthermore, if the payload contains some value in the "d" field, we can
+// also tell that this is a private key, as only private keys need
+// this field.
+//
+// For most cases, the default KeyProbe implementation should be sufficient.
+// However, there may be cases in the future where there are new key types
+// that require further information. Perhaps you are embedding another hint
+// in your JWK to further specify what kind of key it is. In that case, you
+// would need to probe more.
+//
+// Normally you can only change how an object is unmarshaled by specifying
+// JSON tags when defining a struct, but we use `reflect` package capabilities
+// to create an object dynamically, which is shared among all parsing operations.
+//
+// To add a new field to be probed, you need to register a new `reflect.StructField`
+// object that has all of the information. For example, the code below would
+// register a field named "MyHint" that is of type string, and has a JSON tag
+// of "my_hint".
+//
+// jwk.RegisterProbeField(reflect.StructField{Name: "MyHint", Type: reflect.TypeOf(""), Tag: `json:"my_hint"`})
+//
+// The value of this field can be retrieved by calling `Get()` method on the
+// KeyProbe object (from the `KeyParser`'s `ParseKey()` method discussed later)
+//
+// var myhint string
+// _ = probe.Get("MyHint", &myhint)
+//
+// var kty string
+// _ = probe.Get("Kty", &kty)
+//
+// This mechanism allows you to be flexible when trying to determine the key type
+// to instantiate.
+//
+// ## Parse via the KeyParser
+//
+// When `jwk.Parse` / `jwk.ParseKey` is called, the library will first probe
+// the payload as discussed above.
+//
+// Once the probe is done, the library will iterate over the registered parsers
+// and attempt to parse the key by calling their `ParseKey()` methods.
+//
+// The parsers will be called in reverse order that they were registered.
+// This means that it will try all parsers that were registered by third
+// parties, and once those are exhausted, the default parser will be used.
+//
+// Each parser's `ParseKey()“ method will receive three arguments: the probe object, a
+// KeyUnmarshaler, and the raw payload. The probe object can be used
+// as a hint to determine what kind of key to instantiate. An example
+// pseudocode may look like this:
+//
+// var kty string
+// _ = probe.Get("Kty", &kty)
+// switch kty {
+// case "RSA":
+// // create an RSA key
+// case "EC":
+// // create an EC key
+// ...
+// }
+//
+// The `KeyUnmarshaler` is a thin wrapper around `json.Unmarshal`. It works almost
+// identical to `json.Unmarshal`, but it allows us to add extra magic that is
+// specific to this library (which users do not need to be aware of) before calling
+// the actual `json.Unmarshal`. Please use the `KeyUnmarshaler` to unmarshal JWKs instead of `json.Unmarshal`.
+//
+// Putting it all together, the boiler plate for registering a new parser may look like this:
+//
+// func init() {
+// jwk.RegisterFieldProbe(reflect.StructField{Name: "MyHint", Type: reflect.TypeOf(""), Tag: `json:"my_hint"`})
+// jwk.RegisterParser(&MyKeyParser{})
+// }
+//
+// type MyKeyParser struct { ... }
+// func(*MyKeyParser) ParseKey(rawProbe *KeyProbe, unmarshaler KeyUnmarshaler, data []byte) (jwk.Key, error) {
+// // Create concrete type
+// var hint string
+// if err := probe.Get("MyHint", &hint); err != nil {
+// // if it doesn't have the `my_hint` field, it probably means
+// // it's not for us, so we return ContinueParseError so that
+// // the next parser can pick it up
+// return nil, jwk.ContinueParseError()
+// }
+//
+// // Use hint to determine concrete key type
+// var key jwk.Key
+// switch hint {
+// case ...:
+// key = = myNewAwesomeJWK()
+// ...
+// }
+//
+// return unmarshaler.Unmarshal(data, key)
+// }
+//
+// ## Registering KeyImporter/KeyExporter
+//
+// If you are going to do anything with the key that was parsed by your KeyParser,
+// you will need to tell the library how to convert back and forth between
+// raw keys and JWKs. Conversion from raw keys to jwk.Keys are done by KeyImporters,
+// and conversion from jwk.Keys to raw keys are done by KeyExporters.
+//
+// ## Using jwk.Import() using KeyImporter
+//
+// Each KeyImporter is hooked to run against a specific raw key type.
+//
+// When `jwk.Import()` is called, the library will iterate over all registered
+// KeyImporters for the specified raw key type, and attempt to convert the raw
+// key to a JWK by calling the `Import()` method on each KeyImporter.
+//
+// The KeyImporter's `Import()` method will receive the raw key to be converted,
+// and should return a JWK or an error if the conversion fails, or the return
+// `jwk.ContinueError()` if the specified raw key cannot be handled by ths/ KeyImporter.
+//
+// Once a KeyImporter is available, you will be able to pass the raw key to `jwk.Import()`.
+// The following example shows how you might register a KeyImporter for a hypotheical
+// mypkg.SuperSecretKey:
+//
+// jwk.RegisterKeyImporter(&mypkg.SuperSecretKey{}, jwk.KeyImportFunc(imnportSuperSecretKey))
+//
+// func importSuperSecretKey(key any) (jwk.Key, error) {
+// mykey, ok := key.(*mypkg.SuperSecretKey)
+// if !ok {
+// // You must return jwk.ContinueError here, or otherwise
+// // processing will stop with an error
+// return nil, fmt.Errorf("invalid key type %T for importer: %w", key, jwk.ContinueError())
+// }
+//
+// return mypkg.SuperSecretJWK{ .... }, nil // You could reuse existing JWK types if you can
+// }
+//
+// ## Registering a KeyExporter
+//
+// KeyExporters are the opposite of KeyImporters: they convert a JWK to a raw key when `key.Raw(...)` is
+// called. If you intend to use `key.Raw(...)` for a JWK created using one of your KeyImporters,
+// you will also
+//
+// KeyExporters are registered by key type. For example, if you want to register a KeyExporter for
+// RSA keys, you would do:
+//
+// jwk.RegisterKeyExporter(jwa.RSA, jwk.KeyExportFunc(exportRSAKey))
+//
+// For a given JWK, it will be passed a "destination" object to store the exported raw key. For example,
+// an RSA-based private JWK can be exported to a `*rsa.PrivateKey` or to a `*any`, but not
+// to a `*ecdsa.PrivateKey`:
+//
+// var dst *rsa.PrivateKey
+// key.Raw(&dst) // OK
+//
+// var dst any
+// key.Raw(&dst) // OK
+//
+// var dst *ecdsa.PrivateKey
+// key.Raw(&dst) // Error, if key is an RSA key
+//
+// You will need to handle this distinction yourself in your KeyImporter. For example, certain
+// elliptic curve keys can be expressed in JWK in the same format, minus the "kty". In that case
+// you will need to check for the type of the destination object and return an error if it is
+// not compatible with your key.
+//
+// var raw mypkg.PrivateKey // assume a hypothetical private key type using a different curve than standard ones lie P-256
+// key, _ := jwk.Import(raw)
+// // key could be jwk.ECDSAPrivateKey, with different curve than P-256
+//
+// var dst *ecdsa.PrivateKey
+// key.Raw(&dst) // your KeyImporter will be called with *ecdsa.PrivateKey, which is not compatible with your key
+//
+// To implement this your code should look like the following:
+//
+// jwk.RegisterKeyExporter(jwk.EC, jwk.KeyExportFunc(exportMyKey))
+//
+// func exportMyKey(key jwk.Key, hint any) (any, error) {
+// // check if the type of object in hint is compatible with your key
+// switch hint.(type) {
+// case *mypkg.PrivateKey, *any:
+// // OK, we can proceed
+// default:
+// // Not compatible, return jwk.ContinueError
+// return nil, jwk.ContinueError()
+// }
+//
+// // key is a jwk.ECDSAPrivateKey or jwk.ECDSAPublicKey
+// switch key := key.(type) {
+// case jwk.ECDSAPrivateKey:
+// // convert key to mypkg.PrivateKey
+// case jwk.ECDSAPublicKey:
+// // convert key to mypkg.PublicKey
+// default:
+// // Not compatible, return jwk.ContinueError
+// return nil, jwk.ContinueError()
+// }
+// return ..., nil
+// }
+package jwk
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa.go
new file mode 100644
index 0000000000..3dcd33bb1f
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa.go
@@ -0,0 +1,402 @@
+package jwk
+
+import (
+ "crypto"
+ "crypto/ecdh"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "fmt"
+ "math/big"
+ "reflect"
+
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+ "github.com/lestrrat-go/jwx/v3/internal/ecutil"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ ourecdsa "github.com/lestrrat-go/jwx/v3/jwk/ecdsa"
+)
+
+func init() {
+ ourecdsa.RegisterCurve(jwa.P256(), elliptic.P256())
+ ourecdsa.RegisterCurve(jwa.P384(), elliptic.P384())
+ ourecdsa.RegisterCurve(jwa.P521(), elliptic.P521())
+
+ RegisterKeyExporter(jwa.EC(), KeyExportFunc(ecdsaJWKToRaw))
+}
+
+func (k *ecdsaPublicKey) Import(rawKey *ecdsa.PublicKey) error {
+ k.mu.Lock()
+ defer k.mu.Unlock()
+
+ if rawKey.X == nil {
+ return fmt.Errorf(`invalid ecdsa.PublicKey`)
+ }
+
+ if rawKey.Y == nil {
+ return fmt.Errorf(`invalid ecdsa.PublicKey`)
+ }
+
+ xbuf := ecutil.AllocECPointBuffer(rawKey.X, rawKey.Curve)
+ ybuf := ecutil.AllocECPointBuffer(rawKey.Y, rawKey.Curve)
+ defer ecutil.ReleaseECPointBuffer(xbuf)
+ defer ecutil.ReleaseECPointBuffer(ybuf)
+
+ k.x = make([]byte, len(xbuf))
+ copy(k.x, xbuf)
+ k.y = make([]byte, len(ybuf))
+ copy(k.y, ybuf)
+
+ alg, err := ourecdsa.AlgorithmFromCurve(rawKey.Curve)
+ if err != nil {
+ return fmt.Errorf(`jwk: failed to get algorithm for converting ECDSA public key to JWK: %w`, err)
+ }
+ k.crv = &alg
+
+ return nil
+}
+
+func (k *ecdsaPrivateKey) Import(rawKey *ecdsa.PrivateKey) error {
+ k.mu.Lock()
+ defer k.mu.Unlock()
+
+ if rawKey.PublicKey.X == nil {
+ return fmt.Errorf(`invalid ecdsa.PrivateKey`)
+ }
+ if rawKey.PublicKey.Y == nil {
+ return fmt.Errorf(`invalid ecdsa.PrivateKey`)
+ }
+ if rawKey.D == nil {
+ return fmt.Errorf(`invalid ecdsa.PrivateKey`)
+ }
+
+ xbuf := ecutil.AllocECPointBuffer(rawKey.PublicKey.X, rawKey.Curve)
+ ybuf := ecutil.AllocECPointBuffer(rawKey.PublicKey.Y, rawKey.Curve)
+ dbuf := ecutil.AllocECPointBuffer(rawKey.D, rawKey.Curve)
+ defer ecutil.ReleaseECPointBuffer(xbuf)
+ defer ecutil.ReleaseECPointBuffer(ybuf)
+ defer ecutil.ReleaseECPointBuffer(dbuf)
+
+ k.x = make([]byte, len(xbuf))
+ copy(k.x, xbuf)
+ k.y = make([]byte, len(ybuf))
+ copy(k.y, ybuf)
+ k.d = make([]byte, len(dbuf))
+ copy(k.d, dbuf)
+
+ alg, err := ourecdsa.AlgorithmFromCurve(rawKey.Curve)
+ if err != nil {
+ return fmt.Errorf(`jwk: failed to get algorithm for converting ECDSA private key to JWK: %w`, err)
+ }
+ k.crv = &alg
+
+ return nil
+}
+
+func buildECDSAPublicKey(alg jwa.EllipticCurveAlgorithm, xbuf, ybuf []byte) (*ecdsa.PublicKey, error) {
+ crv, err := ourecdsa.CurveFromAlgorithm(alg)
+ if err != nil {
+ return nil, fmt.Errorf(`jwk: failed to get algorithm for ECDSA public key: %w`, err)
+ }
+
+ var x, y big.Int
+ x.SetBytes(xbuf)
+ y.SetBytes(ybuf)
+
+ return &ecdsa.PublicKey{Curve: crv, X: &x, Y: &y}, nil
+}
+
+func buildECDHPublicKey(alg jwa.EllipticCurveAlgorithm, xbuf, ybuf []byte) (*ecdh.PublicKey, error) {
+ var ecdhcrv ecdh.Curve
+ switch alg {
+ case jwa.X25519():
+ ecdhcrv = ecdh.X25519()
+ case jwa.P256():
+ ecdhcrv = ecdh.P256()
+ case jwa.P384():
+ ecdhcrv = ecdh.P384()
+ case jwa.P521():
+ ecdhcrv = ecdh.P521()
+ default:
+ return nil, fmt.Errorf(`jwk: unsupported ECDH curve %s`, alg)
+ }
+
+ return ecdhcrv.NewPublicKey(append([]byte{0x04}, append(xbuf, ybuf...)...))
+}
+
+func buildECDHPrivateKey(alg jwa.EllipticCurveAlgorithm, dbuf []byte) (*ecdh.PrivateKey, error) {
+ var ecdhcrv ecdh.Curve
+ switch alg {
+ case jwa.X25519():
+ ecdhcrv = ecdh.X25519()
+ case jwa.P256():
+ ecdhcrv = ecdh.P256()
+ case jwa.P384():
+ ecdhcrv = ecdh.P384()
+ case jwa.P521():
+ ecdhcrv = ecdh.P521()
+ default:
+ return nil, fmt.Errorf(`jwk: unsupported ECDH curve %s`, alg)
+ }
+
+ return ecdhcrv.NewPrivateKey(dbuf)
+}
+
+var ecdsaConvertibleTypes = []reflect.Type{
+ reflect.TypeOf((*ECDSAPrivateKey)(nil)).Elem(),
+ reflect.TypeOf((*ECDSAPublicKey)(nil)).Elem(),
+}
+
+func ecdsaJWKToRaw(keyif Key, hint any) (any, error) {
+ var isECDH bool
+
+ extracted, err := extractEmbeddedKey(keyif, ecdsaConvertibleTypes)
+ if err != nil {
+ return nil, fmt.Errorf(`jwk: failed to extract embedded key: %w`, err)
+ }
+
+ switch k := extracted.(type) {
+ case ECDSAPrivateKey:
+ switch hint.(type) {
+ case ecdsa.PrivateKey, *ecdsa.PrivateKey:
+ case ecdh.PrivateKey, *ecdh.PrivateKey:
+ isECDH = true
+ default:
+ rv := reflect.ValueOf(hint)
+ //nolint:revive
+ if rv.Kind() == reflect.Ptr && rv.Elem().Kind() == reflect.Interface {
+ // pointer to an interface value, presumably they want us to dynamically
+ // create an object of the right type
+ } else {
+ return nil, fmt.Errorf(`invalid destination object type %T: %w`, hint, ContinueError())
+ }
+ }
+
+ locker, ok := k.(rlocker)
+ if ok {
+ locker.rlock()
+ defer locker.runlock()
+ }
+
+ crv, ok := k.Crv()
+ if !ok {
+ return nil, fmt.Errorf(`missing "crv" field`)
+ }
+
+ if isECDH {
+ d, ok := k.D()
+ if !ok {
+ return nil, fmt.Errorf(`missing "d" field`)
+ }
+ return buildECDHPrivateKey(crv, d)
+ }
+
+ x, ok := k.X()
+ if !ok {
+ return nil, fmt.Errorf(`missing "x" field`)
+ }
+ y, ok := k.Y()
+ if !ok {
+ return nil, fmt.Errorf(`missing "y" field`)
+ }
+ pubk, err := buildECDSAPublicKey(crv, x, y)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to build public key: %w`, err)
+ }
+
+ var key ecdsa.PrivateKey
+ var d big.Int
+
+ origD, ok := k.D()
+ if !ok {
+ return nil, fmt.Errorf(`missing "d" field`)
+ }
+
+ d.SetBytes(origD)
+ key.D = &d
+ key.PublicKey = *pubk
+
+ return &key, nil
+ case ECDSAPublicKey:
+ switch hint.(type) {
+ case ecdsa.PublicKey, *ecdsa.PublicKey:
+ case ecdh.PublicKey, *ecdh.PublicKey:
+ isECDH = true
+ default:
+ rv := reflect.ValueOf(hint)
+ //nolint:revive
+ if rv.Kind() == reflect.Ptr && rv.Elem().Kind() == reflect.Interface {
+ // pointer to an interface value, presumably they want us to dynamically
+ // create an object of the right type
+ } else {
+ return nil, fmt.Errorf(`invalid destination object type %T: %w`, hint, ContinueError())
+ }
+ }
+
+ locker, ok := k.(rlocker)
+ if ok {
+ locker.rlock()
+ defer locker.runlock()
+ }
+
+ crv, ok := k.Crv()
+ if !ok {
+ return nil, fmt.Errorf(`missing "crv" field`)
+ }
+
+ x, ok := k.X()
+ if !ok {
+ return nil, fmt.Errorf(`missing "x" field`)
+ }
+
+ y, ok := k.Y()
+ if !ok {
+ return nil, fmt.Errorf(`missing "y" field`)
+ }
+ if isECDH {
+ return buildECDHPublicKey(crv, x, y)
+ }
+ return buildECDSAPublicKey(crv, x, y)
+ default:
+ return nil, ContinueError()
+ }
+}
+
+func makeECDSAPublicKey(src Key) (Key, error) {
+ newKey := newECDSAPublicKey()
+
+ // Iterate and copy everything except for the bits that should not be in the public key
+ for _, k := range src.Keys() {
+ switch k {
+ case ECDSADKey:
+ continue
+ default:
+ var v any
+ if err := src.Get(k, &v); err != nil {
+ return nil, fmt.Errorf(`ecdsa: makeECDSAPublicKey: failed to get field %q: %w`, k, err)
+ }
+
+ if err := newKey.Set(k, v); err != nil {
+ return nil, fmt.Errorf(`ecdsa: makeECDSAPublicKey: failed to set field %q: %w`, k, err)
+ }
+ }
+ }
+
+ return newKey, nil
+}
+
+func (k *ecdsaPrivateKey) PublicKey() (Key, error) {
+ return makeECDSAPublicKey(k)
+}
+
+func (k *ecdsaPublicKey) PublicKey() (Key, error) {
+ return makeECDSAPublicKey(k)
+}
+
+func ecdsaThumbprint(hash crypto.Hash, crv, x, y string) []byte {
+ h := hash.New()
+ fmt.Fprint(h, `{"crv":"`)
+ fmt.Fprint(h, crv)
+ fmt.Fprint(h, `","kty":"EC","x":"`)
+ fmt.Fprint(h, x)
+ fmt.Fprint(h, `","y":"`)
+ fmt.Fprint(h, y)
+ fmt.Fprint(h, `"}`)
+ return h.Sum(nil)
+}
+
+// Thumbprint returns the JWK thumbprint using the indicated
+// hashing algorithm, according to RFC 7638
+func (k ecdsaPublicKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
+ k.mu.RLock()
+ defer k.mu.RUnlock()
+
+ var key ecdsa.PublicKey
+ if err := Export(&k, &key); err != nil {
+ return nil, fmt.Errorf(`failed to export ecdsa.PublicKey for thumbprint generation: %w`, err)
+ }
+
+ xbuf := ecutil.AllocECPointBuffer(key.X, key.Curve)
+ ybuf := ecutil.AllocECPointBuffer(key.Y, key.Curve)
+ defer ecutil.ReleaseECPointBuffer(xbuf)
+ defer ecutil.ReleaseECPointBuffer(ybuf)
+
+ return ecdsaThumbprint(
+ hash,
+ key.Curve.Params().Name,
+ base64.EncodeToString(xbuf),
+ base64.EncodeToString(ybuf),
+ ), nil
+}
+
+// Thumbprint returns the JWK thumbprint using the indicated
+// hashing algorithm, according to RFC 7638
+func (k ecdsaPrivateKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
+ k.mu.RLock()
+ defer k.mu.RUnlock()
+
+ var key ecdsa.PrivateKey
+ if err := Export(&k, &key); err != nil {
+ return nil, fmt.Errorf(`failed to export ecdsa.PrivateKey for thumbprint generation: %w`, err)
+ }
+
+ xbuf := ecutil.AllocECPointBuffer(key.X, key.Curve)
+ ybuf := ecutil.AllocECPointBuffer(key.Y, key.Curve)
+ defer ecutil.ReleaseECPointBuffer(xbuf)
+ defer ecutil.ReleaseECPointBuffer(ybuf)
+
+ return ecdsaThumbprint(
+ hash,
+ key.Curve.Params().Name,
+ base64.EncodeToString(xbuf),
+ base64.EncodeToString(ybuf),
+ ), nil
+}
+
+func ecdsaValidateKey(k interface {
+ Crv() (jwa.EllipticCurveAlgorithm, bool)
+ X() ([]byte, bool)
+ Y() ([]byte, bool)
+}, checkPrivate bool) error {
+ crvtyp, ok := k.Crv()
+ if !ok {
+ return fmt.Errorf(`missing "crv" field`)
+ }
+
+ crv, err := ourecdsa.CurveFromAlgorithm(crvtyp)
+ if err != nil {
+ return fmt.Errorf(`invalid curve algorithm %q: %w`, crvtyp, err)
+ }
+
+ keySize := ecutil.CalculateKeySize(crv)
+ if x, ok := k.X(); !ok || len(x) != keySize {
+ return fmt.Errorf(`invalid "x" length (%d) for curve %q`, len(x), crv.Params().Name)
+ }
+
+ if y, ok := k.Y(); !ok || len(y) != keySize {
+ return fmt.Errorf(`invalid "y" length (%d) for curve %q`, len(y), crv.Params().Name)
+ }
+
+ if checkPrivate {
+ if priv, ok := k.(keyWithD); ok {
+ if d, ok := priv.D(); !ok || len(d) != keySize {
+ return fmt.Errorf(`invalid "d" length (%d) for curve %q`, len(d), crv.Params().Name)
+ }
+ } else {
+ return fmt.Errorf(`missing "d" value`)
+ }
+ }
+ return nil
+}
+
+func (k *ecdsaPrivateKey) Validate() error {
+ if err := ecdsaValidateKey(k, true); err != nil {
+ return NewKeyValidationError(fmt.Errorf(`jwk.ECDSAPrivateKey: %w`, err))
+ }
+ return nil
+}
+
+func (k *ecdsaPublicKey) Validate() error {
+ if err := ecdsaValidateKey(k, false); err != nil {
+ return NewKeyValidationError(fmt.Errorf(`jwk.ECDSAPublicKey: %w`, err))
+ }
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa/BUILD.bazel
new file mode 100644
index 0000000000..bf058aa649
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa/BUILD.bazel
@@ -0,0 +1,15 @@
+load("@rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "ecdsa",
+ srcs = ["ecdsa.go"],
+ importpath = "github.com/lestrrat-go/jwx/v3/jwk/ecdsa",
+ visibility = ["//visibility:public"],
+ deps = ["//jwa"],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":ecdsa",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa/ecdsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa/ecdsa.go
new file mode 100644
index 0000000000..3392483218
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa/ecdsa.go
@@ -0,0 +1,76 @@
+package ecdsa
+
+import (
+ "crypto/elliptic"
+ "fmt"
+ "sync"
+
+ "github.com/lestrrat-go/jwx/v3/jwa"
+)
+
+var muCurves sync.RWMutex
+var algToCurveMap map[jwa.EllipticCurveAlgorithm]elliptic.Curve
+var curveToAlgMap map[elliptic.Curve]jwa.EllipticCurveAlgorithm
+var algList []jwa.EllipticCurveAlgorithm
+
+func init() {
+ muCurves.Lock()
+ algToCurveMap = make(map[jwa.EllipticCurveAlgorithm]elliptic.Curve)
+ curveToAlgMap = make(map[elliptic.Curve]jwa.EllipticCurveAlgorithm)
+ muCurves.Unlock()
+}
+
+// RegisterCurve registers a jwa.EllipticCurveAlgorithm constant and its
+// corresponding elliptic.Curve object. Users do not need to call this unless
+// they are registering a new ECDSA key type
+func RegisterCurve(alg jwa.EllipticCurveAlgorithm, crv elliptic.Curve) {
+ muCurves.Lock()
+ defer muCurves.Unlock()
+
+ algToCurveMap[alg] = crv
+ curveToAlgMap[crv] = alg
+ rebuildCurves()
+}
+
+func rebuildCurves() {
+ l := len(algToCurveMap)
+ if cap(algList) < l {
+ algList = make([]jwa.EllipticCurveAlgorithm, 0, l)
+ } else {
+ algList = algList[:0]
+ }
+
+ for alg := range algToCurveMap {
+ algList = append(algList, alg)
+ }
+}
+
+// Algorithms returns the list of registered jwa.EllipticCurveAlgorithms
+// that ca be used for ECDSA keys.
+func Algorithms() []jwa.EllipticCurveAlgorithm {
+ muCurves.RLock()
+ defer muCurves.RUnlock()
+
+ return algList
+}
+
+func AlgorithmFromCurve(crv elliptic.Curve) (jwa.EllipticCurveAlgorithm, error) {
+ alg, ok := curveToAlgMap[crv]
+ if !ok {
+ return jwa.InvalidEllipticCurve(), fmt.Errorf(`unknown elliptic curve: %q`, crv)
+ }
+ return alg, nil
+}
+
+func CurveFromAlgorithm(alg jwa.EllipticCurveAlgorithm) (elliptic.Curve, error) {
+ crv, ok := algToCurveMap[alg]
+ if !ok {
+ return nil, fmt.Errorf(`unknown elliptic curve algorithm: %q`, alg)
+ }
+ return crv, nil
+}
+
+func IsCurveAvailable(alg jwa.EllipticCurveAlgorithm) bool {
+ _, ok := algToCurveMap[alg]
+ return ok
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa_gen.go
new file mode 100644
index 0000000000..39536de3d8
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa_gen.go
@@ -0,0 +1,1432 @@
+// Code generated by tools/cmd/genjwk/main.go. DO NOT EDIT.
+
+package jwk
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "sync"
+
+ "github.com/lestrrat-go/blackmagic"
+ "github.com/lestrrat-go/jwx/v3/cert"
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+ "github.com/lestrrat-go/jwx/v3/internal/pool"
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+)
+
+const (
+ ECDSACrvKey = "crv"
+ ECDSADKey = "d"
+ ECDSAXKey = "x"
+ ECDSAYKey = "y"
+)
+
+type ECDSAPublicKey interface {
+ Key
+ Crv() (jwa.EllipticCurveAlgorithm, bool)
+ X() ([]byte, bool)
+ Y() ([]byte, bool)
+}
+
+type ecdsaPublicKey struct {
+ algorithm *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4
+ crv *jwa.EllipticCurveAlgorithm
+ keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4
+ keyOps *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3
+ keyUsage *string // https://tools.ietf.org/html/rfc7517#section-4.2
+ x []byte
+ x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6
+ x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7
+ x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8
+ x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5
+ y []byte
+ privateParams map[string]any
+ mu *sync.RWMutex
+ dc json.DecodeCtx
+}
+
+var _ ECDSAPublicKey = &ecdsaPublicKey{}
+var _ Key = &ecdsaPublicKey{}
+
+func newECDSAPublicKey() *ecdsaPublicKey {
+ return &ecdsaPublicKey{
+ mu: &sync.RWMutex{},
+ privateParams: make(map[string]any),
+ }
+}
+
+func (h ecdsaPublicKey) KeyType() jwa.KeyType {
+ return jwa.EC()
+}
+
+func (h ecdsaPublicKey) rlock() {
+ h.mu.RLock()
+}
+
+func (h ecdsaPublicKey) runlock() {
+ h.mu.RUnlock()
+}
+
+func (h ecdsaPublicKey) IsPrivate() bool {
+ return false
+}
+
+func (h *ecdsaPublicKey) Algorithm() (jwa.KeyAlgorithm, bool) {
+ if h.algorithm != nil {
+ return *(h.algorithm), true
+ }
+ return nil, false
+}
+
+func (h *ecdsaPublicKey) Crv() (jwa.EllipticCurveAlgorithm, bool) {
+ if h.crv != nil {
+ return *(h.crv), true
+ }
+ return jwa.InvalidEllipticCurve(), false
+}
+
+func (h *ecdsaPublicKey) KeyID() (string, bool) {
+ if h.keyID != nil {
+ return *(h.keyID), true
+ }
+ return "", false
+}
+
+func (h *ecdsaPublicKey) KeyOps() (KeyOperationList, bool) {
+ if h.keyOps != nil {
+ return *(h.keyOps), true
+ }
+ return nil, false
+}
+
+func (h *ecdsaPublicKey) KeyUsage() (string, bool) {
+ if h.keyUsage != nil {
+ return *(h.keyUsage), true
+ }
+ return "", false
+}
+
+func (h *ecdsaPublicKey) X() ([]byte, bool) {
+ if h.x != nil {
+ return h.x, true
+ }
+ return nil, false
+}
+
+func (h *ecdsaPublicKey) X509CertChain() (*cert.Chain, bool) {
+ return h.x509CertChain, true
+}
+
+func (h *ecdsaPublicKey) X509CertThumbprint() (string, bool) {
+ if h.x509CertThumbprint != nil {
+ return *(h.x509CertThumbprint), true
+ }
+ return "", false
+}
+
+func (h *ecdsaPublicKey) X509CertThumbprintS256() (string, bool) {
+ if h.x509CertThumbprintS256 != nil {
+ return *(h.x509CertThumbprintS256), true
+ }
+ return "", false
+}
+
+func (h *ecdsaPublicKey) X509URL() (string, bool) {
+ if h.x509URL != nil {
+ return *(h.x509URL), true
+ }
+ return "", false
+}
+
+func (h *ecdsaPublicKey) Y() ([]byte, bool) {
+ if h.y != nil {
+ return h.y, true
+ }
+ return nil, false
+}
+
+func (h *ecdsaPublicKey) Has(name string) bool {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ switch name {
+ case KeyTypeKey:
+ return true
+ case AlgorithmKey:
+ return h.algorithm != nil
+ case ECDSACrvKey:
+ return h.crv != nil
+ case KeyIDKey:
+ return h.keyID != nil
+ case KeyOpsKey:
+ return h.keyOps != nil
+ case KeyUsageKey:
+ return h.keyUsage != nil
+ case ECDSAXKey:
+ return h.x != nil
+ case X509CertChainKey:
+ return h.x509CertChain != nil
+ case X509CertThumbprintKey:
+ return h.x509CertThumbprint != nil
+ case X509CertThumbprintS256Key:
+ return h.x509CertThumbprintS256 != nil
+ case X509URLKey:
+ return h.x509URL != nil
+ case ECDSAYKey:
+ return h.y != nil
+ default:
+ _, ok := h.privateParams[name]
+ return ok
+ }
+}
+
+func (h *ecdsaPublicKey) Get(name string, dst any) error {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ switch name {
+ case KeyTypeKey:
+ if err := blackmagic.AssignIfCompatible(dst, h.KeyType()); err != nil {
+ return fmt.Errorf(`ecdsaPublicKey.Get: failed to assign value for field %q to destination object: %w`, name, err)
+ }
+ case AlgorithmKey:
+ if h.algorithm == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.algorithm)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case ECDSACrvKey:
+ if h.crv == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.crv)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case KeyIDKey:
+ if h.keyID == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyID)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case KeyOpsKey:
+ if h.keyOps == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyOps)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case KeyUsageKey:
+ if h.keyUsage == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyUsage)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case ECDSAXKey:
+ if h.x == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.x); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertChainKey:
+ if h.x509CertChain == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.x509CertChain); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertThumbprintKey:
+ if h.x509CertThumbprint == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprint)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertThumbprintS256Key:
+ if h.x509CertThumbprintS256 == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprintS256)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509URLKey:
+ if h.x509URL == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509URL)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case ECDSAYKey:
+ if h.y == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.y); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ default:
+ v, ok := h.privateParams[name]
+ if !ok {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, v); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ }
+ return nil
+}
+
+func (h *ecdsaPublicKey) Set(name string, value any) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ return h.setNoLock(name, value)
+}
+
+func (h *ecdsaPublicKey) setNoLock(name string, value any) error {
+ switch name {
+ case "kty":
+ return nil
+ case AlgorithmKey:
+ switch v := value.(type) {
+ case string, jwa.SignatureAlgorithm, jwa.KeyEncryptionAlgorithm, jwa.ContentEncryptionAlgorithm:
+ tmp, err := jwa.KeyAlgorithmFrom(v)
+ if err != nil {
+ return fmt.Errorf(`invalid algorithm for %q key: %w`, AlgorithmKey, err)
+ }
+ h.algorithm = &tmp
+ default:
+ return fmt.Errorf(`invalid type for %q key: %T`, AlgorithmKey, value)
+ }
+ return nil
+ case ECDSACrvKey:
+ if v, ok := value.(jwa.EllipticCurveAlgorithm); ok {
+ h.crv = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, ECDSACrvKey, value)
+ case KeyIDKey:
+ if v, ok := value.(string); ok {
+ h.keyID = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value)
+ case KeyOpsKey:
+ var acceptor KeyOperationList
+ if err := acceptor.Accept(value); err != nil {
+ return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err)
+ }
+ h.keyOps = &acceptor
+ return nil
+ case KeyUsageKey:
+ switch v := value.(type) {
+ case KeyUsageType:
+ switch v {
+ case ForSignature, ForEncryption:
+ tmp := v.String()
+ h.keyUsage = &tmp
+ default:
+ return fmt.Errorf(`invalid key usage type %s`, v)
+ }
+ case string:
+ h.keyUsage = &v
+ default:
+ return fmt.Errorf(`invalid key usage type %s`, v)
+ }
+ case ECDSAXKey:
+ if v, ok := value.([]byte); ok {
+ h.x = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, ECDSAXKey, value)
+ case X509CertChainKey:
+ if v, ok := value.(*cert.Chain); ok {
+ h.x509CertChain = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value)
+ case X509CertThumbprintKey:
+ if v, ok := value.(string); ok {
+ h.x509CertThumbprint = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value)
+ case X509CertThumbprintS256Key:
+ if v, ok := value.(string); ok {
+ h.x509CertThumbprintS256 = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value)
+ case X509URLKey:
+ if v, ok := value.(string); ok {
+ h.x509URL = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value)
+ case ECDSAYKey:
+ if v, ok := value.([]byte); ok {
+ h.y = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, ECDSAYKey, value)
+ default:
+ if h.privateParams == nil {
+ h.privateParams = map[string]any{}
+ }
+ h.privateParams[name] = value
+ }
+ return nil
+}
+
+func (k *ecdsaPublicKey) Remove(key string) error {
+ k.mu.Lock()
+ defer k.mu.Unlock()
+ switch key {
+ case AlgorithmKey:
+ k.algorithm = nil
+ case ECDSACrvKey:
+ k.crv = nil
+ case KeyIDKey:
+ k.keyID = nil
+ case KeyOpsKey:
+ k.keyOps = nil
+ case KeyUsageKey:
+ k.keyUsage = nil
+ case ECDSAXKey:
+ k.x = nil
+ case X509CertChainKey:
+ k.x509CertChain = nil
+ case X509CertThumbprintKey:
+ k.x509CertThumbprint = nil
+ case X509CertThumbprintS256Key:
+ k.x509CertThumbprintS256 = nil
+ case X509URLKey:
+ k.x509URL = nil
+ case ECDSAYKey:
+ k.y = nil
+ default:
+ delete(k.privateParams, key)
+ }
+ return nil
+}
+
+func (k *ecdsaPublicKey) Clone() (Key, error) {
+ key, err := cloneKey(k)
+ if err != nil {
+ return nil, fmt.Errorf(`ecdsaPublicKey.Clone: %w`, err)
+ }
+ return key, nil
+}
+
+func (k *ecdsaPublicKey) DecodeCtx() json.DecodeCtx {
+ k.mu.RLock()
+ defer k.mu.RUnlock()
+ return k.dc
+}
+
+func (k *ecdsaPublicKey) SetDecodeCtx(dc json.DecodeCtx) {
+ k.mu.Lock()
+ defer k.mu.Unlock()
+ k.dc = dc
+}
+
+func (h *ecdsaPublicKey) UnmarshalJSON(buf []byte) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ h.algorithm = nil
+ h.crv = nil
+ h.keyID = nil
+ h.keyOps = nil
+ h.keyUsage = nil
+ h.x = nil
+ h.x509CertChain = nil
+ h.x509CertThumbprint = nil
+ h.x509CertThumbprintS256 = nil
+ h.x509URL = nil
+ h.y = nil
+ dec := json.NewDecoder(bytes.NewReader(buf))
+LOOP:
+ for {
+ tok, err := dec.Token()
+ if err != nil {
+ return fmt.Errorf(`error reading token: %w`, err)
+ }
+ switch tok := tok.(type) {
+ case json.Delim:
+ // Assuming we're doing everything correctly, we should ONLY
+ // get either tokens.OpenCurlyBracket or tokens.CloseCurlyBracket here.
+ if tok == tokens.CloseCurlyBracket { // End of object
+ break LOOP
+ } else if tok != tokens.OpenCurlyBracket {
+ return fmt.Errorf(`expected '%c' but got '%c'`, tokens.OpenCurlyBracket, tok)
+ }
+ case string: // Objects can only have string keys
+ switch tok {
+ case KeyTypeKey:
+ val, err := json.ReadNextStringToken(dec)
+ if err != nil {
+ return fmt.Errorf(`error reading token: %w`, err)
+ }
+ if val != jwa.EC().String() {
+ return fmt.Errorf(`invalid kty value for RSAPublicKey (%s)`, val)
+ }
+ case AlgorithmKey:
+ var s string
+ if err := dec.Decode(&s); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+ }
+ alg, err := jwa.KeyAlgorithmFrom(s)
+ if err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+ }
+ h.algorithm = &alg
+ case ECDSACrvKey:
+ var decoded jwa.EllipticCurveAlgorithm
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSACrvKey, err)
+ }
+ h.crv = &decoded
+ case KeyIDKey:
+ if err := json.AssignNextStringToken(&h.keyID, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err)
+ }
+ case KeyOpsKey:
+ var decoded KeyOperationList
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err)
+ }
+ h.keyOps = &decoded
+ case KeyUsageKey:
+ if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err)
+ }
+ case ECDSAXKey:
+ if err := json.AssignNextBytesToken(&h.x, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSAXKey, err)
+ }
+ case X509CertChainKey:
+ var decoded cert.Chain
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err)
+ }
+ h.x509CertChain = &decoded
+ case X509CertThumbprintKey:
+ if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err)
+ }
+ case X509CertThumbprintS256Key:
+ if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err)
+ }
+ case X509URLKey:
+ if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err)
+ }
+ case ECDSAYKey:
+ if err := json.AssignNextBytesToken(&h.y, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSAYKey, err)
+ }
+ default:
+ if dc := h.dc; dc != nil {
+ if localReg := dc.Registry(); localReg != nil {
+ decoded, err := localReg.Decode(dec, tok)
+ if err == nil {
+ h.setNoLock(tok, decoded)
+ continue
+ }
+ }
+ }
+ decoded, err := registry.Decode(dec, tok)
+ if err == nil {
+ h.setNoLock(tok, decoded)
+ continue
+ }
+ return fmt.Errorf(`could not decode field %s: %w`, tok, err)
+ }
+ default:
+ return fmt.Errorf(`invalid token %T`, tok)
+ }
+ }
+ if h.crv == nil {
+ return fmt.Errorf(`required field crv is missing`)
+ }
+ if h.x == nil {
+ return fmt.Errorf(`required field x is missing`)
+ }
+ if h.y == nil {
+ return fmt.Errorf(`required field y is missing`)
+ }
+ return nil
+}
+
+func (h ecdsaPublicKey) MarshalJSON() ([]byte, error) {
+ data := make(map[string]any)
+ fields := make([]string, 0, 11)
+ data[KeyTypeKey] = jwa.EC()
+ fields = append(fields, KeyTypeKey)
+ if h.algorithm != nil {
+ data[AlgorithmKey] = *(h.algorithm)
+ fields = append(fields, AlgorithmKey)
+ }
+ if h.crv != nil {
+ data[ECDSACrvKey] = *(h.crv)
+ fields = append(fields, ECDSACrvKey)
+ }
+ if h.keyID != nil {
+ data[KeyIDKey] = *(h.keyID)
+ fields = append(fields, KeyIDKey)
+ }
+ if h.keyOps != nil {
+ data[KeyOpsKey] = *(h.keyOps)
+ fields = append(fields, KeyOpsKey)
+ }
+ if h.keyUsage != nil {
+ data[KeyUsageKey] = *(h.keyUsage)
+ fields = append(fields, KeyUsageKey)
+ }
+ if h.x != nil {
+ data[ECDSAXKey] = h.x
+ fields = append(fields, ECDSAXKey)
+ }
+ if h.x509CertChain != nil {
+ data[X509CertChainKey] = h.x509CertChain
+ fields = append(fields, X509CertChainKey)
+ }
+ if h.x509CertThumbprint != nil {
+ data[X509CertThumbprintKey] = *(h.x509CertThumbprint)
+ fields = append(fields, X509CertThumbprintKey)
+ }
+ if h.x509CertThumbprintS256 != nil {
+ data[X509CertThumbprintS256Key] = *(h.x509CertThumbprintS256)
+ fields = append(fields, X509CertThumbprintS256Key)
+ }
+ if h.x509URL != nil {
+ data[X509URLKey] = *(h.x509URL)
+ fields = append(fields, X509URLKey)
+ }
+ if h.y != nil {
+ data[ECDSAYKey] = h.y
+ fields = append(fields, ECDSAYKey)
+ }
+ for k, v := range h.privateParams {
+ data[k] = v
+ fields = append(fields, k)
+ }
+
+ sort.Strings(fields)
+ buf := pool.BytesBuffer().Get()
+ defer pool.BytesBuffer().Put(buf)
+ buf.WriteByte(tokens.OpenCurlyBracket)
+ enc := json.NewEncoder(buf)
+ for i, f := range fields {
+ if i > 0 {
+ buf.WriteRune(tokens.Comma)
+ }
+ buf.WriteRune(tokens.DoubleQuote)
+ buf.WriteString(f)
+ buf.WriteString(`":`)
+ v := data[f]
+ switch v := v.(type) {
+ case []byte:
+ buf.WriteRune(tokens.DoubleQuote)
+ buf.WriteString(base64.EncodeToString(v))
+ buf.WriteRune(tokens.DoubleQuote)
+ default:
+ if err := enc.Encode(v); err != nil {
+ return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err)
+ }
+ buf.Truncate(buf.Len() - 1)
+ }
+ }
+ buf.WriteByte(tokens.CloseCurlyBracket)
+ ret := make([]byte, buf.Len())
+ copy(ret, buf.Bytes())
+ return ret, nil
+}
+
+func (h *ecdsaPublicKey) Keys() []string {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ keys := make([]string, 0, 11+len(h.privateParams))
+ keys = append(keys, KeyTypeKey)
+ if h.algorithm != nil {
+ keys = append(keys, AlgorithmKey)
+ }
+ if h.crv != nil {
+ keys = append(keys, ECDSACrvKey)
+ }
+ if h.keyID != nil {
+ keys = append(keys, KeyIDKey)
+ }
+ if h.keyOps != nil {
+ keys = append(keys, KeyOpsKey)
+ }
+ if h.keyUsage != nil {
+ keys = append(keys, KeyUsageKey)
+ }
+ if h.x != nil {
+ keys = append(keys, ECDSAXKey)
+ }
+ if h.x509CertChain != nil {
+ keys = append(keys, X509CertChainKey)
+ }
+ if h.x509CertThumbprint != nil {
+ keys = append(keys, X509CertThumbprintKey)
+ }
+ if h.x509CertThumbprintS256 != nil {
+ keys = append(keys, X509CertThumbprintS256Key)
+ }
+ if h.x509URL != nil {
+ keys = append(keys, X509URLKey)
+ }
+ if h.y != nil {
+ keys = append(keys, ECDSAYKey)
+ }
+ for k := range h.privateParams {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+type ECDSAPrivateKey interface {
+ Key
+ Crv() (jwa.EllipticCurveAlgorithm, bool)
+ D() ([]byte, bool)
+ X() ([]byte, bool)
+ Y() ([]byte, bool)
+}
+
+type ecdsaPrivateKey struct {
+ algorithm *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4
+ crv *jwa.EllipticCurveAlgorithm
+ d []byte
+ keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4
+ keyOps *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3
+ keyUsage *string // https://tools.ietf.org/html/rfc7517#section-4.2
+ x []byte
+ x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6
+ x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7
+ x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8
+ x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5
+ y []byte
+ privateParams map[string]any
+ mu *sync.RWMutex
+ dc json.DecodeCtx
+}
+
+var _ ECDSAPrivateKey = &ecdsaPrivateKey{}
+var _ Key = &ecdsaPrivateKey{}
+
+func newECDSAPrivateKey() *ecdsaPrivateKey {
+ return &ecdsaPrivateKey{
+ mu: &sync.RWMutex{},
+ privateParams: make(map[string]any),
+ }
+}
+
+func (h ecdsaPrivateKey) KeyType() jwa.KeyType {
+ return jwa.EC()
+}
+
+func (h ecdsaPrivateKey) rlock() {
+ h.mu.RLock()
+}
+
+func (h ecdsaPrivateKey) runlock() {
+ h.mu.RUnlock()
+}
+
+func (h ecdsaPrivateKey) IsPrivate() bool {
+ return true
+}
+
+func (h *ecdsaPrivateKey) Algorithm() (jwa.KeyAlgorithm, bool) {
+ if h.algorithm != nil {
+ return *(h.algorithm), true
+ }
+ return nil, false
+}
+
+func (h *ecdsaPrivateKey) Crv() (jwa.EllipticCurveAlgorithm, bool) {
+ if h.crv != nil {
+ return *(h.crv), true
+ }
+ return jwa.InvalidEllipticCurve(), false
+}
+
+func (h *ecdsaPrivateKey) D() ([]byte, bool) {
+ if h.d != nil {
+ return h.d, true
+ }
+ return nil, false
+}
+
+func (h *ecdsaPrivateKey) KeyID() (string, bool) {
+ if h.keyID != nil {
+ return *(h.keyID), true
+ }
+ return "", false
+}
+
+func (h *ecdsaPrivateKey) KeyOps() (KeyOperationList, bool) {
+ if h.keyOps != nil {
+ return *(h.keyOps), true
+ }
+ return nil, false
+}
+
+func (h *ecdsaPrivateKey) KeyUsage() (string, bool) {
+ if h.keyUsage != nil {
+ return *(h.keyUsage), true
+ }
+ return "", false
+}
+
+func (h *ecdsaPrivateKey) X() ([]byte, bool) {
+ if h.x != nil {
+ return h.x, true
+ }
+ return nil, false
+}
+
+func (h *ecdsaPrivateKey) X509CertChain() (*cert.Chain, bool) {
+ return h.x509CertChain, true
+}
+
+func (h *ecdsaPrivateKey) X509CertThumbprint() (string, bool) {
+ if h.x509CertThumbprint != nil {
+ return *(h.x509CertThumbprint), true
+ }
+ return "", false
+}
+
+func (h *ecdsaPrivateKey) X509CertThumbprintS256() (string, bool) {
+ if h.x509CertThumbprintS256 != nil {
+ return *(h.x509CertThumbprintS256), true
+ }
+ return "", false
+}
+
+func (h *ecdsaPrivateKey) X509URL() (string, bool) {
+ if h.x509URL != nil {
+ return *(h.x509URL), true
+ }
+ return "", false
+}
+
+func (h *ecdsaPrivateKey) Y() ([]byte, bool) {
+ if h.y != nil {
+ return h.y, true
+ }
+ return nil, false
+}
+
+func (h *ecdsaPrivateKey) Has(name string) bool {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ switch name {
+ case KeyTypeKey:
+ return true
+ case AlgorithmKey:
+ return h.algorithm != nil
+ case ECDSACrvKey:
+ return h.crv != nil
+ case ECDSADKey:
+ return h.d != nil
+ case KeyIDKey:
+ return h.keyID != nil
+ case KeyOpsKey:
+ return h.keyOps != nil
+ case KeyUsageKey:
+ return h.keyUsage != nil
+ case ECDSAXKey:
+ return h.x != nil
+ case X509CertChainKey:
+ return h.x509CertChain != nil
+ case X509CertThumbprintKey:
+ return h.x509CertThumbprint != nil
+ case X509CertThumbprintS256Key:
+ return h.x509CertThumbprintS256 != nil
+ case X509URLKey:
+ return h.x509URL != nil
+ case ECDSAYKey:
+ return h.y != nil
+ default:
+ _, ok := h.privateParams[name]
+ return ok
+ }
+}
+
+func (h *ecdsaPrivateKey) Get(name string, dst any) error {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ switch name {
+ case KeyTypeKey:
+ if err := blackmagic.AssignIfCompatible(dst, h.KeyType()); err != nil {
+ return fmt.Errorf(`ecdsaPrivateKey.Get: failed to assign value for field %q to destination object: %w`, name, err)
+ }
+ case AlgorithmKey:
+ if h.algorithm == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.algorithm)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case ECDSACrvKey:
+ if h.crv == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.crv)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case ECDSADKey:
+ if h.d == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.d); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case KeyIDKey:
+ if h.keyID == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyID)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case KeyOpsKey:
+ if h.keyOps == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyOps)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case KeyUsageKey:
+ if h.keyUsage == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyUsage)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case ECDSAXKey:
+ if h.x == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.x); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertChainKey:
+ if h.x509CertChain == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.x509CertChain); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertThumbprintKey:
+ if h.x509CertThumbprint == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprint)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertThumbprintS256Key:
+ if h.x509CertThumbprintS256 == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprintS256)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509URLKey:
+ if h.x509URL == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509URL)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case ECDSAYKey:
+ if h.y == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.y); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ default:
+ v, ok := h.privateParams[name]
+ if !ok {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, v); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ }
+ return nil
+}
+
+func (h *ecdsaPrivateKey) Set(name string, value any) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ return h.setNoLock(name, value)
+}
+
+func (h *ecdsaPrivateKey) setNoLock(name string, value any) error {
+ switch name {
+ case "kty":
+ return nil
+ case AlgorithmKey:
+ switch v := value.(type) {
+ case string, jwa.SignatureAlgorithm, jwa.KeyEncryptionAlgorithm, jwa.ContentEncryptionAlgorithm:
+ tmp, err := jwa.KeyAlgorithmFrom(v)
+ if err != nil {
+ return fmt.Errorf(`invalid algorithm for %q key: %w`, AlgorithmKey, err)
+ }
+ h.algorithm = &tmp
+ default:
+ return fmt.Errorf(`invalid type for %q key: %T`, AlgorithmKey, value)
+ }
+ return nil
+ case ECDSACrvKey:
+ if v, ok := value.(jwa.EllipticCurveAlgorithm); ok {
+ h.crv = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, ECDSACrvKey, value)
+ case ECDSADKey:
+ if v, ok := value.([]byte); ok {
+ h.d = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, ECDSADKey, value)
+ case KeyIDKey:
+ if v, ok := value.(string); ok {
+ h.keyID = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value)
+ case KeyOpsKey:
+ var acceptor KeyOperationList
+ if err := acceptor.Accept(value); err != nil {
+ return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err)
+ }
+ h.keyOps = &acceptor
+ return nil
+ case KeyUsageKey:
+ switch v := value.(type) {
+ case KeyUsageType:
+ switch v {
+ case ForSignature, ForEncryption:
+ tmp := v.String()
+ h.keyUsage = &tmp
+ default:
+ return fmt.Errorf(`invalid key usage type %s`, v)
+ }
+ case string:
+ h.keyUsage = &v
+ default:
+ return fmt.Errorf(`invalid key usage type %s`, v)
+ }
+ case ECDSAXKey:
+ if v, ok := value.([]byte); ok {
+ h.x = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, ECDSAXKey, value)
+ case X509CertChainKey:
+ if v, ok := value.(*cert.Chain); ok {
+ h.x509CertChain = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value)
+ case X509CertThumbprintKey:
+ if v, ok := value.(string); ok {
+ h.x509CertThumbprint = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value)
+ case X509CertThumbprintS256Key:
+ if v, ok := value.(string); ok {
+ h.x509CertThumbprintS256 = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value)
+ case X509URLKey:
+ if v, ok := value.(string); ok {
+ h.x509URL = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value)
+ case ECDSAYKey:
+ if v, ok := value.([]byte); ok {
+ h.y = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, ECDSAYKey, value)
+ default:
+ if h.privateParams == nil {
+ h.privateParams = map[string]any{}
+ }
+ h.privateParams[name] = value
+ }
+ return nil
+}
+
+func (k *ecdsaPrivateKey) Remove(key string) error {
+ k.mu.Lock()
+ defer k.mu.Unlock()
+ switch key {
+ case AlgorithmKey:
+ k.algorithm = nil
+ case ECDSACrvKey:
+ k.crv = nil
+ case ECDSADKey:
+ k.d = nil
+ case KeyIDKey:
+ k.keyID = nil
+ case KeyOpsKey:
+ k.keyOps = nil
+ case KeyUsageKey:
+ k.keyUsage = nil
+ case ECDSAXKey:
+ k.x = nil
+ case X509CertChainKey:
+ k.x509CertChain = nil
+ case X509CertThumbprintKey:
+ k.x509CertThumbprint = nil
+ case X509CertThumbprintS256Key:
+ k.x509CertThumbprintS256 = nil
+ case X509URLKey:
+ k.x509URL = nil
+ case ECDSAYKey:
+ k.y = nil
+ default:
+ delete(k.privateParams, key)
+ }
+ return nil
+}
+
+func (k *ecdsaPrivateKey) Clone() (Key, error) {
+ key, err := cloneKey(k)
+ if err != nil {
+ return nil, fmt.Errorf(`ecdsaPrivateKey.Clone: %w`, err)
+ }
+ return key, nil
+}
+
+func (k *ecdsaPrivateKey) DecodeCtx() json.DecodeCtx {
+ k.mu.RLock()
+ defer k.mu.RUnlock()
+ return k.dc
+}
+
+func (k *ecdsaPrivateKey) SetDecodeCtx(dc json.DecodeCtx) {
+ k.mu.Lock()
+ defer k.mu.Unlock()
+ k.dc = dc
+}
+
+func (h *ecdsaPrivateKey) UnmarshalJSON(buf []byte) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ h.algorithm = nil
+ h.crv = nil
+ h.d = nil
+ h.keyID = nil
+ h.keyOps = nil
+ h.keyUsage = nil
+ h.x = nil
+ h.x509CertChain = nil
+ h.x509CertThumbprint = nil
+ h.x509CertThumbprintS256 = nil
+ h.x509URL = nil
+ h.y = nil
+ dec := json.NewDecoder(bytes.NewReader(buf))
+LOOP:
+ for {
+ tok, err := dec.Token()
+ if err != nil {
+ return fmt.Errorf(`error reading token: %w`, err)
+ }
+ switch tok := tok.(type) {
+ case json.Delim:
+ // Assuming we're doing everything correctly, we should ONLY
+ // get either tokens.OpenCurlyBracket or tokens.CloseCurlyBracket here.
+ if tok == tokens.CloseCurlyBracket { // End of object
+ break LOOP
+ } else if tok != tokens.OpenCurlyBracket {
+ return fmt.Errorf(`expected '%c' but got '%c'`, tokens.OpenCurlyBracket, tok)
+ }
+ case string: // Objects can only have string keys
+ switch tok {
+ case KeyTypeKey:
+ val, err := json.ReadNextStringToken(dec)
+ if err != nil {
+ return fmt.Errorf(`error reading token: %w`, err)
+ }
+ if val != jwa.EC().String() {
+ return fmt.Errorf(`invalid kty value for RSAPublicKey (%s)`, val)
+ }
+ case AlgorithmKey:
+ var s string
+ if err := dec.Decode(&s); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+ }
+ alg, err := jwa.KeyAlgorithmFrom(s)
+ if err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+ }
+ h.algorithm = &alg
+ case ECDSACrvKey:
+ var decoded jwa.EllipticCurveAlgorithm
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSACrvKey, err)
+ }
+ h.crv = &decoded
+ case ECDSADKey:
+ if err := json.AssignNextBytesToken(&h.d, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSADKey, err)
+ }
+ case KeyIDKey:
+ if err := json.AssignNextStringToken(&h.keyID, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err)
+ }
+ case KeyOpsKey:
+ var decoded KeyOperationList
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err)
+ }
+ h.keyOps = &decoded
+ case KeyUsageKey:
+ if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err)
+ }
+ case ECDSAXKey:
+ if err := json.AssignNextBytesToken(&h.x, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSAXKey, err)
+ }
+ case X509CertChainKey:
+ var decoded cert.Chain
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err)
+ }
+ h.x509CertChain = &decoded
+ case X509CertThumbprintKey:
+ if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err)
+ }
+ case X509CertThumbprintS256Key:
+ if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err)
+ }
+ case X509URLKey:
+ if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err)
+ }
+ case ECDSAYKey:
+ if err := json.AssignNextBytesToken(&h.y, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSAYKey, err)
+ }
+ default:
+ if dc := h.dc; dc != nil {
+ if localReg := dc.Registry(); localReg != nil {
+ decoded, err := localReg.Decode(dec, tok)
+ if err == nil {
+ h.setNoLock(tok, decoded)
+ continue
+ }
+ }
+ }
+ decoded, err := registry.Decode(dec, tok)
+ if err == nil {
+ h.setNoLock(tok, decoded)
+ continue
+ }
+ return fmt.Errorf(`could not decode field %s: %w`, tok, err)
+ }
+ default:
+ return fmt.Errorf(`invalid token %T`, tok)
+ }
+ }
+ if h.crv == nil {
+ return fmt.Errorf(`required field crv is missing`)
+ }
+ if h.d == nil {
+ return fmt.Errorf(`required field d is missing`)
+ }
+ if h.x == nil {
+ return fmt.Errorf(`required field x is missing`)
+ }
+ if h.y == nil {
+ return fmt.Errorf(`required field y is missing`)
+ }
+ return nil
+}
+
+func (h ecdsaPrivateKey) MarshalJSON() ([]byte, error) {
+ data := make(map[string]any)
+ fields := make([]string, 0, 12)
+ data[KeyTypeKey] = jwa.EC()
+ fields = append(fields, KeyTypeKey)
+ if h.algorithm != nil {
+ data[AlgorithmKey] = *(h.algorithm)
+ fields = append(fields, AlgorithmKey)
+ }
+ if h.crv != nil {
+ data[ECDSACrvKey] = *(h.crv)
+ fields = append(fields, ECDSACrvKey)
+ }
+ if h.d != nil {
+ data[ECDSADKey] = h.d
+ fields = append(fields, ECDSADKey)
+ }
+ if h.keyID != nil {
+ data[KeyIDKey] = *(h.keyID)
+ fields = append(fields, KeyIDKey)
+ }
+ if h.keyOps != nil {
+ data[KeyOpsKey] = *(h.keyOps)
+ fields = append(fields, KeyOpsKey)
+ }
+ if h.keyUsage != nil {
+ data[KeyUsageKey] = *(h.keyUsage)
+ fields = append(fields, KeyUsageKey)
+ }
+ if h.x != nil {
+ data[ECDSAXKey] = h.x
+ fields = append(fields, ECDSAXKey)
+ }
+ if h.x509CertChain != nil {
+ data[X509CertChainKey] = h.x509CertChain
+ fields = append(fields, X509CertChainKey)
+ }
+ if h.x509CertThumbprint != nil {
+ data[X509CertThumbprintKey] = *(h.x509CertThumbprint)
+ fields = append(fields, X509CertThumbprintKey)
+ }
+ if h.x509CertThumbprintS256 != nil {
+ data[X509CertThumbprintS256Key] = *(h.x509CertThumbprintS256)
+ fields = append(fields, X509CertThumbprintS256Key)
+ }
+ if h.x509URL != nil {
+ data[X509URLKey] = *(h.x509URL)
+ fields = append(fields, X509URLKey)
+ }
+ if h.y != nil {
+ data[ECDSAYKey] = h.y
+ fields = append(fields, ECDSAYKey)
+ }
+ for k, v := range h.privateParams {
+ data[k] = v
+ fields = append(fields, k)
+ }
+
+ sort.Strings(fields)
+ buf := pool.BytesBuffer().Get()
+ defer pool.BytesBuffer().Put(buf)
+ buf.WriteByte(tokens.OpenCurlyBracket)
+ enc := json.NewEncoder(buf)
+ for i, f := range fields {
+ if i > 0 {
+ buf.WriteRune(tokens.Comma)
+ }
+ buf.WriteRune(tokens.DoubleQuote)
+ buf.WriteString(f)
+ buf.WriteString(`":`)
+ v := data[f]
+ switch v := v.(type) {
+ case []byte:
+ buf.WriteRune(tokens.DoubleQuote)
+ buf.WriteString(base64.EncodeToString(v))
+ buf.WriteRune(tokens.DoubleQuote)
+ default:
+ if err := enc.Encode(v); err != nil {
+ return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err)
+ }
+ buf.Truncate(buf.Len() - 1)
+ }
+ }
+ buf.WriteByte(tokens.CloseCurlyBracket)
+ ret := make([]byte, buf.Len())
+ copy(ret, buf.Bytes())
+ return ret, nil
+}
+
+func (h *ecdsaPrivateKey) Keys() []string {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ keys := make([]string, 0, 12+len(h.privateParams))
+ keys = append(keys, KeyTypeKey)
+ if h.algorithm != nil {
+ keys = append(keys, AlgorithmKey)
+ }
+ if h.crv != nil {
+ keys = append(keys, ECDSACrvKey)
+ }
+ if h.d != nil {
+ keys = append(keys, ECDSADKey)
+ }
+ if h.keyID != nil {
+ keys = append(keys, KeyIDKey)
+ }
+ if h.keyOps != nil {
+ keys = append(keys, KeyOpsKey)
+ }
+ if h.keyUsage != nil {
+ keys = append(keys, KeyUsageKey)
+ }
+ if h.x != nil {
+ keys = append(keys, ECDSAXKey)
+ }
+ if h.x509CertChain != nil {
+ keys = append(keys, X509CertChainKey)
+ }
+ if h.x509CertThumbprint != nil {
+ keys = append(keys, X509CertThumbprintKey)
+ }
+ if h.x509CertThumbprintS256 != nil {
+ keys = append(keys, X509CertThumbprintS256Key)
+ }
+ if h.x509URL != nil {
+ keys = append(keys, X509URLKey)
+ }
+ if h.y != nil {
+ keys = append(keys, ECDSAYKey)
+ }
+ for k := range h.privateParams {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+var ecdsaStandardFields KeyFilter
+
+func init() {
+ ecdsaStandardFields = NewFieldNameFilter(KeyTypeKey, KeyUsageKey, KeyOpsKey, AlgorithmKey, KeyIDKey, X509URLKey, X509CertChainKey, X509CertThumbprintKey, X509CertThumbprintS256Key, ECDSACrvKey, ECDSAXKey, ECDSAYKey, ECDSADKey)
+}
+
+// ECDSAStandardFieldsFilter returns a KeyFilter that filters out standard ECDSA fields.
+func ECDSAStandardFieldsFilter() KeyFilter {
+ return ecdsaStandardFields
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/errors.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/errors.go
new file mode 100644
index 0000000000..af7e00d952
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/errors.go
@@ -0,0 +1,79 @@
+package jwk
+
+import (
+ "errors"
+ "fmt"
+)
+
+var cpe = &continueError{}
+
+// ContinueError returns an opaque error that can be returned
+// when a `KeyParser`, `KeyImporter`, or `KeyExporter` cannot handle the given payload,
+// but would like the process to continue with the next handler.
+func ContinueError() error {
+ return cpe
+}
+
+type continueError struct{}
+
+func (e *continueError) Error() string {
+ return "continue parsing"
+}
+
+type importError struct {
+ error
+}
+
+func (e importError) Unwrap() error {
+ return e.error
+}
+
+func (importError) Is(err error) bool {
+ _, ok := err.(importError)
+ return ok
+}
+
+func importerr(f string, args ...any) error {
+ return importError{fmt.Errorf(`jwk.Import: `+f, args...)}
+}
+
+var errDefaultImportError = importError{errors.New(`import error`)}
+
+func ImportError() error {
+ return errDefaultImportError
+}
+
+type parseError struct {
+ error
+}
+
+func (e parseError) Unwrap() error {
+ return e.error
+}
+
+func (parseError) Is(err error) bool {
+ _, ok := err.(parseError)
+ return ok
+}
+
+func bparseerr(prefix string, f string, args ...any) error {
+ return parseError{fmt.Errorf(prefix+`: `+f, args...)}
+}
+
+func parseerr(f string, args ...any) error {
+ return bparseerr(`jwk.Parse`, f, args...)
+}
+
+func rparseerr(f string, args ...any) error {
+ return bparseerr(`jwk.ParseReader`, f, args...)
+}
+
+func sparseerr(f string, args ...any) error {
+ return bparseerr(`jwk.ParseString`, f, args...)
+}
+
+var errDefaultParseError = parseError{errors.New(`parse error`)}
+
+func ParseError() error {
+ return errDefaultParseError
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/es256k.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/es256k.go
new file mode 100644
index 0000000000..48114bbaee
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/es256k.go
@@ -0,0 +1,14 @@
+//go:build jwx_es256k
+// +build jwx_es256k
+
+package jwk
+
+import (
+ "github.com/decred/dcrd/dcrec/secp256k1/v4"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ ourecdsa "github.com/lestrrat-go/jwx/v3/jwk/ecdsa"
+)
+
+func init() {
+ ourecdsa.RegisterCurve(jwa.Secp256k1(), secp256k1.S256())
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/fetch.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/fetch.go
new file mode 100644
index 0000000000..910a2101d4
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/fetch.go
@@ -0,0 +1,117 @@
+package jwk
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+)
+
+// Fetcher is an interface that represents an object that fetches a JWKS.
+// Currently this is only used in the `jws.WithVerifyAuto` option.
+//
+// Particularly, do not confuse this as the backend to `jwk.Fetch()` function.
+// If you need to control how `jwk.Fetch()` implements HTTP requests look into
+// providing a custom `http.Client` object via `jwk.WithHTTPClient` option
+type Fetcher interface {
+ Fetch(context.Context, string, ...FetchOption) (Set, error)
+}
+
+// FetchFunc describes a type of Fetcher that is represented as a function.
+//
+// You can use this to wrap functions (e.g. `jwk.Fetch“) as a Fetcher object.
+type FetchFunc func(context.Context, string, ...FetchOption) (Set, error)
+
+func (ff FetchFunc) Fetch(ctx context.Context, u string, options ...FetchOption) (Set, error) {
+ return ff(ctx, u, options...)
+}
+
+// CachedFetcher wraps `jwk.Cache` so that it can be used as a `jwk.Fetcher`.
+//
+// One notable diffence from a general use fetcher is that `jwk.CachedFetcher`
+// can only be used with JWKS URLs that have been registered with the cache.
+// Please read the documentation fo `(jwk.CachedFetcher).Fetch` for more details.
+//
+// This object is intended to be used with `jws.WithVerifyAuto` option, specifically
+// for a scenario where there is a very small number of JWKS URLs that are trusted
+// and used to verify JWS messages. It is NOT meant to be used as a general purpose
+// caching fetcher object.
+type CachedFetcher struct {
+ cache *Cache
+}
+
+// Creates a new `jwk.CachedFetcher` object.
+func NewCachedFetcher(cache *Cache) *CachedFetcher {
+ return &CachedFetcher{cache}
+}
+
+// Fetch fetches a JWKS from the cache. If the JWKS URL has not been registered with
+// the cache, an error is returned.
+func (f *CachedFetcher) Fetch(ctx context.Context, u string, _ ...FetchOption) (Set, error) {
+ if !f.cache.IsRegistered(ctx, u) {
+ return nil, fmt.Errorf(`jwk.CachedFetcher: url %q has not been registered`, u)
+ }
+ return f.cache.Lookup(ctx, u)
+}
+
+// Fetch fetches a JWK resource specified by a URL. The url must be
+// pointing to a resource that is supported by `net/http`.
+//
+// This function is just a wrapper around `net/http` and `jwk.Parse`.
+// There is nothing special here, so you are safe to use your own
+// mechanism to fetch the JWKS.
+//
+// If you are using the same `jwk.Set` for long periods of time during
+// the lifecycle of your program, and would like to periodically refresh the
+// contents of the object with the data at the remote resource,
+// consider using `jwk.Cache`, which automatically refreshes
+// jwk.Set objects asynchronously.
+func Fetch(ctx context.Context, u string, options ...FetchOption) (Set, error) {
+ var parseOptions []ParseOption
+ //nolint:revive // I want to keep the type of `wl` as `Whitelist` instead of `InsecureWhitelist`
+ var wl Whitelist = InsecureWhitelist{}
+ var client HTTPClient = http.DefaultClient
+ for _, option := range options {
+ if parseOpt, ok := option.(ParseOption); ok {
+ parseOptions = append(parseOptions, parseOpt)
+ continue
+ }
+
+ switch option.Ident() {
+ case identHTTPClient{}:
+ if err := option.Value(&client); err != nil {
+ return nil, fmt.Errorf(`failed to retrieve HTTPClient option value: %w`, err)
+ }
+ case identFetchWhitelist{}:
+ if err := option.Value(&wl); err != nil {
+ return nil, fmt.Errorf(`failed to retrieve fetch whitelist option value: %w`, err)
+ }
+ }
+ }
+
+ if !wl.IsAllowed(u) {
+ return nil, fmt.Errorf(`jwk.Fetch: url %q has been rejected by whitelist`, u)
+ }
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
+ if err != nil {
+ return nil, fmt.Errorf(`jwk.Fetch: failed to create new request: %w`, err)
+ }
+
+ res, err := client.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf(`jwk.Fetch: request failed: %w`, err)
+ }
+ defer res.Body.Close()
+
+ if res.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf(`jwk.Fetch: request returned status %d, expected 200`, res.StatusCode)
+ }
+
+ buf, err := io.ReadAll(res.Body)
+ if err != nil {
+ return nil, fmt.Errorf(`jwk.Fetch: failed to read response body for %q: %w`, u, err)
+ }
+
+ return Parse(buf, parseOptions...)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/filter.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/filter.go
new file mode 100644
index 0000000000..e73b0757da
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/filter.go
@@ -0,0 +1,28 @@
+package jwk
+
+import (
+ "github.com/lestrrat-go/jwx/v3/transform"
+)
+
+// KeyFilter is an interface that allows users to filter JWK key fields.
+// It provides two methods: Filter and Reject; Filter returns a new key with only
+// the fields that match the filter criteria, while Reject returns a new key with
+// only the fields that DO NOT match the filter.
+//
+// EXPERIMENTAL: This API is experimental and its interface and behavior is
+// subject to change in future releases. This API is not subject to semver
+// compatibility guarantees.
+type KeyFilter interface {
+ Filter(key Key) (Key, error)
+ Reject(key Key) (Key, error)
+}
+
+// NewFieldNameFilter creates a new FieldNameFilter with the specified field names.
+//
+// Note that because some JWK fields are associated with the type instead of
+// stored as data, this filter will not be able to remove them. An example would
+// be the `kty` field: it's associated with the underlying JWK key type, and will
+// always be present even if you attempt to remove it.
+func NewFieldNameFilter(names ...string) KeyFilter {
+ return transform.NewNameBasedFilter[Key](names...)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/interface.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/interface.go
new file mode 100644
index 0000000000..c157c2362c
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/interface.go
@@ -0,0 +1,143 @@
+package jwk
+
+import (
+ "sync"
+
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+)
+
+// AsymmetricKey describes a Key that represents a key in an asymmetric key pair,
+// which in turn can be either a private or a public key. This interface
+// allows those keys to be queried if they are one or the other.
+type AsymmetricKey interface {
+ IsPrivate() bool
+}
+
+// KeyUsageType is used to denote what this key should be used for
+type KeyUsageType string
+
+const (
+ // ForSignature is the value used in the headers to indicate that
+ // this key should be used for signatures
+ ForSignature KeyUsageType = "sig"
+ // ForEncryption is the value used in the headers to indicate that
+ // this key should be used for encrypting
+ ForEncryption KeyUsageType = "enc"
+)
+
+type KeyOperation string
+type KeyOperationList []KeyOperation
+
+const (
+ KeyOpSign KeyOperation = "sign" // (compute digital signature or MAC)
+ KeyOpVerify KeyOperation = "verify" // (verify digital signature or MAC)
+ KeyOpEncrypt KeyOperation = "encrypt" // (encrypt content)
+ KeyOpDecrypt KeyOperation = "decrypt" // (decrypt content and validate decryption, if applicable)
+ KeyOpWrapKey KeyOperation = "wrapKey" // (encrypt key)
+ KeyOpUnwrapKey KeyOperation = "unwrapKey" // (decrypt key and validate decryption, if applicable)
+ KeyOpDeriveKey KeyOperation = "deriveKey" // (derive key)
+ KeyOpDeriveBits KeyOperation = "deriveBits" // (derive bits not to be used as a key)
+)
+
+// Set represents JWKS object, a collection of jwk.Key objects.
+//
+// Sets can be safely converted to and from JSON using the standard
+// `"encoding/json".Marshal` and `"encoding/json".Unmarshal`. However,
+// if you do not know if the payload contains a single JWK or a JWK set,
+// consider using `jwk.Parse()` to always get a `jwk.Set` out of it.
+//
+// Since v1.2.12, JWK sets with private parameters can be parsed as well.
+// Such private parameters can be accessed via the `Field()` method.
+// If a resource contains a single JWK instead of a JWK set, private parameters
+// are stored in _both_ the resulting `jwk.Set` object and the `jwk.Key` object .
+//
+//nolint:interfacebloat
+type Set interface {
+ // AddKey adds the specified key. If the key already exists in the set,
+ // an error is returned.
+ AddKey(Key) error
+
+ // Clear resets the list of keys associated with this set, emptying the
+ // internal list of `jwk.Key`s, as well as clearing any other non-key
+ // fields
+ Clear() error
+
+ // Get returns the key at index `idx`. If the index is out of range,
+ // then the second return value is false.
+ Key(int) (Key, bool)
+
+ // Get returns the value of a private field in the key set.
+ //
+ // For the purposes of a key set, any field other than the "keys" field is
+ // considered to be a private field. In other words, you cannot use this
+ // method to directly access the list of keys in the set
+ Get(string, any) error
+
+ // Set sets the value of a single field.
+ //
+ // This method, which takes an `any`, exists because
+ // these objects can contain extra _arbitrary_ fields that users can
+ // specify, and there is no way of knowing what type they could be.
+ Set(string, any) error
+
+ // Remove removes the specified non-key field from the set.
+ // Keys may not be removed using this method. See RemoveKey for
+ // removing keys.
+ Remove(string) error
+
+ // Index returns the index where the given key exists, -1 otherwise
+ Index(Key) int
+
+ // Len returns the number of keys in the set
+ Len() int
+
+ // LookupKeyID returns the first key matching the given key id.
+ // The second return value is false if there are no keys matching the key id.
+ // The set *may* contain multiple keys with the same key id. If you
+ // need all of them, use `Iterate()`
+ LookupKeyID(string) (Key, bool)
+
+ // RemoveKey removes the key from the set.
+ // RemoveKey returns an error when the specified key does not exist
+ // in set.
+ RemoveKey(Key) error
+
+ // Keys returns the list of keys present in the Set, except for `keys`.
+ // e.g. if you had `{"keys": ["a", "b"], "c": .., "d": ...}`, this method would
+ // return `["c", "d"]`. Note that the order of the keys is not guaranteed.
+ //
+ // TODO: name is confusing between this and Key()
+ Keys() []string
+
+ // Clone create a new set with identical keys. Keys themselves are not cloned.
+ Clone() (Set, error)
+}
+
+type set struct {
+ keys []Key
+ mu sync.RWMutex
+ dc DecodeCtx
+ privateParams map[string]any
+}
+
+type PublicKeyer interface {
+ // PublicKey creates the corresponding PublicKey type for this object.
+ // All fields are copied onto the new public key, except for those that are not allowed.
+ // Returned value must not be the receiver itself.
+ PublicKey() (Key, error)
+}
+
+type DecodeCtx interface {
+ json.DecodeCtx
+ IgnoreParseError() bool
+}
+type KeyWithDecodeCtx interface {
+ SetDecodeCtx(DecodeCtx)
+ DecodeCtx() DecodeCtx
+}
+
+// Used internally: It's used to lock a key
+type rlocker interface {
+ rlock()
+ runlock()
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/interface_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/interface_gen.go
new file mode 100644
index 0000000000..4f23d96cb0
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/interface_gen.go
@@ -0,0 +1,109 @@
+// Code generated by tools/cmd/genjwk/main.go. DO NOT EDIT.
+
+package jwk
+
+import (
+ "crypto"
+
+ "github.com/lestrrat-go/jwx/v3/cert"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+)
+
+const (
+ KeyTypeKey = "kty"
+ KeyUsageKey = "use"
+ KeyOpsKey = "key_ops"
+ AlgorithmKey = "alg"
+ KeyIDKey = "kid"
+ X509URLKey = "x5u"
+ X509CertChainKey = "x5c"
+ X509CertThumbprintKey = "x5t"
+ X509CertThumbprintS256Key = "x5t#S256"
+)
+
+// Key defines the minimal interface for each of the
+// key types. Their use and implementation differ significantly
+// between each key type, so you should use type assertions
+// to perform more specific tasks with each key
+type Key interface {
+
+ // Has returns true if the specified field has a value, even if
+ // the value is empty-ish (e.g. 0, false, "") as long as it has been
+ // explicitly set.
+ Has(string) bool
+
+ // Get is used to extract the value of any field, including non-standard fields, out of the key.
+ //
+ // The first argument is the name of the field. The second argument is a pointer
+ // to a variable that will receive the value of the field. The method returns
+ // an error if the field does not exist, or if the value cannot be assigned to
+ // the destination variable. Note that a field is considered to "exist" even if
+ // the value is empty-ish (e.g. 0, false, ""), as long as it is explicitly set.
+ Get(string, any) error
+
+ // Set sets the value of a single field. Note that certain fields,
+ // notably "kty", cannot be altered, but will not return an error
+ //
+ // This method, which takes an `any`, exists because
+ // these objects can contain extra _arbitrary_ fields that users can
+ // specify, and there is no way of knowing what type they could be
+ Set(string, any) error
+
+ // Remove removes the field associated with the specified key.
+ // There is no way to remove the `kty` (key type). You will ALWAYS be left with one field in a jwk.Key.
+ Remove(string) error
+ // Validate performs _minimal_ checks if the data stored in the key are valid.
+ // By minimal, we mean that it does not check if the key is valid for use in
+ // cryptographic operations. For example, it does not check if an RSA key's
+ // `e` field is a valid exponent, or if the `n` field is a valid modulus.
+ // Instead, it checks for things such as the _presence_ of some required fields,
+ // or if certain keys' values are of particular length.
+ //
+ // Note that depending on th underlying key type, use of this method requires
+ // that multiple fields in the key are properly populated. For example, an EC
+ // key's "x", "y" fields cannot be validated unless the "crv" field is populated first.
+ //
+ // Validate is never called by `UnmarshalJSON()` or `Set`. It must explicitly be
+ // called by the user
+ Validate() error
+
+ // Thumbprint returns the JWK thumbprint using the indicated
+ // hashing algorithm, according to RFC 7638
+ Thumbprint(crypto.Hash) ([]byte, error)
+
+ // Keys returns a list of the keys contained in this jwk.Key.
+ Keys() []string
+
+ // Clone creates a new instance of the same type
+ Clone() (Key, error)
+
+ // PublicKey creates the corresponding PublicKey type for this object.
+ // All fields are copied onto the new public key, except for those that are not allowed.
+ //
+ // If the key is already a public key, it returns a new copy minus the disallowed fields as above.
+ PublicKey() (Key, error)
+
+ // KeyType returns the `kty` of a JWK
+ KeyType() jwa.KeyType
+ // KeyUsage returns `use` of a JWK
+ KeyUsage() (string, bool)
+ // KeyOps returns `key_ops` of a JWK
+ KeyOps() (KeyOperationList, bool)
+ // Algorithm returns `alg` of a JWK
+
+ // Algorithm returns the value of the `alg` field.
+ //
+ // This field may contain either `jwk.SignatureAlgorithm`, `jwk.KeyEncryptionAlgorithm`, or `jwk.ContentEncryptionAlgorithm`.
+ // This is why there exists a `jwa.KeyAlgorithm` type that encompasses both types.
+ Algorithm() (jwa.KeyAlgorithm, bool)
+ // KeyID returns `kid` of a JWK
+ KeyID() (string, bool)
+ // X509URL returns `x5u` of a JWK
+ X509URL() (string, bool)
+ // X509CertChain returns `x5c` of a JWK
+ X509CertChain() (*cert.Chain, bool)
+ // X509CertThumbprint returns `x5t` of a JWK
+ X509CertThumbprint() (string, bool)
+ // X509CertThumbprintS256 returns `x5t#S256` of a JWK
+ X509CertThumbprintS256() (string, bool)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/io.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/io.go
new file mode 100644
index 0000000000..29b30274cc
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/io.go
@@ -0,0 +1,42 @@
+// Code generated by tools/cmd/genreadfile/main.go. DO NOT EDIT.
+
+package jwk
+
+import (
+ "fmt"
+ "io/fs"
+ "os"
+)
+
+type sysFS struct{}
+
+func (sysFS) Open(path string) (fs.File, error) {
+ return os.Open(path)
+}
+
+func ReadFile(path string, options ...ReadFileOption) (Set, error) {
+ var parseOptions []ParseOption
+ for _, option := range options {
+ if po, ok := option.(ParseOption); ok {
+ parseOptions = append(parseOptions, po)
+ }
+ }
+
+ var srcFS fs.FS = sysFS{}
+ for _, option := range options {
+ switch option.Ident() {
+ case identFS{}:
+ if err := option.Value(&srcFS); err != nil {
+ return nil, fmt.Errorf("failed to set fs.FS: %w", err)
+ }
+ }
+ }
+
+ f, err := srcFS.Open(path)
+ if err != nil {
+ return nil, err
+ }
+
+ defer f.Close()
+ return ParseReader(f, parseOptions...)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/jwk.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/jwk.go
new file mode 100644
index 0000000000..785feaf94c
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/jwk.go
@@ -0,0 +1,709 @@
+//go:generate ../tools/cmd/genjwk.sh
+
+package jwk
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+ "reflect"
+
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+)
+
+var registry = json.NewRegistry()
+
+func bigIntToBytes(n *big.Int) ([]byte, error) {
+ if n == nil {
+ return nil, fmt.Errorf(`invalid *big.Int value`)
+ }
+ return n.Bytes(), nil
+}
+
+func init() {
+ if err := RegisterProbeField(reflect.StructField{
+ Name: "Kty",
+ Type: reflect.TypeOf(""),
+ Tag: `json:"kty"`,
+ }); err != nil {
+ panic(fmt.Errorf("failed to register mandatory probe for 'kty' field: %w", err))
+ }
+ if err := RegisterProbeField(reflect.StructField{
+ Name: "D",
+ Type: reflect.TypeOf(json.RawMessage(nil)),
+ Tag: `json:"d,omitempty"`,
+ }); err != nil {
+ panic(fmt.Errorf("failed to register mandatory probe for 'kty' field: %w", err))
+ }
+}
+
+// Import creates a jwk.Key from the given key (RSA/ECDSA/symmetric keys).
+//
+// The constructor auto-detects the type of key to be instantiated
+// based on the input type:
+//
+// - "crypto/rsa".PrivateKey and "crypto/rsa".PublicKey creates an RSA based key
+// - "crypto/ecdsa".PrivateKey and "crypto/ecdsa".PublicKey creates an EC based key
+// - "crypto/ed25519".PrivateKey and "crypto/ed25519".PublicKey creates an OKP based key
+// - "crypto/ecdh".PrivateKey and "crypto/ecdh".PublicKey creates an OKP based key
+// - []byte creates a symmetric key
+func Import(raw any) (Key, error) {
+ if raw == nil {
+ return nil, importerr(`a non-nil key is required`)
+ }
+
+ muKeyImporters.RLock()
+ conv, ok := keyImporters[reflect.TypeOf(raw)]
+ muKeyImporters.RUnlock()
+ if !ok {
+ return nil, importerr(`failed to convert %T to jwk.Key: no converters were able to convert`, raw)
+ }
+
+ return conv.Import(raw)
+}
+
+// PublicSetOf returns a new jwk.Set consisting of
+// public keys of the keys contained in the set.
+//
+// This is useful when you are generating a set of private keys, and
+// you want to generate the corresponding public versions for the
+// users to verify with.
+//
+// Be aware that all fields will be copied onto the new public key. It is the caller's
+// responsibility to remove any fields, if necessary.
+func PublicSetOf(v Set) (Set, error) {
+ newSet := NewSet()
+
+ n := v.Len()
+ for i := range n {
+ k, ok := v.Key(i)
+ if !ok {
+ return nil, fmt.Errorf(`key not found`)
+ }
+ pubKey, err := PublicKeyOf(k)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to get public key of %T: %w`, k, err)
+ }
+ if err := newSet.AddKey(pubKey); err != nil {
+ return nil, fmt.Errorf(`failed to add key to public key set: %w`, err)
+ }
+ }
+
+ return newSet, nil
+}
+
+// PublicKeyOf returns the corresponding public version of the jwk.Key.
+// If `v` is a SymmetricKey, then the same value is returned.
+// If `v` is already a public key, the key itself is returned.
+//
+// If `v` is a private key type that has a `PublicKey()` method, be aware
+// that all fields will be copied onto the new public key. It is the caller's
+// responsibility to remove any fields, if necessary
+//
+// If `v` is a raw key, the key is first converted to a `jwk.Key`
+func PublicKeyOf(v any) (Key, error) {
+ // This should catch all jwk.Key instances
+ if pk, ok := v.(PublicKeyer); ok {
+ return pk.PublicKey()
+ }
+
+ jk, err := Import(v)
+ if err != nil {
+ return nil, fmt.Errorf(`jwk.PublicKeyOf: failed to convert key into JWK: %w`, err)
+ }
+
+ return jk.PublicKey()
+}
+
+// PublicRawKeyOf returns the corresponding public key of the given
+// value `v` (e.g. given *rsa.PrivateKey, *rsa.PublicKey is returned)
+// If `v` is already a public key, the key itself is returned.
+//
+// The returned value will always be a pointer to the public key,
+// except when a []byte (e.g. symmetric key, ed25519 key) is passed to `v`.
+// In this case, the same []byte value is returned.
+//
+// This function must go through converting the object once to a jwk.Key,
+// then back to a raw key, so it's not exactly efficient.
+func PublicRawKeyOf(v any) (any, error) {
+ pk, ok := v.(PublicKeyer)
+ if !ok {
+ k, err := Import(v)
+ if err != nil {
+ return nil, fmt.Errorf(`jwk.PublicRawKeyOf: failed to convert key to jwk.Key: %w`, err)
+ }
+
+ pk, ok = k.(PublicKeyer)
+ if !ok {
+ return nil, fmt.Errorf(`jwk.PublicRawKeyOf: failed to convert key to jwk.PublicKeyer: %w`, err)
+ }
+ }
+
+ pubk, err := pk.PublicKey()
+ if err != nil {
+ return nil, fmt.Errorf(`jwk.PublicRawKeyOf: failed to obtain public key from %T: %w`, v, err)
+ }
+
+ var raw any
+ if err := Export(pubk, &raw); err != nil {
+ return nil, fmt.Errorf(`jwk.PublicRawKeyOf: failed to obtain raw key from %T: %w`, pubk, err)
+ }
+ return raw, nil
+}
+
+// ParseRawKey is a combination of ParseKey and Raw. It parses a single JWK key,
+// and assigns the "raw" key to the given parameter. The key must either be
+// a pointer to an empty interface, or a pointer to the actual raw key type
+// such as *rsa.PrivateKey, *ecdsa.PublicKey, *[]byte, etc.
+func ParseRawKey(data []byte, rawkey any) error {
+ key, err := ParseKey(data)
+ if err != nil {
+ return fmt.Errorf(`failed to parse key: %w`, err)
+ }
+
+ if err := Export(key, rawkey); err != nil {
+ return fmt.Errorf(`failed to assign to raw key variable: %w`, err)
+ }
+
+ return nil
+}
+
+type setDecodeCtx struct {
+ json.DecodeCtx
+
+ ignoreParseError bool
+}
+
+func (ctx *setDecodeCtx) IgnoreParseError() bool {
+ return ctx.ignoreParseError
+}
+
+// ParseKey parses a single key JWK. Unlike `jwk.Parse` this method will
+// report failure if you attempt to pass a JWK set. Only use this function
+// when you know that the data is a single JWK.
+//
+// Given a WithPEM(true) option, this function assumes that the given input
+// is PEM encoded ASN.1 DER format key.
+//
+// Note that a successful parsing of any type of key does NOT necessarily
+// guarantee a valid key. For example, no checks against expiration dates
+// are performed for certificate expiration, no checks against missing
+// parameters are performed, etc.
+func ParseKey(data []byte, options ...ParseOption) (Key, error) {
+ var parsePEM bool
+ var localReg *json.Registry
+ var pemDecoder PEMDecoder
+ for _, option := range options {
+ switch option.Ident() {
+ case identPEM{}:
+ if err := option.Value(&parsePEM); err != nil {
+ return nil, fmt.Errorf(`failed to retrieve PEM option value: %w`, err)
+ }
+ case identPEMDecoder{}:
+ if err := option.Value(&pemDecoder); err != nil {
+ return nil, fmt.Errorf(`failed to retrieve PEMDecoder option value: %w`, err)
+ }
+ case identLocalRegistry{}:
+ if err := option.Value(&localReg); err != nil {
+ return nil, fmt.Errorf(`failed to retrieve local registry option value: %w`, err)
+ }
+ case identTypedField{}:
+ var pair typedFieldPair // temporary var needed for typed field
+ if err := option.Value(&pair); err != nil {
+ return nil, fmt.Errorf(`failed to retrieve typed field option value: %w`, err)
+ }
+ if localReg == nil {
+ localReg = json.NewRegistry()
+ }
+ localReg.Register(pair.Name, pair.Value)
+ case identIgnoreParseError{}:
+ return nil, fmt.Errorf(`jwk.WithIgnoreParseError() cannot be used for ParseKey()`)
+ }
+ }
+
+ if parsePEM {
+ var raw any
+
+ // PEMDecoder should probably be deprecated, because of being a misnomer.
+ if pemDecoder != nil {
+ if err := decodeX509WithPEMDEcoder(&raw, data, pemDecoder); err != nil {
+ return nil, fmt.Errorf(`failed to decode PEM encoded key: %w`, err)
+ }
+ } else {
+ // This version takes into account the various X509 decoders that are
+ // pre-registered.
+ if err := decodeX509(&raw, data); err != nil {
+ return nil, fmt.Errorf(`failed to decode X.509 encoded key: %w`, err)
+ }
+ }
+ return Import(raw)
+ }
+
+ probe, err := keyProbe.Probe(data)
+ if err != nil {
+ return nil, fmt.Errorf(`jwk.Parse: failed to probe data: %w`, err)
+ }
+
+ unmarshaler := keyUnmarshaler{localReg: localReg}
+
+ muKeyParser.RLock()
+ parsers := make([]KeyParser, len(keyParsers))
+ copy(parsers, keyParsers)
+ muKeyParser.RUnlock()
+
+ for i := len(parsers) - 1; i >= 0; i-- {
+ parser := parsers[i]
+ key, err := parser.ParseKey(probe, &unmarshaler, data)
+ if err == nil {
+ return key, nil
+ }
+
+ if errors.Is(err, ContinueError()) {
+ continue
+ }
+
+ return nil, err
+ }
+ return nil, fmt.Errorf(`jwk.Parse: no parser was able to parse the key`)
+}
+
+// Parse parses JWK from the incoming []byte.
+//
+// For JWK sets, this is a convenience function. You could just as well
+// call `json.Unmarshal` against an empty set created by `jwk.NewSet()`
+// to parse a JSON buffer into a `jwk.Set`.
+//
+// This function exists because many times the user does not know before hand
+// if a JWK(s) resource at a remote location contains a single JWK key or
+// a JWK set, and `jwk.Parse()` can handle either case, returning a JWK Set
+// even if the data only contains a single JWK key
+//
+// If you are looking for more information on how JWKs are parsed, or if
+// you know for sure that you have a single key, please see the documentation
+// for `jwk.ParseKey()`.
+func Parse(src []byte, options ...ParseOption) (Set, error) {
+ var parsePEM bool
+ var parseX509 bool
+ var localReg *json.Registry
+ var ignoreParseError bool
+ var pemDecoder PEMDecoder
+ for _, option := range options {
+ switch option.Ident() {
+ case identPEM{}:
+ if err := option.Value(&parsePEM); err != nil {
+ return nil, parseerr(`failed to retrieve PEM option value: %w`, err)
+ }
+ case identX509{}:
+ if err := option.Value(&parseX509); err != nil {
+ return nil, parseerr(`failed to retrieve X509 option value: %w`, err)
+ }
+ case identPEMDecoder{}:
+ if err := option.Value(&pemDecoder); err != nil {
+ return nil, parseerr(`failed to retrieve PEMDecoder option value: %w`, err)
+ }
+ case identIgnoreParseError{}:
+ if err := option.Value(&ignoreParseError); err != nil {
+ return nil, parseerr(`failed to retrieve IgnoreParseError option value: %w`, err)
+ }
+ case identTypedField{}:
+ var pair typedFieldPair // temporary var needed for typed field
+ if err := option.Value(&pair); err != nil {
+ return nil, parseerr(`failed to retrieve typed field option value: %w`, err)
+ }
+ if localReg == nil {
+ localReg = json.NewRegistry()
+ }
+ localReg.Register(pair.Name, pair.Value)
+ }
+ }
+
+ s := NewSet()
+
+ if parsePEM || parseX509 {
+ if pemDecoder == nil {
+ pemDecoder = NewPEMDecoder()
+ }
+ src = bytes.TrimSpace(src)
+ for len(src) > 0 {
+ raw, rest, err := pemDecoder.Decode(src)
+ if err != nil {
+ return nil, parseerr(`failed to parse PEM encoded key: %w`, err)
+ }
+ key, err := Import(raw)
+ if err != nil {
+ return nil, parseerr(`failed to create jwk.Key from %T: %w`, raw, err)
+ }
+ if err := s.AddKey(key); err != nil {
+ return nil, parseerr(`failed to add jwk.Key to set: %w`, err)
+ }
+ src = bytes.TrimSpace(rest)
+ }
+ return s, nil
+ }
+
+ if localReg != nil || ignoreParseError {
+ dcKs, ok := s.(KeyWithDecodeCtx)
+ if !ok {
+ return nil, parseerr(`typed field was requested, but the key set (%T) does not support DecodeCtx`, s)
+ }
+ dc := &setDecodeCtx{
+ DecodeCtx: json.NewDecodeCtx(localReg),
+ ignoreParseError: ignoreParseError,
+ }
+ dcKs.SetDecodeCtx(dc)
+ defer func() { dcKs.SetDecodeCtx(nil) }()
+ }
+
+ if err := json.Unmarshal(src, s); err != nil {
+ return nil, parseerr(`failed to unmarshal JWK set: %w`, err)
+ }
+
+ return s, nil
+}
+
+// ParseReader parses a JWK set from the incoming byte buffer.
+func ParseReader(src io.Reader, options ...ParseOption) (Set, error) {
+ // meh, there's no way to tell if a stream has "ended" a single
+ // JWKs except when we encounter an EOF, so just... ReadAll
+ buf, err := io.ReadAll(src)
+ if err != nil {
+ return nil, rparseerr(`failed to read from io.Reader: %w`, err)
+ }
+
+ set, err := Parse(buf, options...)
+ if err != nil {
+ return nil, rparseerr(`failed to parse reader: %w`, err)
+ }
+ return set, nil
+}
+
+// ParseString parses a JWK set from the incoming string.
+func ParseString(s string, options ...ParseOption) (Set, error) {
+ set, err := Parse([]byte(s), options...)
+ if err != nil {
+ return nil, sparseerr(`failed to parse string: %w`, err)
+ }
+ return set, nil
+}
+
+// AssignKeyID is a convenience function to automatically assign the "kid"
+// section of the key, if it already doesn't have one. It uses Key.Thumbprint
+// method with crypto.SHA256 as the default hashing algorithm
+func AssignKeyID(key Key, options ...AssignKeyIDOption) error {
+ if key.Has(KeyIDKey) {
+ return nil
+ }
+
+ hash := crypto.SHA256
+ for _, option := range options {
+ switch option.Ident() {
+ case identThumbprintHash{}:
+ if err := option.Value(&hash); err != nil {
+ return fmt.Errorf(`failed to retrieve thumbprint hash option value: %w`, err)
+ }
+ }
+ }
+
+ h, err := key.Thumbprint(hash)
+ if err != nil {
+ return fmt.Errorf(`failed to generate thumbprint: %w`, err)
+ }
+
+ if err := key.Set(KeyIDKey, base64.EncodeToString(h)); err != nil {
+ return fmt.Errorf(`failed to set "kid": %w`, err)
+ }
+
+ return nil
+}
+
+// NOTE: may need to remove this to allow pluggale key types
+func cloneKey(src Key) (Key, error) {
+ var dst Key
+ switch src.(type) {
+ case RSAPrivateKey:
+ dst = newRSAPrivateKey()
+ case RSAPublicKey:
+ dst = newRSAPublicKey()
+ case ECDSAPrivateKey:
+ dst = newECDSAPrivateKey()
+ case ECDSAPublicKey:
+ dst = newECDSAPublicKey()
+ case OKPPrivateKey:
+ dst = newOKPPrivateKey()
+ case OKPPublicKey:
+ dst = newOKPPublicKey()
+ case SymmetricKey:
+ dst = newSymmetricKey()
+ default:
+ return nil, fmt.Errorf(`jwk.cloneKey: unknown key type %T`, src)
+ }
+
+ for _, k := range src.Keys() {
+ // It's absolutely
+ var v any
+ if err := src.Get(k, &v); err != nil {
+ return nil, fmt.Errorf(`jwk.cloneKey: failed to get %q: %w`, k, err)
+ }
+ if err := dst.Set(k, v); err != nil {
+ return nil, fmt.Errorf(`jwk.cloneKey: failed to set %q: %w`, k, err)
+ }
+ }
+ return dst, nil
+}
+
+// Pem serializes the given jwk.Key in PEM encoded ASN.1 DER format,
+// using either PKCS8 for private keys and PKIX for public keys.
+// If you need to encode using PKCS1 or SEC1, you must do it yourself.
+//
+// # Argument must be of type jwk.Key or jwk.Set
+//
+// Currently only EC (including Ed25519) and RSA keys (and jwk.Set
+// comprised of these key types) are supported.
+func Pem(v any) ([]byte, error) {
+ var set Set
+ switch v := v.(type) {
+ case Key:
+ set = NewSet()
+ if err := set.AddKey(v); err != nil {
+ return nil, fmt.Errorf(`failed to add key to set: %w`, err)
+ }
+ case Set:
+ set = v
+ default:
+ return nil, fmt.Errorf(`argument to Pem must be either jwk.Key or jwk.Set: %T`, v)
+ }
+
+ var ret []byte
+ for i := range set.Len() {
+ key, _ := set.Key(i)
+ typ, buf, err := asnEncode(key)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to encode content for key #%d: %w`, i, err)
+ }
+
+ var block pem.Block
+ block.Type = typ
+ block.Bytes = buf
+ ret = append(ret, pem.EncodeToMemory(&block)...)
+ }
+ return ret, nil
+}
+
+func asnEncode(key Key) (string, []byte, error) {
+ switch key := key.(type) {
+ case ECDSAPrivateKey:
+ var rawkey ecdsa.PrivateKey
+ if err := Export(key, &rawkey); err != nil {
+ return "", nil, fmt.Errorf(`failed to get raw key from jwk.Key: %w`, err)
+ }
+ buf, err := x509.MarshalECPrivateKey(&rawkey)
+ if err != nil {
+ return "", nil, fmt.Errorf(`failed to marshal PKCS8: %w`, err)
+ }
+ return pmECPrivateKey, buf, nil
+ case RSAPrivateKey, OKPPrivateKey:
+ var rawkey any
+ if err := Export(key, &rawkey); err != nil {
+ return "", nil, fmt.Errorf(`failed to get raw key from jwk.Key: %w`, err)
+ }
+ buf, err := x509.MarshalPKCS8PrivateKey(rawkey)
+ if err != nil {
+ return "", nil, fmt.Errorf(`failed to marshal PKCS8: %w`, err)
+ }
+ return pmPrivateKey, buf, nil
+ case RSAPublicKey, ECDSAPublicKey, OKPPublicKey:
+ var rawkey any
+ if err := Export(key, &rawkey); err != nil {
+ return "", nil, fmt.Errorf(`failed to get raw key from jwk.Key: %w`, err)
+ }
+ buf, err := x509.MarshalPKIXPublicKey(rawkey)
+ if err != nil {
+ return "", nil, fmt.Errorf(`failed to marshal PKIX: %w`, err)
+ }
+ return pmPublicKey, buf, nil
+ default:
+ return "", nil, fmt.Errorf(`unsupported key type %T`, key)
+ }
+}
+
+type CustomDecoder = json.CustomDecoder
+type CustomDecodeFunc = json.CustomDecodeFunc
+
+// RegisterCustomField allows users to specify that a private field
+// be decoded as an instance of the specified type. This option has
+// a global effect.
+//
+// For example, suppose you have a custom field `x-birthday`, which
+// you want to represent as a string formatted in RFC3339 in JSON,
+// but want it back as `time.Time`.
+//
+// In such case you would register a custom field as follows
+//
+// jwk.RegisterCustomField(`x-birthday`, time.Time{})
+//
+// Then you can use a `time.Time` variable to extract the value
+// of `x-birthday` field, instead of having to use `any`
+// and later convert it to `time.Time`
+//
+// var bday time.Time
+// _ = key.Get(`x-birthday`, &bday)
+//
+// If you need a more fine-tuned control over the decoding process,
+// you can register a `CustomDecoder`. For example, below shows
+// how to register a decoder that can parse RFC1123 format string:
+//
+// jwk.RegisterCustomField(`x-birthday`, jwk.CustomDecodeFunc(func(data []byte) (any, error) {
+// return time.Parse(time.RFC1123, string(data))
+// }))
+//
+// Please note that use of custom fields can be problematic if you
+// are using a library that does not implement MarshalJSON/UnmarshalJSON
+// and you try to roundtrip from an object to JSON, and then back to an object.
+// For example, in the above example, you can _parse_ time values formatted
+// in the format specified in RFC822, but when you convert an object into
+// JSON, it will be formatted in RFC3339, because that's what `time.Time`
+// likes to do. To avoid this, it's always better to use a custom type
+// that wraps your desired type (in this case `time.Time`) and implement
+// MarshalJSON and UnmashalJSON.
+func RegisterCustomField(name string, object any) {
+ registry.Register(name, object)
+}
+
+// Equal compares two keys and returns true if they are equal. The comparison
+// is solely done against the thumbprints of k1 and k2. It is possible for keys
+// that have, for example, different key IDs, key usage, etc, to be considered equal.
+func Equal(k1, k2 Key) bool {
+ h := crypto.SHA256
+ tp1, err := k1.Thumbprint(h)
+ if err != nil {
+ return false // can't report error
+ }
+ tp2, err := k2.Thumbprint(h)
+ if err != nil {
+ return false // can't report error
+ }
+
+ return bytes.Equal(tp1, tp2)
+}
+
+// IsPrivateKey returns true if the supplied key is a private key of an
+// asymmetric key pair. The argument `k` must implement the `AsymmetricKey`
+// interface.
+//
+// An error is returned if the supplied key is not an `AsymmetricKey`.
+func IsPrivateKey(k Key) (bool, error) {
+ asymmetric, ok := k.(AsymmetricKey)
+ if ok {
+ return asymmetric.IsPrivate(), nil
+ }
+ return false, fmt.Errorf("jwk.IsPrivateKey: %T is not an asymmetric key", k)
+}
+
+type keyValidationError struct {
+ err error
+}
+
+func (e *keyValidationError) Error() string {
+ return fmt.Sprintf(`key validation failed: %s`, e.err)
+}
+
+func (e *keyValidationError) Unwrap() error {
+ return e.err
+}
+
+func (e *keyValidationError) Is(target error) bool {
+ _, ok := target.(*keyValidationError)
+ return ok
+}
+
+// NewKeyValidationError wraps the given error with an error that denotes
+// `key.Validate()` has failed. This error type should ONLY be used as
+// return value from the `Validate()` method.
+func NewKeyValidationError(err error) error {
+ return &keyValidationError{err: err}
+}
+
+func IsKeyValidationError(err error) bool {
+ var kve keyValidationError
+ return errors.Is(err, &kve)
+}
+
+// Configure is used to configure global behavior of the jwk package.
+func Configure(options ...GlobalOption) {
+ var strictKeyUsagePtr *bool
+ for _, option := range options {
+ switch option.Ident() {
+ case identStrictKeyUsage{}:
+ var v bool
+ if err := option.Value(&v); err != nil {
+ continue
+ }
+ strictKeyUsagePtr = &v
+ }
+ }
+
+ if strictKeyUsagePtr != nil {
+ strictKeyUsage.Store(*strictKeyUsagePtr)
+ }
+}
+
+// These are used when validating keys.
+type keyWithD interface {
+ D() ([]byte, bool)
+}
+
+var _ keyWithD = &okpPrivateKey{}
+
+func extractEmbeddedKey(keyif Key, concretTypes []reflect.Type) (Key, error) {
+ rv := reflect.ValueOf(keyif)
+
+ // If the value can be converted to one of the concrete types, then we're done
+ for _, t := range concretTypes {
+ if rv.Type().ConvertibleTo(t) {
+ return keyif, nil
+ }
+ }
+
+ // When a struct implements the Key interface via embedding, you unfortunately
+ // cannot use a type switch to determine the concrete type, because
+ if rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ return nil, fmt.Errorf(`invalid key value (0): %w`, ContinueError())
+ }
+ rv = rv.Elem()
+ }
+
+ if rv.Kind() != reflect.Struct {
+ return nil, fmt.Errorf(`invalid key value type %T (1): %w`, keyif, ContinueError())
+ }
+ if rv.NumField() == 0 {
+ return nil, fmt.Errorf(`invalid key value type %T (2): %w`, keyif, ContinueError())
+ }
+ // Iterate through the fields of the struct to find the first field that
+ // implements the Key interface
+ rt := rv.Type()
+ for i := range rv.NumField() {
+ field := rv.Field(i)
+ ft := rt.Field(i)
+ if !ft.Anonymous {
+ // We can only salvage this object if the object implements jwk.Key
+ // via embedding, so we skip fields that are not anonymous
+ continue
+ }
+
+ if field.CanInterface() {
+ if k, ok := field.Interface().(Key); ok {
+ return extractEmbeddedKey(k, concretTypes)
+ }
+ }
+ }
+
+ return nil, fmt.Errorf(`invalid key value type %T (3): %w`, keyif, ContinueError())
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/jwkbb/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwk/jwkbb/BUILD.bazel
new file mode 100644
index 0000000000..68a4ccdc19
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/jwkbb/BUILD.bazel
@@ -0,0 +1,17 @@
+load("@rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "jwkbb",
+ srcs = ["x509.go"],
+ importpath = "github.com/lestrrat-go/jwx/v3/jwk/jwkbb",
+ visibility = ["//visibility:public"],
+ deps = [
+ "@com_github_lestrrat_go_blackmagic//:blackmagic",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":jwkbb",
+ visibility = ["//visibility:public"],
+)
\ No newline at end of file
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/jwkbb/x509.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/jwkbb/x509.go
new file mode 100644
index 0000000000..3c827cfa6f
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/jwkbb/x509.go
@@ -0,0 +1,111 @@
+package jwkbb
+
+import (
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+
+ "github.com/lestrrat-go/blackmagic"
+)
+
+const (
+ PrivateKeyBlockType = `PRIVATE KEY`
+ PublicKeyBlockType = `PUBLIC KEY`
+ ECPrivateKeyBlockType = `EC PRIVATE KEY`
+ RSAPublicKeyBlockType = `RSA PUBLIC KEY`
+ RSAPrivateKeyBlockType = `RSA PRIVATE KEY`
+ CertificateBlockType = `CERTIFICATE`
+)
+
+// EncodeX509 encodes the given value into ASN.1 DER format, and returns
+// the encoded bytes. The value must be one of the following types:
+// *rsa.PrivateKey, *ecdsa.PrivateKey, ed25519.PrivateKey,
+// *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey.
+//
+// Users can pass a pre-allocated byte slice (but make sure its length is
+// changed so that the encoded buffer is appended to the correct location)
+// as `dst` to avoid allocations.
+func EncodeX509(dst []byte, v any) ([]byte, error) {
+ var block pem.Block
+ // Try to convert it into a certificate
+ switch v := v.(type) {
+ case *rsa.PrivateKey:
+ block.Type = RSAPrivateKeyBlockType
+ block.Bytes = x509.MarshalPKCS1PrivateKey(v)
+ case *ecdsa.PrivateKey:
+ marshaled, err := x509.MarshalECPrivateKey(v)
+ if err != nil {
+ return nil, err
+ }
+ block.Type = ECPrivateKeyBlockType
+ block.Bytes = marshaled
+ case ed25519.PrivateKey:
+ marshaled, err := x509.MarshalPKCS8PrivateKey(v)
+ if err != nil {
+ return nil, err
+ }
+ block.Type = PrivateKeyBlockType
+ block.Bytes = marshaled
+ case *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey:
+ marshaled, err := x509.MarshalPKIXPublicKey(v)
+ if err != nil {
+ return nil, err
+ }
+ block.Type = PublicKeyBlockType
+ block.Bytes = marshaled
+ default:
+ return nil, fmt.Errorf(`unsupported type %T for ASN.1 DER encoding`, v)
+ }
+
+ encoded := pem.EncodeToMemory(&block)
+ dst = append(dst, encoded...)
+ return dst, nil
+}
+
+func DecodeX509(dst any, block *pem.Block) error {
+ switch block.Type {
+ // Handle the semi-obvious cases
+ case RSAPrivateKeyBlockType:
+ key, err := x509.ParsePKCS1PrivateKey(block.Bytes)
+ if err != nil {
+ return fmt.Errorf(`failed to parse PKCS1 private key: %w`, err)
+ }
+ return blackmagic.AssignIfCompatible(dst, key)
+ case RSAPublicKeyBlockType:
+ key, err := x509.ParsePKCS1PublicKey(block.Bytes)
+ if err != nil {
+ return fmt.Errorf(`failed to parse PKCS1 public key: %w`, err)
+ }
+ return blackmagic.AssignIfCompatible(dst, key)
+ case ECPrivateKeyBlockType:
+ key, err := x509.ParseECPrivateKey(block.Bytes)
+ if err != nil {
+ return fmt.Errorf(`failed to parse EC private key: %w`, err)
+ }
+ return blackmagic.AssignIfCompatible(dst, key)
+ case PublicKeyBlockType:
+ // XXX *could* return dsa.PublicKey
+ key, err := x509.ParsePKIXPublicKey(block.Bytes)
+ if err != nil {
+ return fmt.Errorf(`failed to parse PKIX public key: %w`, err)
+ }
+ return blackmagic.AssignIfCompatible(dst, key)
+ case PrivateKeyBlockType:
+ key, err := x509.ParsePKCS8PrivateKey(block.Bytes)
+ if err != nil {
+ return fmt.Errorf(`failed to parse PKCS8 private key: %w`, err)
+ }
+ return blackmagic.AssignIfCompatible(dst, key)
+ case CertificateBlockType:
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return fmt.Errorf(`failed to parse certificate: %w`, err)
+ }
+ return blackmagic.AssignIfCompatible(dst, cert.PublicKey)
+ default:
+ return fmt.Errorf(`invalid PEM block type %s`, block.Type)
+ }
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/key_ops.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/key_ops.go
new file mode 100644
index 0000000000..b8c229b3af
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/key_ops.go
@@ -0,0 +1,58 @@
+package jwk
+
+import "fmt"
+
+func (ops *KeyOperationList) Get() KeyOperationList {
+ if ops == nil {
+ return nil
+ }
+ return *ops
+}
+
+func (ops *KeyOperationList) Accept(v any) error {
+ switch x := v.(type) {
+ case string:
+ return ops.Accept([]string{x})
+ case []any:
+ l := make([]string, len(x))
+ for i, e := range x {
+ if es, ok := e.(string); ok {
+ l[i] = es
+ } else {
+ return fmt.Errorf(`invalid list element type: expected string, got %T`, v)
+ }
+ }
+ return ops.Accept(l)
+ case []string:
+ list := make(KeyOperationList, len(x))
+ for i, e := range x {
+ switch e := KeyOperation(e); e {
+ case KeyOpSign, KeyOpVerify, KeyOpEncrypt, KeyOpDecrypt, KeyOpWrapKey, KeyOpUnwrapKey, KeyOpDeriveKey, KeyOpDeriveBits:
+ list[i] = e
+ default:
+ return fmt.Errorf(`invalid keyoperation %v`, e)
+ }
+ }
+
+ *ops = list
+ return nil
+ case []KeyOperation:
+ list := make(KeyOperationList, len(x))
+ for i, e := range x {
+ switch e {
+ case KeyOpSign, KeyOpVerify, KeyOpEncrypt, KeyOpDecrypt, KeyOpWrapKey, KeyOpUnwrapKey, KeyOpDeriveKey, KeyOpDeriveBits:
+ list[i] = e
+ default:
+ return fmt.Errorf(`invalid keyoperation %v`, e)
+ }
+ }
+
+ *ops = list
+ return nil
+ case KeyOperationList:
+ *ops = x
+ return nil
+ default:
+ return fmt.Errorf(`invalid value %T`, v)
+ }
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/okp.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/okp.go
new file mode 100644
index 0000000000..773734b660
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/okp.go
@@ -0,0 +1,321 @@
+package jwk
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdh"
+ "crypto/ed25519"
+ "fmt"
+ "reflect"
+
+ "github.com/lestrrat-go/blackmagic"
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+)
+
+func init() {
+ RegisterKeyExporter(jwa.OKP(), KeyExportFunc(okpJWKToRaw))
+}
+
+// Mental note:
+//
+// Curve25519 refers to a particular curve, and is represented in its Montgomery form.
+//
+// Ed25519 refers to the biratinally equivalent curve of Curve25519, except it's in Edwards form.
+// Ed25519 is the name of the curve and the also the signature scheme using that curve.
+// The full name of the scheme is Edwards Curve Digital Signature Algorithm, and thus it is
+// also referred to as EdDSA.
+//
+// X25519 refers to the Diffie-Hellman key exchange protocol that uses Cruve25519.
+// Because this is an elliptic curve based Diffie Hellman protocol, it is also referred to
+// as ECDH.
+//
+// OKP keys are used to represent private/public pairs of thse elliptic curve
+// keys. But note that the name just means Octet Key Pair.
+
+func (k *okpPublicKey) Import(rawKeyIf any) error {
+ k.mu.Lock()
+ defer k.mu.Unlock()
+
+ var crv jwa.EllipticCurveAlgorithm
+ switch rawKey := rawKeyIf.(type) {
+ case ed25519.PublicKey:
+ k.x = rawKey
+ crv = jwa.Ed25519()
+ k.crv = &crv
+ case *ecdh.PublicKey:
+ k.x = rawKey.Bytes()
+ crv = jwa.X25519()
+ k.crv = &crv
+ default:
+ return fmt.Errorf(`unknown key type %T`, rawKeyIf)
+ }
+
+ return nil
+}
+
+func (k *okpPrivateKey) Import(rawKeyIf any) error {
+ k.mu.Lock()
+ defer k.mu.Unlock()
+
+ var crv jwa.EllipticCurveAlgorithm
+ switch rawKey := rawKeyIf.(type) {
+ case ed25519.PrivateKey:
+ k.d = rawKey.Seed()
+ k.x = rawKey.Public().(ed25519.PublicKey) //nolint:forcetypeassert
+ crv = jwa.Ed25519()
+ k.crv = &crv
+ case *ecdh.PrivateKey:
+ // k.d = rawKey.Seed()
+ k.d = rawKey.Bytes()
+ k.x = rawKey.PublicKey().Bytes()
+ crv = jwa.X25519()
+ k.crv = &crv
+ default:
+ return fmt.Errorf(`unknown key type %T`, rawKeyIf)
+ }
+
+ return nil
+}
+
+func buildOKPPublicKey(alg jwa.EllipticCurveAlgorithm, xbuf []byte) (any, error) {
+ switch alg {
+ case jwa.Ed25519():
+ return ed25519.PublicKey(xbuf), nil
+ case jwa.X25519():
+ ret, err := ecdh.X25519().NewPublicKey(xbuf)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to parse x25519 public key %x (size %d): %w`, xbuf, len(xbuf), err)
+ }
+ return ret, nil
+ default:
+ return nil, fmt.Errorf(`invalid curve algorithm %s`, alg)
+ }
+}
+
+// Raw returns the EC-DSA public key represented by this JWK
+func (k *okpPublicKey) Raw(v any) error {
+ k.mu.RLock()
+ defer k.mu.RUnlock()
+
+ crv, ok := k.Crv()
+ if !ok {
+ return fmt.Errorf(`missing "crv" field`)
+ }
+
+ pubk, err := buildOKPPublicKey(crv, k.x)
+ if err != nil {
+ return fmt.Errorf(`jwk.OKPPublicKey: failed to build public key: %w`, err)
+ }
+
+ if err := blackmagic.AssignIfCompatible(v, pubk); err != nil {
+ return fmt.Errorf(`jwk.OKPPublicKey: failed to assign to destination variable: %w`, err)
+ }
+ return nil
+}
+
+func buildOKPPrivateKey(alg jwa.EllipticCurveAlgorithm, xbuf []byte, dbuf []byte) (any, error) {
+ if len(dbuf) == 0 {
+ return nil, fmt.Errorf(`cannot use empty seed`)
+ }
+ switch alg {
+ case jwa.Ed25519():
+ if len(dbuf) != ed25519.SeedSize {
+ return nil, fmt.Errorf(`ed25519: wrong private key size`)
+ }
+ ret := ed25519.NewKeyFromSeed(dbuf)
+ //nolint:forcetypeassert
+ if !bytes.Equal(xbuf, ret.Public().(ed25519.PublicKey)) {
+ return nil, fmt.Errorf(`ed25519: invalid x value given d value`)
+ }
+ return ret, nil
+ case jwa.X25519():
+ ret, err := ecdh.X25519().NewPrivateKey(dbuf)
+ if err != nil {
+ return nil, fmt.Errorf(`x25519: unable to construct x25519 private key from seed: %w`, err)
+ }
+ return ret, nil
+ default:
+ return nil, fmt.Errorf(`invalid curve algorithm %s`, alg)
+ }
+}
+
+var okpConvertibleKeys = []reflect.Type{
+ reflect.TypeOf((*OKPPrivateKey)(nil)).Elem(),
+ reflect.TypeOf((*OKPPublicKey)(nil)).Elem(),
+}
+
+// This is half baked. I think it will blow up if we used ecdh.* keys and/or x25519 keys
+func okpJWKToRaw(key Key, _ any /* this is unused because this is half baked */) (any, error) {
+ extracted, err := extractEmbeddedKey(key, okpConvertibleKeys)
+ if err != nil {
+ return nil, fmt.Errorf(`jwk.OKP: failed to extract embedded key: %w`, err)
+ }
+
+ switch key := extracted.(type) {
+ case OKPPrivateKey:
+ locker, ok := key.(rlocker)
+ if ok {
+ locker.rlock()
+ defer locker.runlock()
+ }
+
+ crv, ok := key.Crv()
+ if !ok {
+ return nil, fmt.Errorf(`missing "crv" field`)
+ }
+
+ x, ok := key.X()
+ if !ok {
+ return nil, fmt.Errorf(`missing "x" field`)
+ }
+
+ d, ok := key.D()
+ if !ok {
+ return nil, fmt.Errorf(`missing "d" field`)
+ }
+
+ privk, err := buildOKPPrivateKey(crv, x, d)
+ if err != nil {
+ return nil, fmt.Errorf(`jwk.OKPPrivateKey: failed to build private key: %w`, err)
+ }
+ return privk, nil
+ case OKPPublicKey:
+ locker, ok := key.(rlocker)
+ if ok {
+ locker.rlock()
+ defer locker.runlock()
+ }
+
+ crv, ok := key.Crv()
+ if !ok {
+ return nil, fmt.Errorf(`missing "crv" field`)
+ }
+
+ x, ok := key.X()
+ if !ok {
+ return nil, fmt.Errorf(`missing "x" field`)
+ }
+ pubk, err := buildOKPPublicKey(crv, x)
+ if err != nil {
+ return nil, fmt.Errorf(`jwk.OKPPublicKey: failed to build public key: %w`, err)
+ }
+ return pubk, nil
+ default:
+ return nil, ContinueError()
+ }
+}
+
+func makeOKPPublicKey(src Key) (Key, error) {
+ newKey := newOKPPublicKey()
+
+ // Iterate and copy everything except for the bits that should not be in the public key
+ for _, k := range src.Keys() {
+ switch k {
+ case OKPDKey:
+ continue
+ default:
+ var v any
+ if err := src.Get(k, &v); err != nil {
+ return nil, fmt.Errorf(`failed to get field %q: %w`, k, err)
+ }
+
+ if err := newKey.Set(k, v); err != nil {
+ return nil, fmt.Errorf(`failed to set field %q: %w`, k, err)
+ }
+ }
+ }
+
+ return newKey, nil
+}
+
+func (k *okpPrivateKey) PublicKey() (Key, error) {
+ return makeOKPPublicKey(k)
+}
+
+func (k *okpPublicKey) PublicKey() (Key, error) {
+ return makeOKPPublicKey(k)
+}
+
+func okpThumbprint(hash crypto.Hash, crv, x string) []byte {
+ h := hash.New()
+ fmt.Fprint(h, `{"crv":"`)
+ fmt.Fprint(h, crv)
+ fmt.Fprint(h, `","kty":"OKP","x":"`)
+ fmt.Fprint(h, x)
+ fmt.Fprint(h, `"}`)
+ return h.Sum(nil)
+}
+
+// Thumbprint returns the JWK thumbprint using the indicated
+// hashing algorithm, according to RFC 7638 / 8037
+func (k okpPublicKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
+ k.mu.RLock()
+ defer k.mu.RUnlock()
+
+ crv, ok := k.Crv()
+ if !ok {
+ return nil, fmt.Errorf(`missing "crv" field`)
+ }
+ return okpThumbprint(
+ hash,
+ crv.String(),
+ base64.EncodeToString(k.x),
+ ), nil
+}
+
+// Thumbprint returns the JWK thumbprint using the indicated
+// hashing algorithm, according to RFC 7638 / 8037
+func (k okpPrivateKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
+ k.mu.RLock()
+ defer k.mu.RUnlock()
+
+ crv, ok := k.Crv()
+ if !ok {
+ return nil, fmt.Errorf(`missing "crv" field`)
+ }
+
+ return okpThumbprint(
+ hash,
+ crv.String(),
+ base64.EncodeToString(k.x),
+ ), nil
+}
+
+func validateOKPKey(key interface {
+ Crv() (jwa.EllipticCurveAlgorithm, bool)
+ X() ([]byte, bool)
+}) error {
+ if v, ok := key.Crv(); !ok || v == jwa.InvalidEllipticCurve() {
+ return fmt.Errorf(`invalid curve algorithm`)
+ }
+
+ if v, ok := key.X(); !ok || len(v) == 0 {
+ return fmt.Errorf(`missing "x" field`)
+ }
+
+ if priv, ok := key.(keyWithD); ok {
+ if d, ok := priv.D(); !ok || len(d) == 0 {
+ return fmt.Errorf(`missing "d" field`)
+ }
+ }
+ return nil
+}
+
+func (k *okpPublicKey) Validate() error {
+ k.mu.RLock()
+ defer k.mu.RUnlock()
+ if err := validateOKPKey(k); err != nil {
+ return NewKeyValidationError(fmt.Errorf(`jwk.OKPPublicKey: %w`, err))
+ }
+ return nil
+}
+
+func (k *okpPrivateKey) Validate() error {
+ k.mu.RLock()
+ defer k.mu.RUnlock()
+ if err := validateOKPKey(k); err != nil {
+ return NewKeyValidationError(fmt.Errorf(`jwk.OKPPrivateKey: %w`, err))
+ }
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/okp_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/okp_gen.go
new file mode 100644
index 0000000000..0bde986147
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/okp_gen.go
@@ -0,0 +1,1347 @@
+// Code generated by tools/cmd/genjwk/main.go. DO NOT EDIT.
+
+package jwk
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "sync"
+
+ "github.com/lestrrat-go/blackmagic"
+ "github.com/lestrrat-go/jwx/v3/cert"
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+ "github.com/lestrrat-go/jwx/v3/internal/pool"
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+)
+
+const (
+ OKPCrvKey = "crv"
+ OKPDKey = "d"
+ OKPXKey = "x"
+)
+
+type OKPPublicKey interface {
+ Key
+ Crv() (jwa.EllipticCurveAlgorithm, bool)
+ X() ([]byte, bool)
+}
+
+type okpPublicKey struct {
+ algorithm *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4
+ crv *jwa.EllipticCurveAlgorithm
+ keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4
+ keyOps *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3
+ keyUsage *string // https://tools.ietf.org/html/rfc7517#section-4.2
+ x []byte
+ x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6
+ x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7
+ x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8
+ x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5
+ privateParams map[string]any
+ mu *sync.RWMutex
+ dc json.DecodeCtx
+}
+
+var _ OKPPublicKey = &okpPublicKey{}
+var _ Key = &okpPublicKey{}
+
+func newOKPPublicKey() *okpPublicKey {
+ return &okpPublicKey{
+ mu: &sync.RWMutex{},
+ privateParams: make(map[string]any),
+ }
+}
+
+func (h okpPublicKey) KeyType() jwa.KeyType {
+ return jwa.OKP()
+}
+
+func (h okpPublicKey) rlock() {
+ h.mu.RLock()
+}
+
+func (h okpPublicKey) runlock() {
+ h.mu.RUnlock()
+}
+
+func (h okpPublicKey) IsPrivate() bool {
+ return false
+}
+
+func (h *okpPublicKey) Algorithm() (jwa.KeyAlgorithm, bool) {
+ if h.algorithm != nil {
+ return *(h.algorithm), true
+ }
+ return nil, false
+}
+
+func (h *okpPublicKey) Crv() (jwa.EllipticCurveAlgorithm, bool) {
+ if h.crv != nil {
+ return *(h.crv), true
+ }
+ return jwa.InvalidEllipticCurve(), false
+}
+
+func (h *okpPublicKey) KeyID() (string, bool) {
+ if h.keyID != nil {
+ return *(h.keyID), true
+ }
+ return "", false
+}
+
+func (h *okpPublicKey) KeyOps() (KeyOperationList, bool) {
+ if h.keyOps != nil {
+ return *(h.keyOps), true
+ }
+ return nil, false
+}
+
+func (h *okpPublicKey) KeyUsage() (string, bool) {
+ if h.keyUsage != nil {
+ return *(h.keyUsage), true
+ }
+ return "", false
+}
+
+func (h *okpPublicKey) X() ([]byte, bool) {
+ if h.x != nil {
+ return h.x, true
+ }
+ return nil, false
+}
+
+func (h *okpPublicKey) X509CertChain() (*cert.Chain, bool) {
+ return h.x509CertChain, true
+}
+
+func (h *okpPublicKey) X509CertThumbprint() (string, bool) {
+ if h.x509CertThumbprint != nil {
+ return *(h.x509CertThumbprint), true
+ }
+ return "", false
+}
+
+func (h *okpPublicKey) X509CertThumbprintS256() (string, bool) {
+ if h.x509CertThumbprintS256 != nil {
+ return *(h.x509CertThumbprintS256), true
+ }
+ return "", false
+}
+
+func (h *okpPublicKey) X509URL() (string, bool) {
+ if h.x509URL != nil {
+ return *(h.x509URL), true
+ }
+ return "", false
+}
+
+func (h *okpPublicKey) Has(name string) bool {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ switch name {
+ case KeyTypeKey:
+ return true
+ case AlgorithmKey:
+ return h.algorithm != nil
+ case OKPCrvKey:
+ return h.crv != nil
+ case KeyIDKey:
+ return h.keyID != nil
+ case KeyOpsKey:
+ return h.keyOps != nil
+ case KeyUsageKey:
+ return h.keyUsage != nil
+ case OKPXKey:
+ return h.x != nil
+ case X509CertChainKey:
+ return h.x509CertChain != nil
+ case X509CertThumbprintKey:
+ return h.x509CertThumbprint != nil
+ case X509CertThumbprintS256Key:
+ return h.x509CertThumbprintS256 != nil
+ case X509URLKey:
+ return h.x509URL != nil
+ default:
+ _, ok := h.privateParams[name]
+ return ok
+ }
+}
+
+func (h *okpPublicKey) Get(name string, dst any) error {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ switch name {
+ case KeyTypeKey:
+ if err := blackmagic.AssignIfCompatible(dst, h.KeyType()); err != nil {
+ return fmt.Errorf(`okpPublicKey.Get: failed to assign value for field %q to destination object: %w`, name, err)
+ }
+ case AlgorithmKey:
+ if h.algorithm == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.algorithm)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case OKPCrvKey:
+ if h.crv == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.crv)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case KeyIDKey:
+ if h.keyID == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyID)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case KeyOpsKey:
+ if h.keyOps == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyOps)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case KeyUsageKey:
+ if h.keyUsage == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyUsage)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case OKPXKey:
+ if h.x == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.x); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertChainKey:
+ if h.x509CertChain == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.x509CertChain); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertThumbprintKey:
+ if h.x509CertThumbprint == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprint)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertThumbprintS256Key:
+ if h.x509CertThumbprintS256 == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprintS256)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509URLKey:
+ if h.x509URL == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509URL)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ default:
+ v, ok := h.privateParams[name]
+ if !ok {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, v); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ }
+ return nil
+}
+
+func (h *okpPublicKey) Set(name string, value any) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ return h.setNoLock(name, value)
+}
+
+func (h *okpPublicKey) setNoLock(name string, value any) error {
+ switch name {
+ case "kty":
+ return nil
+ case AlgorithmKey:
+ switch v := value.(type) {
+ case string, jwa.SignatureAlgorithm, jwa.KeyEncryptionAlgorithm, jwa.ContentEncryptionAlgorithm:
+ tmp, err := jwa.KeyAlgorithmFrom(v)
+ if err != nil {
+ return fmt.Errorf(`invalid algorithm for %q key: %w`, AlgorithmKey, err)
+ }
+ h.algorithm = &tmp
+ default:
+ return fmt.Errorf(`invalid type for %q key: %T`, AlgorithmKey, value)
+ }
+ return nil
+ case OKPCrvKey:
+ if v, ok := value.(jwa.EllipticCurveAlgorithm); ok {
+ h.crv = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, OKPCrvKey, value)
+ case KeyIDKey:
+ if v, ok := value.(string); ok {
+ h.keyID = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value)
+ case KeyOpsKey:
+ var acceptor KeyOperationList
+ if err := acceptor.Accept(value); err != nil {
+ return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err)
+ }
+ h.keyOps = &acceptor
+ return nil
+ case KeyUsageKey:
+ switch v := value.(type) {
+ case KeyUsageType:
+ switch v {
+ case ForSignature, ForEncryption:
+ tmp := v.String()
+ h.keyUsage = &tmp
+ default:
+ return fmt.Errorf(`invalid key usage type %s`, v)
+ }
+ case string:
+ h.keyUsage = &v
+ default:
+ return fmt.Errorf(`invalid key usage type %s`, v)
+ }
+ case OKPXKey:
+ if v, ok := value.([]byte); ok {
+ h.x = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, OKPXKey, value)
+ case X509CertChainKey:
+ if v, ok := value.(*cert.Chain); ok {
+ h.x509CertChain = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value)
+ case X509CertThumbprintKey:
+ if v, ok := value.(string); ok {
+ h.x509CertThumbprint = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value)
+ case X509CertThumbprintS256Key:
+ if v, ok := value.(string); ok {
+ h.x509CertThumbprintS256 = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value)
+ case X509URLKey:
+ if v, ok := value.(string); ok {
+ h.x509URL = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value)
+ default:
+ if h.privateParams == nil {
+ h.privateParams = map[string]any{}
+ }
+ h.privateParams[name] = value
+ }
+ return nil
+}
+
+func (k *okpPublicKey) Remove(key string) error {
+ k.mu.Lock()
+ defer k.mu.Unlock()
+ switch key {
+ case AlgorithmKey:
+ k.algorithm = nil
+ case OKPCrvKey:
+ k.crv = nil
+ case KeyIDKey:
+ k.keyID = nil
+ case KeyOpsKey:
+ k.keyOps = nil
+ case KeyUsageKey:
+ k.keyUsage = nil
+ case OKPXKey:
+ k.x = nil
+ case X509CertChainKey:
+ k.x509CertChain = nil
+ case X509CertThumbprintKey:
+ k.x509CertThumbprint = nil
+ case X509CertThumbprintS256Key:
+ k.x509CertThumbprintS256 = nil
+ case X509URLKey:
+ k.x509URL = nil
+ default:
+ delete(k.privateParams, key)
+ }
+ return nil
+}
+
+func (k *okpPublicKey) Clone() (Key, error) {
+ key, err := cloneKey(k)
+ if err != nil {
+ return nil, fmt.Errorf(`okpPublicKey.Clone: %w`, err)
+ }
+ return key, nil
+}
+
+func (k *okpPublicKey) DecodeCtx() json.DecodeCtx {
+ k.mu.RLock()
+ defer k.mu.RUnlock()
+ return k.dc
+}
+
+func (k *okpPublicKey) SetDecodeCtx(dc json.DecodeCtx) {
+ k.mu.Lock()
+ defer k.mu.Unlock()
+ k.dc = dc
+}
+
+func (h *okpPublicKey) UnmarshalJSON(buf []byte) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ h.algorithm = nil
+ h.crv = nil
+ h.keyID = nil
+ h.keyOps = nil
+ h.keyUsage = nil
+ h.x = nil
+ h.x509CertChain = nil
+ h.x509CertThumbprint = nil
+ h.x509CertThumbprintS256 = nil
+ h.x509URL = nil
+ dec := json.NewDecoder(bytes.NewReader(buf))
+LOOP:
+ for {
+ tok, err := dec.Token()
+ if err != nil {
+ return fmt.Errorf(`error reading token: %w`, err)
+ }
+ switch tok := tok.(type) {
+ case json.Delim:
+ // Assuming we're doing everything correctly, we should ONLY
+ // get either tokens.OpenCurlyBracket or tokens.CloseCurlyBracket here.
+ if tok == tokens.CloseCurlyBracket { // End of object
+ break LOOP
+ } else if tok != tokens.OpenCurlyBracket {
+ return fmt.Errorf(`expected '%c' but got '%c'`, tokens.OpenCurlyBracket, tok)
+ }
+ case string: // Objects can only have string keys
+ switch tok {
+ case KeyTypeKey:
+ val, err := json.ReadNextStringToken(dec)
+ if err != nil {
+ return fmt.Errorf(`error reading token: %w`, err)
+ }
+ if val != jwa.OKP().String() {
+ return fmt.Errorf(`invalid kty value for RSAPublicKey (%s)`, val)
+ }
+ case AlgorithmKey:
+ var s string
+ if err := dec.Decode(&s); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+ }
+ alg, err := jwa.KeyAlgorithmFrom(s)
+ if err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+ }
+ h.algorithm = &alg
+ case OKPCrvKey:
+ var decoded jwa.EllipticCurveAlgorithm
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, OKPCrvKey, err)
+ }
+ h.crv = &decoded
+ case KeyIDKey:
+ if err := json.AssignNextStringToken(&h.keyID, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err)
+ }
+ case KeyOpsKey:
+ var decoded KeyOperationList
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err)
+ }
+ h.keyOps = &decoded
+ case KeyUsageKey:
+ if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err)
+ }
+ case OKPXKey:
+ if err := json.AssignNextBytesToken(&h.x, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, OKPXKey, err)
+ }
+ case X509CertChainKey:
+ var decoded cert.Chain
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err)
+ }
+ h.x509CertChain = &decoded
+ case X509CertThumbprintKey:
+ if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err)
+ }
+ case X509CertThumbprintS256Key:
+ if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err)
+ }
+ case X509URLKey:
+ if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err)
+ }
+ default:
+ if dc := h.dc; dc != nil {
+ if localReg := dc.Registry(); localReg != nil {
+ decoded, err := localReg.Decode(dec, tok)
+ if err == nil {
+ h.setNoLock(tok, decoded)
+ continue
+ }
+ }
+ }
+ decoded, err := registry.Decode(dec, tok)
+ if err == nil {
+ h.setNoLock(tok, decoded)
+ continue
+ }
+ return fmt.Errorf(`could not decode field %s: %w`, tok, err)
+ }
+ default:
+ return fmt.Errorf(`invalid token %T`, tok)
+ }
+ }
+ if h.crv == nil {
+ return fmt.Errorf(`required field crv is missing`)
+ }
+ if h.x == nil {
+ return fmt.Errorf(`required field x is missing`)
+ }
+ return nil
+}
+
+func (h okpPublicKey) MarshalJSON() ([]byte, error) {
+ data := make(map[string]any)
+ fields := make([]string, 0, 10)
+ data[KeyTypeKey] = jwa.OKP()
+ fields = append(fields, KeyTypeKey)
+ if h.algorithm != nil {
+ data[AlgorithmKey] = *(h.algorithm)
+ fields = append(fields, AlgorithmKey)
+ }
+ if h.crv != nil {
+ data[OKPCrvKey] = *(h.crv)
+ fields = append(fields, OKPCrvKey)
+ }
+ if h.keyID != nil {
+ data[KeyIDKey] = *(h.keyID)
+ fields = append(fields, KeyIDKey)
+ }
+ if h.keyOps != nil {
+ data[KeyOpsKey] = *(h.keyOps)
+ fields = append(fields, KeyOpsKey)
+ }
+ if h.keyUsage != nil {
+ data[KeyUsageKey] = *(h.keyUsage)
+ fields = append(fields, KeyUsageKey)
+ }
+ if h.x != nil {
+ data[OKPXKey] = h.x
+ fields = append(fields, OKPXKey)
+ }
+ if h.x509CertChain != nil {
+ data[X509CertChainKey] = h.x509CertChain
+ fields = append(fields, X509CertChainKey)
+ }
+ if h.x509CertThumbprint != nil {
+ data[X509CertThumbprintKey] = *(h.x509CertThumbprint)
+ fields = append(fields, X509CertThumbprintKey)
+ }
+ if h.x509CertThumbprintS256 != nil {
+ data[X509CertThumbprintS256Key] = *(h.x509CertThumbprintS256)
+ fields = append(fields, X509CertThumbprintS256Key)
+ }
+ if h.x509URL != nil {
+ data[X509URLKey] = *(h.x509URL)
+ fields = append(fields, X509URLKey)
+ }
+ for k, v := range h.privateParams {
+ data[k] = v
+ fields = append(fields, k)
+ }
+
+ sort.Strings(fields)
+ buf := pool.BytesBuffer().Get()
+ defer pool.BytesBuffer().Put(buf)
+ buf.WriteByte(tokens.OpenCurlyBracket)
+ enc := json.NewEncoder(buf)
+ for i, f := range fields {
+ if i > 0 {
+ buf.WriteRune(tokens.Comma)
+ }
+ buf.WriteRune(tokens.DoubleQuote)
+ buf.WriteString(f)
+ buf.WriteString(`":`)
+ v := data[f]
+ switch v := v.(type) {
+ case []byte:
+ buf.WriteRune(tokens.DoubleQuote)
+ buf.WriteString(base64.EncodeToString(v))
+ buf.WriteRune(tokens.DoubleQuote)
+ default:
+ if err := enc.Encode(v); err != nil {
+ return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err)
+ }
+ buf.Truncate(buf.Len() - 1)
+ }
+ }
+ buf.WriteByte(tokens.CloseCurlyBracket)
+ ret := make([]byte, buf.Len())
+ copy(ret, buf.Bytes())
+ return ret, nil
+}
+
+func (h *okpPublicKey) Keys() []string {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ keys := make([]string, 0, 10+len(h.privateParams))
+ keys = append(keys, KeyTypeKey)
+ if h.algorithm != nil {
+ keys = append(keys, AlgorithmKey)
+ }
+ if h.crv != nil {
+ keys = append(keys, OKPCrvKey)
+ }
+ if h.keyID != nil {
+ keys = append(keys, KeyIDKey)
+ }
+ if h.keyOps != nil {
+ keys = append(keys, KeyOpsKey)
+ }
+ if h.keyUsage != nil {
+ keys = append(keys, KeyUsageKey)
+ }
+ if h.x != nil {
+ keys = append(keys, OKPXKey)
+ }
+ if h.x509CertChain != nil {
+ keys = append(keys, X509CertChainKey)
+ }
+ if h.x509CertThumbprint != nil {
+ keys = append(keys, X509CertThumbprintKey)
+ }
+ if h.x509CertThumbprintS256 != nil {
+ keys = append(keys, X509CertThumbprintS256Key)
+ }
+ if h.x509URL != nil {
+ keys = append(keys, X509URLKey)
+ }
+ for k := range h.privateParams {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+type OKPPrivateKey interface {
+ Key
+ Crv() (jwa.EllipticCurveAlgorithm, bool)
+ D() ([]byte, bool)
+ X() ([]byte, bool)
+}
+
+type okpPrivateKey struct {
+ algorithm *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4
+ crv *jwa.EllipticCurveAlgorithm
+ d []byte
+ keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4
+ keyOps *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3
+ keyUsage *string // https://tools.ietf.org/html/rfc7517#section-4.2
+ x []byte
+ x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6
+ x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7
+ x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8
+ x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5
+ privateParams map[string]any
+ mu *sync.RWMutex
+ dc json.DecodeCtx
+}
+
+var _ OKPPrivateKey = &okpPrivateKey{}
+var _ Key = &okpPrivateKey{}
+
+func newOKPPrivateKey() *okpPrivateKey {
+ return &okpPrivateKey{
+ mu: &sync.RWMutex{},
+ privateParams: make(map[string]any),
+ }
+}
+
+func (h okpPrivateKey) KeyType() jwa.KeyType {
+ return jwa.OKP()
+}
+
+func (h okpPrivateKey) rlock() {
+ h.mu.RLock()
+}
+
+func (h okpPrivateKey) runlock() {
+ h.mu.RUnlock()
+}
+
+func (h okpPrivateKey) IsPrivate() bool {
+ return true
+}
+
+func (h *okpPrivateKey) Algorithm() (jwa.KeyAlgorithm, bool) {
+ if h.algorithm != nil {
+ return *(h.algorithm), true
+ }
+ return nil, false
+}
+
+func (h *okpPrivateKey) Crv() (jwa.EllipticCurveAlgorithm, bool) {
+ if h.crv != nil {
+ return *(h.crv), true
+ }
+ return jwa.InvalidEllipticCurve(), false
+}
+
+func (h *okpPrivateKey) D() ([]byte, bool) {
+ if h.d != nil {
+ return h.d, true
+ }
+ return nil, false
+}
+
+func (h *okpPrivateKey) KeyID() (string, bool) {
+ if h.keyID != nil {
+ return *(h.keyID), true
+ }
+ return "", false
+}
+
+func (h *okpPrivateKey) KeyOps() (KeyOperationList, bool) {
+ if h.keyOps != nil {
+ return *(h.keyOps), true
+ }
+ return nil, false
+}
+
+func (h *okpPrivateKey) KeyUsage() (string, bool) {
+ if h.keyUsage != nil {
+ return *(h.keyUsage), true
+ }
+ return "", false
+}
+
+func (h *okpPrivateKey) X() ([]byte, bool) {
+ if h.x != nil {
+ return h.x, true
+ }
+ return nil, false
+}
+
+func (h *okpPrivateKey) X509CertChain() (*cert.Chain, bool) {
+ return h.x509CertChain, true
+}
+
+func (h *okpPrivateKey) X509CertThumbprint() (string, bool) {
+ if h.x509CertThumbprint != nil {
+ return *(h.x509CertThumbprint), true
+ }
+ return "", false
+}
+
+func (h *okpPrivateKey) X509CertThumbprintS256() (string, bool) {
+ if h.x509CertThumbprintS256 != nil {
+ return *(h.x509CertThumbprintS256), true
+ }
+ return "", false
+}
+
+func (h *okpPrivateKey) X509URL() (string, bool) {
+ if h.x509URL != nil {
+ return *(h.x509URL), true
+ }
+ return "", false
+}
+
+func (h *okpPrivateKey) Has(name string) bool {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ switch name {
+ case KeyTypeKey:
+ return true
+ case AlgorithmKey:
+ return h.algorithm != nil
+ case OKPCrvKey:
+ return h.crv != nil
+ case OKPDKey:
+ return h.d != nil
+ case KeyIDKey:
+ return h.keyID != nil
+ case KeyOpsKey:
+ return h.keyOps != nil
+ case KeyUsageKey:
+ return h.keyUsage != nil
+ case OKPXKey:
+ return h.x != nil
+ case X509CertChainKey:
+ return h.x509CertChain != nil
+ case X509CertThumbprintKey:
+ return h.x509CertThumbprint != nil
+ case X509CertThumbprintS256Key:
+ return h.x509CertThumbprintS256 != nil
+ case X509URLKey:
+ return h.x509URL != nil
+ default:
+ _, ok := h.privateParams[name]
+ return ok
+ }
+}
+
+func (h *okpPrivateKey) Get(name string, dst any) error {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ switch name {
+ case KeyTypeKey:
+ if err := blackmagic.AssignIfCompatible(dst, h.KeyType()); err != nil {
+ return fmt.Errorf(`okpPrivateKey.Get: failed to assign value for field %q to destination object: %w`, name, err)
+ }
+ case AlgorithmKey:
+ if h.algorithm == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.algorithm)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case OKPCrvKey:
+ if h.crv == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.crv)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case OKPDKey:
+ if h.d == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.d); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case KeyIDKey:
+ if h.keyID == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyID)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case KeyOpsKey:
+ if h.keyOps == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyOps)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case KeyUsageKey:
+ if h.keyUsage == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyUsage)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case OKPXKey:
+ if h.x == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.x); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertChainKey:
+ if h.x509CertChain == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.x509CertChain); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertThumbprintKey:
+ if h.x509CertThumbprint == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprint)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertThumbprintS256Key:
+ if h.x509CertThumbprintS256 == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprintS256)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509URLKey:
+ if h.x509URL == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509URL)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ default:
+ v, ok := h.privateParams[name]
+ if !ok {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, v); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ }
+ return nil
+}
+
+func (h *okpPrivateKey) Set(name string, value any) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ return h.setNoLock(name, value)
+}
+
+func (h *okpPrivateKey) setNoLock(name string, value any) error {
+ switch name {
+ case "kty":
+ return nil
+ case AlgorithmKey:
+ switch v := value.(type) {
+ case string, jwa.SignatureAlgorithm, jwa.KeyEncryptionAlgorithm, jwa.ContentEncryptionAlgorithm:
+ tmp, err := jwa.KeyAlgorithmFrom(v)
+ if err != nil {
+ return fmt.Errorf(`invalid algorithm for %q key: %w`, AlgorithmKey, err)
+ }
+ h.algorithm = &tmp
+ default:
+ return fmt.Errorf(`invalid type for %q key: %T`, AlgorithmKey, value)
+ }
+ return nil
+ case OKPCrvKey:
+ if v, ok := value.(jwa.EllipticCurveAlgorithm); ok {
+ h.crv = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, OKPCrvKey, value)
+ case OKPDKey:
+ if v, ok := value.([]byte); ok {
+ h.d = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, OKPDKey, value)
+ case KeyIDKey:
+ if v, ok := value.(string); ok {
+ h.keyID = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value)
+ case KeyOpsKey:
+ var acceptor KeyOperationList
+ if err := acceptor.Accept(value); err != nil {
+ return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err)
+ }
+ h.keyOps = &acceptor
+ return nil
+ case KeyUsageKey:
+ switch v := value.(type) {
+ case KeyUsageType:
+ switch v {
+ case ForSignature, ForEncryption:
+ tmp := v.String()
+ h.keyUsage = &tmp
+ default:
+ return fmt.Errorf(`invalid key usage type %s`, v)
+ }
+ case string:
+ h.keyUsage = &v
+ default:
+ return fmt.Errorf(`invalid key usage type %s`, v)
+ }
+ case OKPXKey:
+ if v, ok := value.([]byte); ok {
+ h.x = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, OKPXKey, value)
+ case X509CertChainKey:
+ if v, ok := value.(*cert.Chain); ok {
+ h.x509CertChain = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value)
+ case X509CertThumbprintKey:
+ if v, ok := value.(string); ok {
+ h.x509CertThumbprint = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value)
+ case X509CertThumbprintS256Key:
+ if v, ok := value.(string); ok {
+ h.x509CertThumbprintS256 = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value)
+ case X509URLKey:
+ if v, ok := value.(string); ok {
+ h.x509URL = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value)
+ default:
+ if h.privateParams == nil {
+ h.privateParams = map[string]any{}
+ }
+ h.privateParams[name] = value
+ }
+ return nil
+}
+
+func (k *okpPrivateKey) Remove(key string) error {
+ k.mu.Lock()
+ defer k.mu.Unlock()
+ switch key {
+ case AlgorithmKey:
+ k.algorithm = nil
+ case OKPCrvKey:
+ k.crv = nil
+ case OKPDKey:
+ k.d = nil
+ case KeyIDKey:
+ k.keyID = nil
+ case KeyOpsKey:
+ k.keyOps = nil
+ case KeyUsageKey:
+ k.keyUsage = nil
+ case OKPXKey:
+ k.x = nil
+ case X509CertChainKey:
+ k.x509CertChain = nil
+ case X509CertThumbprintKey:
+ k.x509CertThumbprint = nil
+ case X509CertThumbprintS256Key:
+ k.x509CertThumbprintS256 = nil
+ case X509URLKey:
+ k.x509URL = nil
+ default:
+ delete(k.privateParams, key)
+ }
+ return nil
+}
+
+func (k *okpPrivateKey) Clone() (Key, error) {
+ key, err := cloneKey(k)
+ if err != nil {
+ return nil, fmt.Errorf(`okpPrivateKey.Clone: %w`, err)
+ }
+ return key, nil
+}
+
+func (k *okpPrivateKey) DecodeCtx() json.DecodeCtx {
+ k.mu.RLock()
+ defer k.mu.RUnlock()
+ return k.dc
+}
+
+func (k *okpPrivateKey) SetDecodeCtx(dc json.DecodeCtx) {
+ k.mu.Lock()
+ defer k.mu.Unlock()
+ k.dc = dc
+}
+
+func (h *okpPrivateKey) UnmarshalJSON(buf []byte) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ h.algorithm = nil
+ h.crv = nil
+ h.d = nil
+ h.keyID = nil
+ h.keyOps = nil
+ h.keyUsage = nil
+ h.x = nil
+ h.x509CertChain = nil
+ h.x509CertThumbprint = nil
+ h.x509CertThumbprintS256 = nil
+ h.x509URL = nil
+ dec := json.NewDecoder(bytes.NewReader(buf))
+LOOP:
+ for {
+ tok, err := dec.Token()
+ if err != nil {
+ return fmt.Errorf(`error reading token: %w`, err)
+ }
+ switch tok := tok.(type) {
+ case json.Delim:
+ // Assuming we're doing everything correctly, we should ONLY
+ // get either tokens.OpenCurlyBracket or tokens.CloseCurlyBracket here.
+ if tok == tokens.CloseCurlyBracket { // End of object
+ break LOOP
+ } else if tok != tokens.OpenCurlyBracket {
+ return fmt.Errorf(`expected '%c' but got '%c'`, tokens.OpenCurlyBracket, tok)
+ }
+ case string: // Objects can only have string keys
+ switch tok {
+ case KeyTypeKey:
+ val, err := json.ReadNextStringToken(dec)
+ if err != nil {
+ return fmt.Errorf(`error reading token: %w`, err)
+ }
+ if val != jwa.OKP().String() {
+ return fmt.Errorf(`invalid kty value for RSAPublicKey (%s)`, val)
+ }
+ case AlgorithmKey:
+ var s string
+ if err := dec.Decode(&s); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+ }
+ alg, err := jwa.KeyAlgorithmFrom(s)
+ if err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+ }
+ h.algorithm = &alg
+ case OKPCrvKey:
+ var decoded jwa.EllipticCurveAlgorithm
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, OKPCrvKey, err)
+ }
+ h.crv = &decoded
+ case OKPDKey:
+ if err := json.AssignNextBytesToken(&h.d, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, OKPDKey, err)
+ }
+ case KeyIDKey:
+ if err := json.AssignNextStringToken(&h.keyID, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err)
+ }
+ case KeyOpsKey:
+ var decoded KeyOperationList
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err)
+ }
+ h.keyOps = &decoded
+ case KeyUsageKey:
+ if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err)
+ }
+ case OKPXKey:
+ if err := json.AssignNextBytesToken(&h.x, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, OKPXKey, err)
+ }
+ case X509CertChainKey:
+ var decoded cert.Chain
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err)
+ }
+ h.x509CertChain = &decoded
+ case X509CertThumbprintKey:
+ if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err)
+ }
+ case X509CertThumbprintS256Key:
+ if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err)
+ }
+ case X509URLKey:
+ if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err)
+ }
+ default:
+ if dc := h.dc; dc != nil {
+ if localReg := dc.Registry(); localReg != nil {
+ decoded, err := localReg.Decode(dec, tok)
+ if err == nil {
+ h.setNoLock(tok, decoded)
+ continue
+ }
+ }
+ }
+ decoded, err := registry.Decode(dec, tok)
+ if err == nil {
+ h.setNoLock(tok, decoded)
+ continue
+ }
+ return fmt.Errorf(`could not decode field %s: %w`, tok, err)
+ }
+ default:
+ return fmt.Errorf(`invalid token %T`, tok)
+ }
+ }
+ if h.crv == nil {
+ return fmt.Errorf(`required field crv is missing`)
+ }
+ if h.d == nil {
+ return fmt.Errorf(`required field d is missing`)
+ }
+ if h.x == nil {
+ return fmt.Errorf(`required field x is missing`)
+ }
+ return nil
+}
+
+func (h okpPrivateKey) MarshalJSON() ([]byte, error) {
+ data := make(map[string]any)
+ fields := make([]string, 0, 11)
+ data[KeyTypeKey] = jwa.OKP()
+ fields = append(fields, KeyTypeKey)
+ if h.algorithm != nil {
+ data[AlgorithmKey] = *(h.algorithm)
+ fields = append(fields, AlgorithmKey)
+ }
+ if h.crv != nil {
+ data[OKPCrvKey] = *(h.crv)
+ fields = append(fields, OKPCrvKey)
+ }
+ if h.d != nil {
+ data[OKPDKey] = h.d
+ fields = append(fields, OKPDKey)
+ }
+ if h.keyID != nil {
+ data[KeyIDKey] = *(h.keyID)
+ fields = append(fields, KeyIDKey)
+ }
+ if h.keyOps != nil {
+ data[KeyOpsKey] = *(h.keyOps)
+ fields = append(fields, KeyOpsKey)
+ }
+ if h.keyUsage != nil {
+ data[KeyUsageKey] = *(h.keyUsage)
+ fields = append(fields, KeyUsageKey)
+ }
+ if h.x != nil {
+ data[OKPXKey] = h.x
+ fields = append(fields, OKPXKey)
+ }
+ if h.x509CertChain != nil {
+ data[X509CertChainKey] = h.x509CertChain
+ fields = append(fields, X509CertChainKey)
+ }
+ if h.x509CertThumbprint != nil {
+ data[X509CertThumbprintKey] = *(h.x509CertThumbprint)
+ fields = append(fields, X509CertThumbprintKey)
+ }
+ if h.x509CertThumbprintS256 != nil {
+ data[X509CertThumbprintS256Key] = *(h.x509CertThumbprintS256)
+ fields = append(fields, X509CertThumbprintS256Key)
+ }
+ if h.x509URL != nil {
+ data[X509URLKey] = *(h.x509URL)
+ fields = append(fields, X509URLKey)
+ }
+ for k, v := range h.privateParams {
+ data[k] = v
+ fields = append(fields, k)
+ }
+
+ sort.Strings(fields)
+ buf := pool.BytesBuffer().Get()
+ defer pool.BytesBuffer().Put(buf)
+ buf.WriteByte(tokens.OpenCurlyBracket)
+ enc := json.NewEncoder(buf)
+ for i, f := range fields {
+ if i > 0 {
+ buf.WriteRune(tokens.Comma)
+ }
+ buf.WriteRune(tokens.DoubleQuote)
+ buf.WriteString(f)
+ buf.WriteString(`":`)
+ v := data[f]
+ switch v := v.(type) {
+ case []byte:
+ buf.WriteRune(tokens.DoubleQuote)
+ buf.WriteString(base64.EncodeToString(v))
+ buf.WriteRune(tokens.DoubleQuote)
+ default:
+ if err := enc.Encode(v); err != nil {
+ return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err)
+ }
+ buf.Truncate(buf.Len() - 1)
+ }
+ }
+ buf.WriteByte(tokens.CloseCurlyBracket)
+ ret := make([]byte, buf.Len())
+ copy(ret, buf.Bytes())
+ return ret, nil
+}
+
+func (h *okpPrivateKey) Keys() []string {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ keys := make([]string, 0, 11+len(h.privateParams))
+ keys = append(keys, KeyTypeKey)
+ if h.algorithm != nil {
+ keys = append(keys, AlgorithmKey)
+ }
+ if h.crv != nil {
+ keys = append(keys, OKPCrvKey)
+ }
+ if h.d != nil {
+ keys = append(keys, OKPDKey)
+ }
+ if h.keyID != nil {
+ keys = append(keys, KeyIDKey)
+ }
+ if h.keyOps != nil {
+ keys = append(keys, KeyOpsKey)
+ }
+ if h.keyUsage != nil {
+ keys = append(keys, KeyUsageKey)
+ }
+ if h.x != nil {
+ keys = append(keys, OKPXKey)
+ }
+ if h.x509CertChain != nil {
+ keys = append(keys, X509CertChainKey)
+ }
+ if h.x509CertThumbprint != nil {
+ keys = append(keys, X509CertThumbprintKey)
+ }
+ if h.x509CertThumbprintS256 != nil {
+ keys = append(keys, X509CertThumbprintS256Key)
+ }
+ if h.x509URL != nil {
+ keys = append(keys, X509URLKey)
+ }
+ for k := range h.privateParams {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+var okpStandardFields KeyFilter
+
+func init() {
+ okpStandardFields = NewFieldNameFilter(KeyTypeKey, KeyUsageKey, KeyOpsKey, AlgorithmKey, KeyIDKey, X509URLKey, X509CertChainKey, X509CertThumbprintKey, X509CertThumbprintS256Key, OKPCrvKey, OKPXKey, OKPDKey)
+}
+
+// OKPStandardFieldsFilter returns a KeyFilter that filters out standard OKP fields.
+func OKPStandardFieldsFilter() KeyFilter {
+ return okpStandardFields
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/options.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/options.go
new file mode 100644
index 0000000000..56cc52625f
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/options.go
@@ -0,0 +1,76 @@
+package jwk
+
+import (
+ "time"
+
+ "github.com/lestrrat-go/httprc/v3"
+ "github.com/lestrrat-go/option/v2"
+)
+
+type identTypedField struct{}
+
+type typedFieldPair struct {
+ Name string
+ Value any
+}
+
+// WithTypedField allows a private field to be parsed into the object type of
+// your choice. It works much like the RegisterCustomField, but the effect
+// is only applicable to the jwt.Parse function call which receives this option.
+//
+// While this can be extremely useful, this option should be used with caution:
+// There are many caveats that your entire team/user-base needs to be aware of,
+// and therefore in general its use is discouraged. Only use it when you know
+// what you are doing, and you document its use clearly for others.
+//
+// First and foremost, this is a "per-object" option. Meaning that given the same
+// serialized format, it is possible to generate two objects whose internal
+// representations may differ. That is, if you parse one _WITH_ the option,
+// and the other _WITHOUT_, their internal representation may completely differ.
+// This could potentially lead to problems.
+//
+// Second, specifying this option will slightly slow down the decoding process
+// as it needs to consult multiple definitions sources (global and local), so
+// be careful if you are decoding a large number of tokens, as the effects will stack up.
+func WithTypedField(name string, object any) ParseOption {
+ return &parseOption{
+ option.New(identTypedField{},
+ typedFieldPair{Name: name, Value: object},
+ ),
+ }
+}
+
+type registerResourceOption struct {
+ option.Interface
+}
+
+func (registerResourceOption) registerOption() {}
+func (registerResourceOption) resourceOption() {}
+
+type identNewResourceOption struct{}
+
+// WithHttprcResourceOption can be used to pass arbitrary `httprc.NewResourceOption`
+// to `(httprc.Client).Add` by way of `(jwk.Cache).Register`.
+func WithHttprcResourceOption(o httprc.NewResourceOption) RegisterOption {
+ return ®isterResourceOption{
+ option.New(identNewResourceOption{}, o),
+ }
+}
+
+// WithConstantInterval can be used to pass `httprc.WithConstantInterval` option to
+// `(httprc.Client).Add` by way of `(jwk.Cache).Register`.
+func WithConstantInterval(d time.Duration) RegisterOption {
+ return WithHttprcResourceOption(httprc.WithConstantInterval(d))
+}
+
+// WithMinInterval can be used to pass `httprc.WithMinInterval` option to
+// `(httprc.Client).Add` by way of `(jwk.Cache).Register`.
+func WithMinInterval(d time.Duration) RegisterOption {
+ return WithHttprcResourceOption(httprc.WithMinInterval(d))
+}
+
+// WithMaxInterval can be used to pass `httprc.WithMaxInterval` option to
+// `(httprc.Client).Add` by way of `(jwk.Cache).Register`.
+func WithMaxInterval(d time.Duration) RegisterOption {
+ return WithHttprcResourceOption(httprc.WithMaxInterval(d))
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/options.yaml b/vendor/github.com/lestrrat-go/jwx/v3/jwk/options.yaml
new file mode 100644
index 0000000000..879dcba158
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/options.yaml
@@ -0,0 +1,143 @@
+package_name: jwk
+output: jwk/options_gen.go
+interfaces:
+ - name: CacheOption
+ comment: |
+ CacheOption is a type of Option that can be passed to the
+ the `jwk.NewCache()` function.
+ - name: ResourceOption
+ comment: |
+ ResourceOption is a type of Option that can be passed to the `httprc.NewResource` function
+ by way of RegisterOption.
+ - name: AssignKeyIDOption
+ - name: FetchOption
+ methods:
+ - fetchOption
+ - parseOption
+ - registerOption
+ comment: |
+ FetchOption is a type of Option that can be passed to `jwk.Fetch()`
+ FetchOption also implements the `RegisterOption`, and thus can
+ safely be passed to `(*jwk.Cache).Register()`
+ - name: ParseOption
+ methods:
+ - fetchOption
+ - registerOption
+ - readFileOption
+ comment: |
+ ParseOption is a type of Option that can be passed to `jwk.Parse()`
+ ParseOption also implements the `ReadFileOption` and `NewCacheOption`,
+ and thus safely be passed to `jwk.ReadFile` and `(*jwk.Cache).Configure()`
+ - name: ReadFileOption
+ comment: |
+ ReadFileOption is a type of `Option` that can be passed to `jwk.ReadFile`
+ - name: RegisterOption
+ comment: |
+ RegisterOption describes options that can be passed to `(jwk.Cache).Register()`
+ - name: RegisterFetchOption
+ methods:
+ - fetchOption
+ - registerOption
+ - parseOption
+ comment: |
+ RegisterFetchOption describes options that can be passed to `(jwk.Cache).Register()` and `jwk.Fetch()`
+ - name: GlobalOption
+ comment: |
+ GlobalOption is a type of Option that can be passed to the `jwk.Configure()` to
+ change the global configuration of the jwk package.
+options:
+ - ident: HTTPClient
+ interface: RegisterFetchOption
+ argument_type: HTTPClient
+ comment: |
+ WithHTTPClient allows users to specify the "net/http".Client object that
+ is used when fetching jwk.Set objects.
+ - ident: ThumbprintHash
+ interface: AssignKeyIDOption
+ argument_type: crypto.Hash
+ - ident: LocalRegistry
+ option_name: withLocalRegistry
+ interface: ParseOption
+ argument_type: '*json.Registry'
+ comment: This option is only available for internal code. Users don't get to play with it
+ - ident: PEM
+ interface: ParseOption
+ argument_type: bool
+ comment: |
+ WithPEM specifies that the input to `Parse()` is a PEM encoded key.
+
+ This option is planned to be deprecated in the future. The plan is to
+ replace it with `jwk.WithX509(true)`
+ - ident: X509
+ interface: ParseOption
+ argument_type: bool
+ comment: |
+ WithX509 specifies that the input to `Parse()` is an X.509 encoded key
+ - ident: PEMDecoder
+ interface: ParseOption
+ argument_type: PEMDecoder
+ comment: |
+ WithPEMDecoder specifies the PEMDecoder object to use when decoding
+ PEM encoded keys. This option can be passed to `jwk.Parse()`
+
+ This option is planned to be deprecated in the future. The plan is to
+ use `jwk.RegisterX509Decoder()` to register a custom X.509 decoder globally.
+ - ident: FetchWhitelist
+ interface: FetchOption
+ argument_type: Whitelist
+ comment: |
+ WithFetchWhitelist specifies the Whitelist object to use when
+ fetching JWKs from a remote source. This option can be passed
+ to both `jwk.Fetch()`
+ - ident: IgnoreParseError
+ interface: ParseOption
+ argument_type: bool
+ comment: |
+ WithIgnoreParseError is only applicable when used with `jwk.Parse()`
+ (i.e. to parse JWK sets). If passed to `jwk.ParseKey()`, the function
+ will return an error no matter what the input is.
+
+ DO NOT USE WITHOUT EXHAUSTING ALL OTHER ROUTES FIRST.
+
+ The option specifies that errors found during parsing of individual
+ keys are ignored. For example, if you had keys A, B, C where B is
+ invalid (e.g. it does not contain the required fields), then the
+ resulting JWKS will contain keys A and C only.
+
+ This options exists as an escape hatch for those times when a
+ key in a JWKS that is irrelevant for your use case is causing
+ your JWKS parsing to fail, and you want to get to the rest of the
+ keys in the JWKS.
+
+ Again, DO NOT USE unless you have exhausted all other routes.
+ When you use this option, you will not be able to tell if you are
+ using a faulty JWKS, except for when there are JSON syntax errors.
+ - ident: FS
+ interface: ReadFileOption
+ argument_type: fs.FS
+ comment: |
+ WithFS specifies the source `fs.FS` object to read the file from.
+ - ident: WaitReady
+ interface: RegisterOption
+ argument_type: bool
+ comment: |
+ WithWaitReady specifies that the `jwk.Cache` should wait until the
+ first fetch is done before returning from the `Register()` call.
+
+ This option is by default true. Specify a false value if you would
+ like to return immediately from the `Register()` call.
+
+ This options is exactly the same as `httprc.WithWaitReady()`
+ - ident: StrictKeyUsage
+ interface: GlobalOption
+ argument_type: bool
+ comment: |
+ WithStrictKeyUsage specifies if during JWK parsing, the "use" field
+ should be confined to the values that have been registered via
+ `jwk.RegisterKeyType()`. By default this option is true, and the
+ initial allowed values are "use" and "enc" only.
+
+ If this option is set to false, then the "use" field can be any
+ value. If this options is set to true, then the "use" field must
+ be one of the registered values, and otherwise an error will be
+ reported during parsing / assignment to `jwk.KeyUsageType`
\ No newline at end of file
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/options_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/options_gen.go
new file mode 100644
index 0000000000..99e66c3e7e
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/options_gen.go
@@ -0,0 +1,297 @@
+// Code generated by tools/cmd/genoptions/main.go. DO NOT EDIT.
+
+package jwk
+
+import (
+ "crypto"
+ "io/fs"
+
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+ "github.com/lestrrat-go/option/v2"
+)
+
+type Option = option.Interface
+
+type AssignKeyIDOption interface {
+ Option
+ assignKeyIDOption()
+}
+
+type assignKeyIDOption struct {
+ Option
+}
+
+func (*assignKeyIDOption) assignKeyIDOption() {}
+
+// CacheOption is a type of Option that can be passed to the
+// the `jwk.NewCache()` function.
+type CacheOption interface {
+ Option
+ cacheOption()
+}
+
+type cacheOption struct {
+ Option
+}
+
+func (*cacheOption) cacheOption() {}
+
+// FetchOption is a type of Option that can be passed to `jwk.Fetch()`
+// FetchOption also implements the `RegisterOption`, and thus can
+// safely be passed to `(*jwk.Cache).Register()`
+type FetchOption interface {
+ Option
+ fetchOption()
+ parseOption()
+ registerOption()
+}
+
+type fetchOption struct {
+ Option
+}
+
+func (*fetchOption) fetchOption() {}
+
+func (*fetchOption) parseOption() {}
+
+func (*fetchOption) registerOption() {}
+
+// GlobalOption is a type of Option that can be passed to the `jwk.Configure()` to
+// change the global configuration of the jwk package.
+type GlobalOption interface {
+ Option
+ globalOption()
+}
+
+type globalOption struct {
+ Option
+}
+
+func (*globalOption) globalOption() {}
+
+// ParseOption is a type of Option that can be passed to `jwk.Parse()`
+// ParseOption also implements the `ReadFileOption` and `NewCacheOption`,
+// and thus safely be passed to `jwk.ReadFile` and `(*jwk.Cache).Configure()`
+type ParseOption interface {
+ Option
+ fetchOption()
+ registerOption()
+ readFileOption()
+}
+
+type parseOption struct {
+ Option
+}
+
+func (*parseOption) fetchOption() {}
+
+func (*parseOption) registerOption() {}
+
+func (*parseOption) readFileOption() {}
+
+// ReadFileOption is a type of `Option` that can be passed to `jwk.ReadFile`
+type ReadFileOption interface {
+ Option
+ readFileOption()
+}
+
+type readFileOption struct {
+ Option
+}
+
+func (*readFileOption) readFileOption() {}
+
+// RegisterFetchOption describes options that can be passed to `(jwk.Cache).Register()` and `jwk.Fetch()`
+type RegisterFetchOption interface {
+ Option
+ fetchOption()
+ registerOption()
+ parseOption()
+}
+
+type registerFetchOption struct {
+ Option
+}
+
+func (*registerFetchOption) fetchOption() {}
+
+func (*registerFetchOption) registerOption() {}
+
+func (*registerFetchOption) parseOption() {}
+
+// RegisterOption describes options that can be passed to `(jwk.Cache).Register()`
+type RegisterOption interface {
+ Option
+ registerOption()
+}
+
+type registerOption struct {
+ Option
+}
+
+func (*registerOption) registerOption() {}
+
+// ResourceOption is a type of Option that can be passed to the `httprc.NewResource` function
+// by way of RegisterOption.
+type ResourceOption interface {
+ Option
+ resourceOption()
+}
+
+type resourceOption struct {
+ Option
+}
+
+func (*resourceOption) resourceOption() {}
+
+type identFS struct{}
+type identFetchWhitelist struct{}
+type identHTTPClient struct{}
+type identIgnoreParseError struct{}
+type identLocalRegistry struct{}
+type identPEM struct{}
+type identPEMDecoder struct{}
+type identStrictKeyUsage struct{}
+type identThumbprintHash struct{}
+type identWaitReady struct{}
+type identX509 struct{}
+
+func (identFS) String() string {
+ return "WithFS"
+}
+
+func (identFetchWhitelist) String() string {
+ return "WithFetchWhitelist"
+}
+
+func (identHTTPClient) String() string {
+ return "WithHTTPClient"
+}
+
+func (identIgnoreParseError) String() string {
+ return "WithIgnoreParseError"
+}
+
+func (identLocalRegistry) String() string {
+ return "withLocalRegistry"
+}
+
+func (identPEM) String() string {
+ return "WithPEM"
+}
+
+func (identPEMDecoder) String() string {
+ return "WithPEMDecoder"
+}
+
+func (identStrictKeyUsage) String() string {
+ return "WithStrictKeyUsage"
+}
+
+func (identThumbprintHash) String() string {
+ return "WithThumbprintHash"
+}
+
+func (identWaitReady) String() string {
+ return "WithWaitReady"
+}
+
+func (identX509) String() string {
+ return "WithX509"
+}
+
+// WithFS specifies the source `fs.FS` object to read the file from.
+func WithFS(v fs.FS) ReadFileOption {
+ return &readFileOption{option.New(identFS{}, v)}
+}
+
+// WithFetchWhitelist specifies the Whitelist object to use when
+// fetching JWKs from a remote source. This option can be passed
+// to both `jwk.Fetch()`
+func WithFetchWhitelist(v Whitelist) FetchOption {
+ return &fetchOption{option.New(identFetchWhitelist{}, v)}
+}
+
+// WithHTTPClient allows users to specify the "net/http".Client object that
+// is used when fetching jwk.Set objects.
+func WithHTTPClient(v HTTPClient) RegisterFetchOption {
+ return ®isterFetchOption{option.New(identHTTPClient{}, v)}
+}
+
+// WithIgnoreParseError is only applicable when used with `jwk.Parse()`
+// (i.e. to parse JWK sets). If passed to `jwk.ParseKey()`, the function
+// will return an error no matter what the input is.
+//
+// DO NOT USE WITHOUT EXHAUSTING ALL OTHER ROUTES FIRST.
+//
+// The option specifies that errors found during parsing of individual
+// keys are ignored. For example, if you had keys A, B, C where B is
+// invalid (e.g. it does not contain the required fields), then the
+// resulting JWKS will contain keys A and C only.
+//
+// This options exists as an escape hatch for those times when a
+// key in a JWKS that is irrelevant for your use case is causing
+// your JWKS parsing to fail, and you want to get to the rest of the
+// keys in the JWKS.
+//
+// Again, DO NOT USE unless you have exhausted all other routes.
+// When you use this option, you will not be able to tell if you are
+// using a faulty JWKS, except for when there are JSON syntax errors.
+func WithIgnoreParseError(v bool) ParseOption {
+ return &parseOption{option.New(identIgnoreParseError{}, v)}
+}
+
+// This option is only available for internal code. Users don't get to play with it
+func withLocalRegistry(v *json.Registry) ParseOption {
+ return &parseOption{option.New(identLocalRegistry{}, v)}
+}
+
+// WithPEM specifies that the input to `Parse()` is a PEM encoded key.
+//
+// This option is planned to be deprecated in the future. The plan is to
+// replace it with `jwk.WithX509(true)`
+func WithPEM(v bool) ParseOption {
+ return &parseOption{option.New(identPEM{}, v)}
+}
+
+// WithPEMDecoder specifies the PEMDecoder object to use when decoding
+// PEM encoded keys. This option can be passed to `jwk.Parse()`
+//
+// This option is planned to be deprecated in the future. The plan is to
+// use `jwk.RegisterX509Decoder()` to register a custom X.509 decoder globally.
+func WithPEMDecoder(v PEMDecoder) ParseOption {
+ return &parseOption{option.New(identPEMDecoder{}, v)}
+}
+
+// WithStrictKeyUsage specifies if during JWK parsing, the "use" field
+// should be confined to the values that have been registered via
+// `jwk.RegisterKeyType()`. By default this option is true, and the
+// initial allowed values are "use" and "enc" only.
+//
+// If this option is set to false, then the "use" field can be any
+// value. If this options is set to true, then the "use" field must
+// be one of the registered values, and otherwise an error will be
+// reported during parsing / assignment to `jwk.KeyUsageType`
+func WithStrictKeyUsage(v bool) GlobalOption {
+ return &globalOption{option.New(identStrictKeyUsage{}, v)}
+}
+
+func WithThumbprintHash(v crypto.Hash) AssignKeyIDOption {
+ return &assignKeyIDOption{option.New(identThumbprintHash{}, v)}
+}
+
+// WithWaitReady specifies that the `jwk.Cache` should wait until the
+// first fetch is done before returning from the `Register()` call.
+//
+// This option is by default true. Specify a false value if you would
+// like to return immediately from the `Register()` call.
+//
+// This options is exactly the same as `httprc.WithWaitReady()`
+func WithWaitReady(v bool) RegisterOption {
+ return ®isterOption{option.New(identWaitReady{}, v)}
+}
+
+// WithX509 specifies that the input to `Parse()` is an X.509 encoded key
+func WithX509(v bool) ParseOption {
+ return &parseOption{option.New(identX509{}, v)}
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/parser.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/parser.go
new file mode 100644
index 0000000000..fa8764ef72
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/parser.go
@@ -0,0 +1,244 @@
+package jwk
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+
+ "github.com/lestrrat-go/blackmagic"
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+)
+
+// KeyParser represents a type that can parse a JSON representation of a JWK into
+// a jwk.Key.
+// See KeyConvertor for a type that can convert a raw key into a jwk.Key
+type KeyParser interface {
+ // ParseKey parses a JSON payload to a `jwk.Key` object. The first
+ // argument is an object that contains some hints as to what kind of
+ // key the JSON payload contains.
+ //
+ // If your KeyParser decides that the payload is not something
+ // you can parse, and you would like to continue parsing with
+ // the remaining KeyParser instances that are registered,
+ // return a `jwk.ContinueParseError`. Any other errors will immediately
+ // halt the parsing process.
+ //
+ // When unmarshaling JSON, use the unmarshaler object supplied as
+ // the second argument. This will ensure that the JSON is unmarshaled
+ // in a way that is compatible with the rest of the library.
+ ParseKey(probe *KeyProbe, unmarshaler KeyUnmarshaler, payload []byte) (Key, error)
+}
+
+// KeyParseFunc is a type of KeyParser that is based on a function/closure
+type KeyParseFunc func(probe *KeyProbe, unmarshaler KeyUnmarshaler, payload []byte) (Key, error)
+
+func (f KeyParseFunc) ParseKey(probe *KeyProbe, unmarshaler KeyUnmarshaler, payload []byte) (Key, error) {
+ return f(probe, unmarshaler, payload)
+}
+
+// protects keyParsers
+var muKeyParser sync.RWMutex
+
+// list of parsers
+var keyParsers = []KeyParser{KeyParseFunc(defaultParseKey)}
+
+// RegisterKeyParser adds a new KeyParser. Parsers are called in FILO order.
+// That is, the last parser to be registered is called first. There is no
+// check for duplicate entries.
+func RegisterKeyParser(kp KeyParser) {
+ muKeyParser.Lock()
+ defer muKeyParser.Unlock()
+ keyParsers = append(keyParsers, kp)
+}
+
+func defaultParseKey(probe *KeyProbe, unmarshaler KeyUnmarshaler, data []byte) (Key, error) {
+ var key Key
+ var kty string
+ var d json.RawMessage
+ if err := probe.Get("Kty", &kty); err != nil {
+ return nil, fmt.Errorf(`jwk.Parse: failed to get "kty" hint: %w`, err)
+ }
+ // We ignore errors from this field, as it's optional
+ _ = probe.Get("D", &d)
+ switch v, _ := jwa.LookupKeyType(kty); v {
+ case jwa.RSA():
+ if d != nil {
+ key = newRSAPrivateKey()
+ } else {
+ key = newRSAPublicKey()
+ }
+ case jwa.EC():
+ if d != nil {
+ key = newECDSAPrivateKey()
+ } else {
+ key = newECDSAPublicKey()
+ }
+ case jwa.OctetSeq():
+ key = newSymmetricKey()
+ case jwa.OKP():
+ if d != nil {
+ key = newOKPPrivateKey()
+ } else {
+ key = newOKPPublicKey()
+ }
+ default:
+ return nil, fmt.Errorf(`invalid key type from JSON (%s)`, kty)
+ }
+
+ if err := unmarshaler.UnmarshalKey(data, key); err != nil {
+ return nil, fmt.Errorf(`failed to unmarshal JSON into key (%T): %w`, key, err)
+ }
+ return key, nil
+}
+
+type keyUnmarshaler struct {
+ localReg *json.Registry
+}
+
+func (ku *keyUnmarshaler) UnmarshalKey(data []byte, key any) error {
+ if ku.localReg != nil {
+ dcKey, ok := key.(json.DecodeCtxContainer)
+ if !ok {
+ return fmt.Errorf(`typed field was requested, but the key (%T) does not support DecodeCtx`, key)
+ }
+ dc := json.NewDecodeCtx(ku.localReg)
+ dcKey.SetDecodeCtx(dc)
+ defer func() { dcKey.SetDecodeCtx(nil) }()
+ }
+
+ if err := json.Unmarshal(data, key); err != nil {
+ return fmt.Errorf(`failed to unmarshal JSON into key (%T): %w`, key, err)
+ }
+
+ return nil
+}
+
+// keyProber is the object that starts the probing. When Probe() is called,
+// it creates (possibly from a cached value) an object that is used to
+// hold hint values.
+type keyProber struct {
+ mu sync.RWMutex
+ pool *sync.Pool
+ fields map[string]reflect.StructField
+ typ reflect.Type
+}
+
+func (kp *keyProber) AddField(field reflect.StructField) error {
+ kp.mu.Lock()
+ defer kp.mu.Unlock()
+
+ if _, ok := kp.fields[field.Name]; ok {
+ return fmt.Errorf(`field name %s is already registered`, field.Name)
+ }
+ kp.fields[field.Name] = field
+ kp.makeStructType()
+
+ // Update pool (note: the logic is the same, but we need to recreate it
+ // so that we don't accidentally use old stored values)
+ kp.pool = &sync.Pool{
+ New: kp.makeStruct,
+ }
+ return nil
+}
+
+func (kp *keyProber) makeStructType() {
+ // DOES NOT LOCK
+ fields := make([]reflect.StructField, 0, len(kp.fields))
+ for _, f := range kp.fields {
+ fields = append(fields, f)
+ }
+ kp.typ = reflect.StructOf(fields)
+}
+
+func (kp *keyProber) makeStruct() any {
+ return reflect.New(kp.typ)
+}
+
+func (kp *keyProber) Probe(data []byte) (*KeyProbe, error) {
+ kp.mu.RLock()
+ defer kp.mu.RUnlock()
+
+ // if the field list unchanged, so is the pool object, so effectively
+ // we should be using the cached version
+ v := kp.pool.Get()
+ if v == nil {
+ return nil, fmt.Errorf(`probe: failed to get object from pool`)
+ }
+ rv, ok := v.(reflect.Value)
+ if !ok {
+ return nil, fmt.Errorf(`probe: value returned from pool as of type %T, expected reflect.Value`, v)
+ }
+
+ if err := json.Unmarshal(data, rv.Interface()); err != nil {
+ return nil, fmt.Errorf(`probe: failed to unmarshal data: %w`, err)
+ }
+
+ return &KeyProbe{data: rv}, nil
+}
+
+// KeyProbe is the object that carries the hints when parsing a key.
+// The exact list of fields can vary depending on the types of key
+// that are registered.
+//
+// Use `Get()` to access the value of a field.
+//
+// The underlying data stored in a KeyProbe is recycled each
+// time a value is parsed, therefore you are not allowed to hold
+// onto this object after ParseKey() is done.
+type KeyProbe struct {
+ data reflect.Value
+}
+
+// Get returns the value of the field with the given `name“.
+// `dst` must be a pointer to a value that can hold the type of
+// the value of the field, which is determined by the
+// field type registered through `jwk.RegisterProbeField()`
+func (kp *KeyProbe) Get(name string, dst any) error {
+ f := kp.data.Elem().FieldByName(name)
+ if !f.IsValid() {
+ return fmt.Errorf(`field %s not found`, name)
+ }
+
+ if err := blackmagic.AssignIfCompatible(dst, f.Addr().Interface()); err != nil {
+ return fmt.Errorf(`failed to assign value of field %q to %T: %w`, name, dst, err)
+ }
+ return nil
+}
+
+// We don't really need the object, we need to know its type
+var keyProbe = &keyProber{
+ fields: make(map[string]reflect.StructField),
+}
+
+// RegisterProbeField adds a new field to be probed during the initial
+// phase of parsing. This is done by partially parsing the JSON payload,
+// and we do this by calling `json.Unmarshal` using a dynamic type that
+// can possibly be modified during runtime. This function is used to
+// add a new field to this dynamic type.
+//
+// Note that the `Name` field for the given `reflect.StructField` must start
+// with an upper case alphabet, such that it is treated as an exported field.
+// So for example, if you want to probe the "my_hint" field, you should specify
+// the field name as "MyHint" or similar.
+//
+// Also the field name must be unique. If you believe that your field name may
+// collide with other packages that may want to add their own probes,
+// it is the responsibility of the caller
+// to ensure that the field name is unique (possibly by prefixing the field
+// name with a unique string). It is important to note that the field name
+// need not be the same as the JSON field name. For example, your field name
+// could be "MyPkg_MyHint", while the actual JSON field name could be "my_hint".
+//
+// If the field name is not unique, an error is returned.
+func RegisterProbeField(p reflect.StructField) error {
+ // locking is done inside keyProbe
+ return keyProbe.AddField(p)
+}
+
+// KeyUnmarshaler is a thin wrapper around json.Unmarshal. It behaves almost
+// exactly like json.Unmarshal, but it allows us to add extra magic that
+// is specific to this library before calling the actual json.Unmarshal.
+type KeyUnmarshaler interface {
+ UnmarshalKey(data []byte, key any) error
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/rsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/rsa.go
new file mode 100644
index 0000000000..bcd7d05c02
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/rsa.go
@@ -0,0 +1,360 @@
+package jwk
+
+import (
+ "crypto"
+ "crypto/rsa"
+ "encoding/binary"
+ "fmt"
+ "math/big"
+ "reflect"
+
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+ "github.com/lestrrat-go/jwx/v3/internal/pool"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+)
+
+func init() {
+ RegisterKeyExporter(jwa.RSA(), KeyExportFunc(rsaJWKToRaw))
+}
+
+func (k *rsaPrivateKey) Import(rawKey *rsa.PrivateKey) error {
+ k.mu.Lock()
+ defer k.mu.Unlock()
+
+ d, err := bigIntToBytes(rawKey.D)
+ if err != nil {
+ return fmt.Errorf(`invalid rsa.PrivateKey: %w`, err)
+ }
+ k.d = d
+
+ l := len(rawKey.Primes)
+
+ if l < 0 /* I know, I'm being paranoid */ || l > 2 {
+ return fmt.Errorf(`invalid number of primes in rsa.PrivateKey: need 0 to 2, but got %d`, len(rawKey.Primes))
+ }
+
+ if l > 0 {
+ p, err := bigIntToBytes(rawKey.Primes[0])
+ if err != nil {
+ return fmt.Errorf(`invalid rsa.PrivateKey: %w`, err)
+ }
+ k.p = p
+ }
+
+ if l > 1 {
+ q, err := bigIntToBytes(rawKey.Primes[1])
+ if err != nil {
+ return fmt.Errorf(`invalid rsa.PrivateKey: %w`, err)
+ }
+ k.q = q
+ }
+
+ // dp, dq, qi are optional values
+ if v, err := bigIntToBytes(rawKey.Precomputed.Dp); err == nil {
+ k.dp = v
+ }
+ if v, err := bigIntToBytes(rawKey.Precomputed.Dq); err == nil {
+ k.dq = v
+ }
+ if v, err := bigIntToBytes(rawKey.Precomputed.Qinv); err == nil {
+ k.qi = v
+ }
+
+ // public key part
+ n, e, err := importRsaPublicKeyByteValues(&rawKey.PublicKey)
+ if err != nil {
+ return fmt.Errorf(`invalid rsa.PrivateKey: %w`, err)
+ }
+ k.n = n
+ k.e = e
+
+ return nil
+}
+
+func importRsaPublicKeyByteValues(rawKey *rsa.PublicKey) ([]byte, []byte, error) {
+ n, err := bigIntToBytes(rawKey.N)
+ if err != nil {
+ return nil, nil, fmt.Errorf(`invalid rsa.PublicKey: %w`, err)
+ }
+
+ data := make([]byte, 8)
+ binary.BigEndian.PutUint64(data, uint64(rawKey.E))
+ i := 0
+ for ; i < len(data); i++ {
+ if data[i] != 0x0 {
+ break
+ }
+ }
+ return n, data[i:], nil
+}
+
+func (k *rsaPublicKey) Import(rawKey *rsa.PublicKey) error {
+ k.mu.Lock()
+ defer k.mu.Unlock()
+
+ n, e, err := importRsaPublicKeyByteValues(rawKey)
+ if err != nil {
+ return fmt.Errorf(`invalid rsa.PrivateKey: %w`, err)
+ }
+ k.n = n
+ k.e = e
+
+ return nil
+}
+
+func buildRSAPublicKey(key *rsa.PublicKey, n, e []byte) {
+ bin := pool.BigInt().Get()
+ bie := pool.BigInt().Get()
+ defer pool.BigInt().Put(bie)
+
+ bin.SetBytes(n)
+ bie.SetBytes(e)
+
+ key.N = bin
+ key.E = int(bie.Int64())
+}
+
+var rsaConvertibleKeys = []reflect.Type{
+ reflect.TypeOf((*RSAPrivateKey)(nil)).Elem(),
+ reflect.TypeOf((*RSAPublicKey)(nil)).Elem(),
+}
+
+func rsaJWKToRaw(key Key, hint any) (any, error) {
+ extracted, err := extractEmbeddedKey(key, rsaConvertibleKeys)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to extract embedded key: %w`, err)
+ }
+ switch key := extracted.(type) {
+ case RSAPrivateKey:
+ switch hint.(type) {
+ case *rsa.PrivateKey, *any:
+ default:
+ return nil, fmt.Errorf(`invalid destination object type %T for private RSA JWK: %w`, hint, ContinueError())
+ }
+
+ locker, ok := key.(rlocker)
+ if !ok {
+ locker.rlock()
+ defer locker.runlock()
+ }
+
+ od, ok := key.D()
+ if !ok {
+ return nil, fmt.Errorf(`missing "d" value`)
+ }
+
+ oq, ok := key.Q()
+ if !ok {
+ return nil, fmt.Errorf(`missing "q" value`)
+ }
+
+ op, ok := key.P()
+ if !ok {
+ return nil, fmt.Errorf(`missing "p" value`)
+ }
+
+ var d, q, p big.Int // note: do not use from sync.Pool
+
+ d.SetBytes(od)
+ q.SetBytes(oq)
+ p.SetBytes(op)
+
+ // optional fields
+ var dp, dq, qi *big.Int
+
+ if odp, ok := key.DP(); ok {
+ dp = &big.Int{} // note: do not use from sync.Pool
+ dp.SetBytes(odp)
+ }
+
+ if odq, ok := key.DQ(); ok {
+ dq = &big.Int{} // note: do not use from sync.Pool
+ dq.SetBytes(odq)
+ }
+
+ if oqi, ok := key.QI(); ok {
+ qi = &big.Int{} // note: do not use from sync.Pool
+ qi.SetBytes(oqi)
+ }
+
+ n, ok := key.N()
+ if !ok {
+ return nil, fmt.Errorf(`missing "n" value`)
+ }
+
+ e, ok := key.E()
+ if !ok {
+ return nil, fmt.Errorf(`missing "e" value`)
+ }
+
+ var privkey rsa.PrivateKey
+ buildRSAPublicKey(&privkey.PublicKey, n, e)
+ privkey.D = &d
+ privkey.Primes = []*big.Int{&p, &q}
+
+ if dp != nil {
+ privkey.Precomputed.Dp = dp
+ }
+ if dq != nil {
+ privkey.Precomputed.Dq = dq
+ }
+ if qi != nil {
+ privkey.Precomputed.Qinv = qi
+ }
+ // This may look like a no-op, but it's required if we want to
+ // compare it against a key generated by rsa.GenerateKey
+ privkey.Precomputed.CRTValues = []rsa.CRTValue{}
+ return &privkey, nil
+ case RSAPublicKey:
+ switch hint.(type) {
+ case *rsa.PublicKey, *any:
+ default:
+ return nil, fmt.Errorf(`invalid destination object type %T for public RSA JWK: %w`, hint, ContinueError())
+ }
+
+ locker, ok := key.(rlocker)
+ if !ok {
+ locker.rlock()
+ defer locker.runlock()
+ }
+
+ n, ok := key.N()
+ if !ok {
+ return nil, fmt.Errorf(`missing "n" value`)
+ }
+
+ e, ok := key.E()
+ if !ok {
+ return nil, fmt.Errorf(`missing "e" value`)
+ }
+
+ var pubkey rsa.PublicKey
+ buildRSAPublicKey(&pubkey, n, e)
+
+ return &pubkey, nil
+
+ default:
+ return nil, ContinueError()
+ }
+}
+
+func makeRSAPublicKey(src Key) (Key, error) {
+ newKey := newRSAPublicKey()
+
+ // Iterate and copy everything except for the bits that should not be in the public key
+ for _, k := range src.Keys() {
+ switch k {
+ case RSADKey, RSADPKey, RSADQKey, RSAPKey, RSAQKey, RSAQIKey:
+ continue
+ default:
+ var v any
+ if err := src.Get(k, &v); err != nil {
+ return nil, fmt.Errorf(`rsa: makeRSAPublicKey: failed to get field %q: %w`, k, err)
+ }
+ if err := newKey.Set(k, v); err != nil {
+ return nil, fmt.Errorf(`rsa: makeRSAPublicKey: failed to set field %q: %w`, k, err)
+ }
+ }
+ }
+
+ return newKey, nil
+}
+
+func (k *rsaPrivateKey) PublicKey() (Key, error) {
+ return makeRSAPublicKey(k)
+}
+
+func (k *rsaPublicKey) PublicKey() (Key, error) {
+ return makeRSAPublicKey(k)
+}
+
+// Thumbprint returns the JWK thumbprint using the indicated
+// hashing algorithm, according to RFC 7638
+func (k rsaPrivateKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
+ k.mu.RLock()
+ defer k.mu.RUnlock()
+
+ var key rsa.PrivateKey
+ if err := Export(&k, &key); err != nil {
+ return nil, fmt.Errorf(`failed to export RSA private key: %w`, err)
+ }
+ return rsaThumbprint(hash, &key.PublicKey)
+}
+
+func (k rsaPublicKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
+ k.mu.RLock()
+ defer k.mu.RUnlock()
+
+ var key rsa.PublicKey
+ if err := Export(&k, &key); err != nil {
+ return nil, fmt.Errorf(`failed to export RSA public key: %w`, err)
+ }
+ return rsaThumbprint(hash, &key)
+}
+
+func rsaThumbprint(hash crypto.Hash, key *rsa.PublicKey) ([]byte, error) {
+ buf := pool.BytesBuffer().Get()
+ defer pool.BytesBuffer().Put(buf)
+
+ buf.WriteString(`{"e":"`)
+ buf.WriteString(base64.EncodeUint64ToString(uint64(key.E)))
+ buf.WriteString(`","kty":"RSA","n":"`)
+ buf.WriteString(base64.EncodeToString(key.N.Bytes()))
+ buf.WriteString(`"}`)
+
+ h := hash.New()
+ if _, err := buf.WriteTo(h); err != nil {
+ return nil, fmt.Errorf(`failed to write rsaThumbprint: %w`, err)
+ }
+ return h.Sum(nil), nil
+}
+
+func validateRSAKey(key interface {
+ N() ([]byte, bool)
+ E() ([]byte, bool)
+}, checkPrivate bool) error {
+ n, ok := key.N()
+ if !ok {
+ return fmt.Errorf(`missing "n" value`)
+ }
+
+ e, ok := key.E()
+ if !ok {
+ return fmt.Errorf(`missing "e" value`)
+ }
+
+ if len(n) == 0 {
+ // Ideally we would like to check for the actual length, but unlike
+ // EC keys, we have nothing in the key itself that will tell us
+ // how many bits this key should have.
+ return fmt.Errorf(`missing "n" value`)
+ }
+ if len(e) == 0 {
+ return fmt.Errorf(`missing "e" value`)
+ }
+ if checkPrivate {
+ if priv, ok := key.(keyWithD); ok {
+ if d, ok := priv.D(); !ok || len(d) == 0 {
+ return fmt.Errorf(`missing "d" value`)
+ }
+ } else {
+ return fmt.Errorf(`missing "d" value`)
+ }
+ }
+
+ return nil
+}
+
+func (k *rsaPrivateKey) Validate() error {
+ if err := validateRSAKey(k, true); err != nil {
+ return NewKeyValidationError(fmt.Errorf(`jwk.RSAPrivateKey: %w`, err))
+ }
+ return nil
+}
+
+func (k *rsaPublicKey) Validate() error {
+ if err := validateRSAKey(k, false); err != nil {
+ return NewKeyValidationError(fmt.Errorf(`jwk.RSAPublicKey: %w`, err))
+ }
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/rsa_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/rsa_gen.go
new file mode 100644
index 0000000000..8e2a4f085b
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/rsa_gen.go
@@ -0,0 +1,1543 @@
+// Code generated by tools/cmd/genjwk/main.go. DO NOT EDIT.
+
+package jwk
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "sync"
+
+ "github.com/lestrrat-go/blackmagic"
+ "github.com/lestrrat-go/jwx/v3/cert"
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+ "github.com/lestrrat-go/jwx/v3/internal/pool"
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+)
+
+const (
+ RSADKey = "d"
+ RSADPKey = "dp"
+ RSADQKey = "dq"
+ RSAEKey = "e"
+ RSANKey = "n"
+ RSAPKey = "p"
+ RSAQIKey = "qi"
+ RSAQKey = "q"
+)
+
+type RSAPublicKey interface {
+ Key
+ E() ([]byte, bool)
+ N() ([]byte, bool)
+}
+
+type rsaPublicKey struct {
+ algorithm *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4
+ e []byte
+ keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4
+ keyOps *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3
+ keyUsage *string // https://tools.ietf.org/html/rfc7517#section-4.2
+ n []byte
+ x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6
+ x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7
+ x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8
+ x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5
+ privateParams map[string]any
+ mu *sync.RWMutex
+ dc json.DecodeCtx
+}
+
+var _ RSAPublicKey = &rsaPublicKey{}
+var _ Key = &rsaPublicKey{}
+
+func newRSAPublicKey() *rsaPublicKey {
+ return &rsaPublicKey{
+ mu: &sync.RWMutex{},
+ privateParams: make(map[string]any),
+ }
+}
+
+func (h rsaPublicKey) KeyType() jwa.KeyType {
+ return jwa.RSA()
+}
+
+func (h rsaPublicKey) rlock() {
+ h.mu.RLock()
+}
+
+func (h rsaPublicKey) runlock() {
+ h.mu.RUnlock()
+}
+
+func (h rsaPublicKey) IsPrivate() bool {
+ return false
+}
+
+func (h *rsaPublicKey) Algorithm() (jwa.KeyAlgorithm, bool) {
+ if h.algorithm != nil {
+ return *(h.algorithm), true
+ }
+ return nil, false
+}
+
+func (h *rsaPublicKey) E() ([]byte, bool) {
+ if h.e != nil {
+ return h.e, true
+ }
+ return nil, false
+}
+
+func (h *rsaPublicKey) KeyID() (string, bool) {
+ if h.keyID != nil {
+ return *(h.keyID), true
+ }
+ return "", false
+}
+
+func (h *rsaPublicKey) KeyOps() (KeyOperationList, bool) {
+ if h.keyOps != nil {
+ return *(h.keyOps), true
+ }
+ return nil, false
+}
+
+func (h *rsaPublicKey) KeyUsage() (string, bool) {
+ if h.keyUsage != nil {
+ return *(h.keyUsage), true
+ }
+ return "", false
+}
+
+func (h *rsaPublicKey) N() ([]byte, bool) {
+ if h.n != nil {
+ return h.n, true
+ }
+ return nil, false
+}
+
+func (h *rsaPublicKey) X509CertChain() (*cert.Chain, bool) {
+ return h.x509CertChain, true
+}
+
+func (h *rsaPublicKey) X509CertThumbprint() (string, bool) {
+ if h.x509CertThumbprint != nil {
+ return *(h.x509CertThumbprint), true
+ }
+ return "", false
+}
+
+func (h *rsaPublicKey) X509CertThumbprintS256() (string, bool) {
+ if h.x509CertThumbprintS256 != nil {
+ return *(h.x509CertThumbprintS256), true
+ }
+ return "", false
+}
+
+func (h *rsaPublicKey) X509URL() (string, bool) {
+ if h.x509URL != nil {
+ return *(h.x509URL), true
+ }
+ return "", false
+}
+
+func (h *rsaPublicKey) Has(name string) bool {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ switch name {
+ case KeyTypeKey:
+ return true
+ case AlgorithmKey:
+ return h.algorithm != nil
+ case RSAEKey:
+ return h.e != nil
+ case KeyIDKey:
+ return h.keyID != nil
+ case KeyOpsKey:
+ return h.keyOps != nil
+ case KeyUsageKey:
+ return h.keyUsage != nil
+ case RSANKey:
+ return h.n != nil
+ case X509CertChainKey:
+ return h.x509CertChain != nil
+ case X509CertThumbprintKey:
+ return h.x509CertThumbprint != nil
+ case X509CertThumbprintS256Key:
+ return h.x509CertThumbprintS256 != nil
+ case X509URLKey:
+ return h.x509URL != nil
+ default:
+ _, ok := h.privateParams[name]
+ return ok
+ }
+}
+
+func (h *rsaPublicKey) Get(name string, dst any) error {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ switch name {
+ case KeyTypeKey:
+ if err := blackmagic.AssignIfCompatible(dst, h.KeyType()); err != nil {
+ return fmt.Errorf(`rsaPublicKey.Get: failed to assign value for field %q to destination object: %w`, name, err)
+ }
+ case AlgorithmKey:
+ if h.algorithm == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.algorithm)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case RSAEKey:
+ if h.e == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.e); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case KeyIDKey:
+ if h.keyID == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyID)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case KeyOpsKey:
+ if h.keyOps == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyOps)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case KeyUsageKey:
+ if h.keyUsage == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyUsage)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case RSANKey:
+ if h.n == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.n); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertChainKey:
+ if h.x509CertChain == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.x509CertChain); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertThumbprintKey:
+ if h.x509CertThumbprint == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprint)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertThumbprintS256Key:
+ if h.x509CertThumbprintS256 == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprintS256)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509URLKey:
+ if h.x509URL == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509URL)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ default:
+ v, ok := h.privateParams[name]
+ if !ok {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, v); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ }
+ return nil
+}
+
+func (h *rsaPublicKey) Set(name string, value any) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ return h.setNoLock(name, value)
+}
+
+func (h *rsaPublicKey) setNoLock(name string, value any) error {
+ switch name {
+ case "kty":
+ return nil
+ case AlgorithmKey:
+ switch v := value.(type) {
+ case string, jwa.SignatureAlgorithm, jwa.KeyEncryptionAlgorithm, jwa.ContentEncryptionAlgorithm:
+ tmp, err := jwa.KeyAlgorithmFrom(v)
+ if err != nil {
+ return fmt.Errorf(`invalid algorithm for %q key: %w`, AlgorithmKey, err)
+ }
+ h.algorithm = &tmp
+ default:
+ return fmt.Errorf(`invalid type for %q key: %T`, AlgorithmKey, value)
+ }
+ return nil
+ case RSAEKey:
+ if v, ok := value.([]byte); ok {
+ h.e = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, RSAEKey, value)
+ case KeyIDKey:
+ if v, ok := value.(string); ok {
+ h.keyID = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value)
+ case KeyOpsKey:
+ var acceptor KeyOperationList
+ if err := acceptor.Accept(value); err != nil {
+ return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err)
+ }
+ h.keyOps = &acceptor
+ return nil
+ case KeyUsageKey:
+ switch v := value.(type) {
+ case KeyUsageType:
+ switch v {
+ case ForSignature, ForEncryption:
+ tmp := v.String()
+ h.keyUsage = &tmp
+ default:
+ return fmt.Errorf(`invalid key usage type %s`, v)
+ }
+ case string:
+ h.keyUsage = &v
+ default:
+ return fmt.Errorf(`invalid key usage type %s`, v)
+ }
+ case RSANKey:
+ if v, ok := value.([]byte); ok {
+ h.n = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, RSANKey, value)
+ case X509CertChainKey:
+ if v, ok := value.(*cert.Chain); ok {
+ h.x509CertChain = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value)
+ case X509CertThumbprintKey:
+ if v, ok := value.(string); ok {
+ h.x509CertThumbprint = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value)
+ case X509CertThumbprintS256Key:
+ if v, ok := value.(string); ok {
+ h.x509CertThumbprintS256 = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value)
+ case X509URLKey:
+ if v, ok := value.(string); ok {
+ h.x509URL = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value)
+ default:
+ if h.privateParams == nil {
+ h.privateParams = map[string]any{}
+ }
+ h.privateParams[name] = value
+ }
+ return nil
+}
+
+func (k *rsaPublicKey) Remove(key string) error {
+ k.mu.Lock()
+ defer k.mu.Unlock()
+ switch key {
+ case AlgorithmKey:
+ k.algorithm = nil
+ case RSAEKey:
+ k.e = nil
+ case KeyIDKey:
+ k.keyID = nil
+ case KeyOpsKey:
+ k.keyOps = nil
+ case KeyUsageKey:
+ k.keyUsage = nil
+ case RSANKey:
+ k.n = nil
+ case X509CertChainKey:
+ k.x509CertChain = nil
+ case X509CertThumbprintKey:
+ k.x509CertThumbprint = nil
+ case X509CertThumbprintS256Key:
+ k.x509CertThumbprintS256 = nil
+ case X509URLKey:
+ k.x509URL = nil
+ default:
+ delete(k.privateParams, key)
+ }
+ return nil
+}
+
+func (k *rsaPublicKey) Clone() (Key, error) {
+ key, err := cloneKey(k)
+ if err != nil {
+ return nil, fmt.Errorf(`rsaPublicKey.Clone: %w`, err)
+ }
+ return key, nil
+}
+
+func (k *rsaPublicKey) DecodeCtx() json.DecodeCtx {
+ k.mu.RLock()
+ defer k.mu.RUnlock()
+ return k.dc
+}
+
+func (k *rsaPublicKey) SetDecodeCtx(dc json.DecodeCtx) {
+ k.mu.Lock()
+ defer k.mu.Unlock()
+ k.dc = dc
+}
+
+func (h *rsaPublicKey) UnmarshalJSON(buf []byte) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ h.algorithm = nil
+ h.e = nil
+ h.keyID = nil
+ h.keyOps = nil
+ h.keyUsage = nil
+ h.n = nil
+ h.x509CertChain = nil
+ h.x509CertThumbprint = nil
+ h.x509CertThumbprintS256 = nil
+ h.x509URL = nil
+ dec := json.NewDecoder(bytes.NewReader(buf))
+LOOP:
+ for {
+ tok, err := dec.Token()
+ if err != nil {
+ return fmt.Errorf(`error reading token: %w`, err)
+ }
+ switch tok := tok.(type) {
+ case json.Delim:
+ // Assuming we're doing everything correctly, we should ONLY
+ // get either tokens.OpenCurlyBracket or tokens.CloseCurlyBracket here.
+ if tok == tokens.CloseCurlyBracket { // End of object
+ break LOOP
+ } else if tok != tokens.OpenCurlyBracket {
+ return fmt.Errorf(`expected '%c' but got '%c'`, tokens.OpenCurlyBracket, tok)
+ }
+ case string: // Objects can only have string keys
+ switch tok {
+ case KeyTypeKey:
+ val, err := json.ReadNextStringToken(dec)
+ if err != nil {
+ return fmt.Errorf(`error reading token: %w`, err)
+ }
+ if val != jwa.RSA().String() {
+ return fmt.Errorf(`invalid kty value for RSAPublicKey (%s)`, val)
+ }
+ case AlgorithmKey:
+ var s string
+ if err := dec.Decode(&s); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+ }
+ alg, err := jwa.KeyAlgorithmFrom(s)
+ if err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+ }
+ h.algorithm = &alg
+ case RSAEKey:
+ if err := json.AssignNextBytesToken(&h.e, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, RSAEKey, err)
+ }
+ case KeyIDKey:
+ if err := json.AssignNextStringToken(&h.keyID, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err)
+ }
+ case KeyOpsKey:
+ var decoded KeyOperationList
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err)
+ }
+ h.keyOps = &decoded
+ case KeyUsageKey:
+ if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err)
+ }
+ case RSANKey:
+ if err := json.AssignNextBytesToken(&h.n, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, RSANKey, err)
+ }
+ case X509CertChainKey:
+ var decoded cert.Chain
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err)
+ }
+ h.x509CertChain = &decoded
+ case X509CertThumbprintKey:
+ if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err)
+ }
+ case X509CertThumbprintS256Key:
+ if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err)
+ }
+ case X509URLKey:
+ if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err)
+ }
+ default:
+ if dc := h.dc; dc != nil {
+ if localReg := dc.Registry(); localReg != nil {
+ decoded, err := localReg.Decode(dec, tok)
+ if err == nil {
+ h.setNoLock(tok, decoded)
+ continue
+ }
+ }
+ }
+ decoded, err := registry.Decode(dec, tok)
+ if err == nil {
+ h.setNoLock(tok, decoded)
+ continue
+ }
+ return fmt.Errorf(`could not decode field %s: %w`, tok, err)
+ }
+ default:
+ return fmt.Errorf(`invalid token %T`, tok)
+ }
+ }
+ if h.e == nil {
+ return fmt.Errorf(`required field e is missing`)
+ }
+ if h.n == nil {
+ return fmt.Errorf(`required field n is missing`)
+ }
+ return nil
+}
+
+func (h rsaPublicKey) MarshalJSON() ([]byte, error) {
+ data := make(map[string]any)
+ fields := make([]string, 0, 10)
+ data[KeyTypeKey] = jwa.RSA()
+ fields = append(fields, KeyTypeKey)
+ if h.algorithm != nil {
+ data[AlgorithmKey] = *(h.algorithm)
+ fields = append(fields, AlgorithmKey)
+ }
+ if h.e != nil {
+ data[RSAEKey] = h.e
+ fields = append(fields, RSAEKey)
+ }
+ if h.keyID != nil {
+ data[KeyIDKey] = *(h.keyID)
+ fields = append(fields, KeyIDKey)
+ }
+ if h.keyOps != nil {
+ data[KeyOpsKey] = *(h.keyOps)
+ fields = append(fields, KeyOpsKey)
+ }
+ if h.keyUsage != nil {
+ data[KeyUsageKey] = *(h.keyUsage)
+ fields = append(fields, KeyUsageKey)
+ }
+ if h.n != nil {
+ data[RSANKey] = h.n
+ fields = append(fields, RSANKey)
+ }
+ if h.x509CertChain != nil {
+ data[X509CertChainKey] = h.x509CertChain
+ fields = append(fields, X509CertChainKey)
+ }
+ if h.x509CertThumbprint != nil {
+ data[X509CertThumbprintKey] = *(h.x509CertThumbprint)
+ fields = append(fields, X509CertThumbprintKey)
+ }
+ if h.x509CertThumbprintS256 != nil {
+ data[X509CertThumbprintS256Key] = *(h.x509CertThumbprintS256)
+ fields = append(fields, X509CertThumbprintS256Key)
+ }
+ if h.x509URL != nil {
+ data[X509URLKey] = *(h.x509URL)
+ fields = append(fields, X509URLKey)
+ }
+ for k, v := range h.privateParams {
+ data[k] = v
+ fields = append(fields, k)
+ }
+
+ sort.Strings(fields)
+ buf := pool.BytesBuffer().Get()
+ defer pool.BytesBuffer().Put(buf)
+ buf.WriteByte(tokens.OpenCurlyBracket)
+ enc := json.NewEncoder(buf)
+ for i, f := range fields {
+ if i > 0 {
+ buf.WriteRune(tokens.Comma)
+ }
+ buf.WriteRune(tokens.DoubleQuote)
+ buf.WriteString(f)
+ buf.WriteString(`":`)
+ v := data[f]
+ switch v := v.(type) {
+ case []byte:
+ buf.WriteRune(tokens.DoubleQuote)
+ buf.WriteString(base64.EncodeToString(v))
+ buf.WriteRune(tokens.DoubleQuote)
+ default:
+ if err := enc.Encode(v); err != nil {
+ return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err)
+ }
+ buf.Truncate(buf.Len() - 1)
+ }
+ }
+ buf.WriteByte(tokens.CloseCurlyBracket)
+ ret := make([]byte, buf.Len())
+ copy(ret, buf.Bytes())
+ return ret, nil
+}
+
+func (h *rsaPublicKey) Keys() []string {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ keys := make([]string, 0, 10+len(h.privateParams))
+ keys = append(keys, KeyTypeKey)
+ if h.algorithm != nil {
+ keys = append(keys, AlgorithmKey)
+ }
+ if h.e != nil {
+ keys = append(keys, RSAEKey)
+ }
+ if h.keyID != nil {
+ keys = append(keys, KeyIDKey)
+ }
+ if h.keyOps != nil {
+ keys = append(keys, KeyOpsKey)
+ }
+ if h.keyUsage != nil {
+ keys = append(keys, KeyUsageKey)
+ }
+ if h.n != nil {
+ keys = append(keys, RSANKey)
+ }
+ if h.x509CertChain != nil {
+ keys = append(keys, X509CertChainKey)
+ }
+ if h.x509CertThumbprint != nil {
+ keys = append(keys, X509CertThumbprintKey)
+ }
+ if h.x509CertThumbprintS256 != nil {
+ keys = append(keys, X509CertThumbprintS256Key)
+ }
+ if h.x509URL != nil {
+ keys = append(keys, X509URLKey)
+ }
+ for k := range h.privateParams {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+type RSAPrivateKey interface {
+ Key
+ D() ([]byte, bool)
+ DP() ([]byte, bool)
+ DQ() ([]byte, bool)
+ E() ([]byte, bool)
+ N() ([]byte, bool)
+ P() ([]byte, bool)
+ Q() ([]byte, bool)
+ QI() ([]byte, bool)
+}
+
+type rsaPrivateKey struct {
+ algorithm *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4
+ d []byte
+ dp []byte
+ dq []byte
+ e []byte
+ keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4
+ keyOps *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3
+ keyUsage *string // https://tools.ietf.org/html/rfc7517#section-4.2
+ n []byte
+ p []byte
+ q []byte
+ qi []byte
+ x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6
+ x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7
+ x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8
+ x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5
+ privateParams map[string]any
+ mu *sync.RWMutex
+ dc json.DecodeCtx
+}
+
+var _ RSAPrivateKey = &rsaPrivateKey{}
+var _ Key = &rsaPrivateKey{}
+
+func newRSAPrivateKey() *rsaPrivateKey {
+ return &rsaPrivateKey{
+ mu: &sync.RWMutex{},
+ privateParams: make(map[string]any),
+ }
+}
+
+func (h rsaPrivateKey) KeyType() jwa.KeyType {
+ return jwa.RSA()
+}
+
+func (h rsaPrivateKey) rlock() {
+ h.mu.RLock()
+}
+
+func (h rsaPrivateKey) runlock() {
+ h.mu.RUnlock()
+}
+
+func (h rsaPrivateKey) IsPrivate() bool {
+ return true
+}
+
+func (h *rsaPrivateKey) Algorithm() (jwa.KeyAlgorithm, bool) {
+ if h.algorithm != nil {
+ return *(h.algorithm), true
+ }
+ return nil, false
+}
+
+func (h *rsaPrivateKey) D() ([]byte, bool) {
+ if h.d != nil {
+ return h.d, true
+ }
+ return nil, false
+}
+
+func (h *rsaPrivateKey) DP() ([]byte, bool) {
+ if h.dp != nil {
+ return h.dp, true
+ }
+ return nil, false
+}
+
+func (h *rsaPrivateKey) DQ() ([]byte, bool) {
+ if h.dq != nil {
+ return h.dq, true
+ }
+ return nil, false
+}
+
+func (h *rsaPrivateKey) E() ([]byte, bool) {
+ if h.e != nil {
+ return h.e, true
+ }
+ return nil, false
+}
+
+func (h *rsaPrivateKey) KeyID() (string, bool) {
+ if h.keyID != nil {
+ return *(h.keyID), true
+ }
+ return "", false
+}
+
+func (h *rsaPrivateKey) KeyOps() (KeyOperationList, bool) {
+ if h.keyOps != nil {
+ return *(h.keyOps), true
+ }
+ return nil, false
+}
+
+func (h *rsaPrivateKey) KeyUsage() (string, bool) {
+ if h.keyUsage != nil {
+ return *(h.keyUsage), true
+ }
+ return "", false
+}
+
+func (h *rsaPrivateKey) N() ([]byte, bool) {
+ if h.n != nil {
+ return h.n, true
+ }
+ return nil, false
+}
+
+func (h *rsaPrivateKey) P() ([]byte, bool) {
+ if h.p != nil {
+ return h.p, true
+ }
+ return nil, false
+}
+
+func (h *rsaPrivateKey) Q() ([]byte, bool) {
+ if h.q != nil {
+ return h.q, true
+ }
+ return nil, false
+}
+
+func (h *rsaPrivateKey) QI() ([]byte, bool) {
+ if h.qi != nil {
+ return h.qi, true
+ }
+ return nil, false
+}
+
+func (h *rsaPrivateKey) X509CertChain() (*cert.Chain, bool) {
+ return h.x509CertChain, true
+}
+
+func (h *rsaPrivateKey) X509CertThumbprint() (string, bool) {
+ if h.x509CertThumbprint != nil {
+ return *(h.x509CertThumbprint), true
+ }
+ return "", false
+}
+
+func (h *rsaPrivateKey) X509CertThumbprintS256() (string, bool) {
+ if h.x509CertThumbprintS256 != nil {
+ return *(h.x509CertThumbprintS256), true
+ }
+ return "", false
+}
+
+func (h *rsaPrivateKey) X509URL() (string, bool) {
+ if h.x509URL != nil {
+ return *(h.x509URL), true
+ }
+ return "", false
+}
+
+func (h *rsaPrivateKey) Has(name string) bool {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ switch name {
+ case KeyTypeKey:
+ return true
+ case AlgorithmKey:
+ return h.algorithm != nil
+ case RSADKey:
+ return h.d != nil
+ case RSADPKey:
+ return h.dp != nil
+ case RSADQKey:
+ return h.dq != nil
+ case RSAEKey:
+ return h.e != nil
+ case KeyIDKey:
+ return h.keyID != nil
+ case KeyOpsKey:
+ return h.keyOps != nil
+ case KeyUsageKey:
+ return h.keyUsage != nil
+ case RSANKey:
+ return h.n != nil
+ case RSAPKey:
+ return h.p != nil
+ case RSAQKey:
+ return h.q != nil
+ case RSAQIKey:
+ return h.qi != nil
+ case X509CertChainKey:
+ return h.x509CertChain != nil
+ case X509CertThumbprintKey:
+ return h.x509CertThumbprint != nil
+ case X509CertThumbprintS256Key:
+ return h.x509CertThumbprintS256 != nil
+ case X509URLKey:
+ return h.x509URL != nil
+ default:
+ _, ok := h.privateParams[name]
+ return ok
+ }
+}
+
+func (h *rsaPrivateKey) Get(name string, dst any) error {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ switch name {
+ case KeyTypeKey:
+ if err := blackmagic.AssignIfCompatible(dst, h.KeyType()); err != nil {
+ return fmt.Errorf(`rsaPrivateKey.Get: failed to assign value for field %q to destination object: %w`, name, err)
+ }
+ case AlgorithmKey:
+ if h.algorithm == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.algorithm)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case RSADKey:
+ if h.d == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.d); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case RSADPKey:
+ if h.dp == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.dp); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case RSADQKey:
+ if h.dq == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.dq); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case RSAEKey:
+ if h.e == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.e); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case KeyIDKey:
+ if h.keyID == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyID)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case KeyOpsKey:
+ if h.keyOps == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyOps)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case KeyUsageKey:
+ if h.keyUsage == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyUsage)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case RSANKey:
+ if h.n == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.n); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case RSAPKey:
+ if h.p == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.p); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case RSAQKey:
+ if h.q == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.q); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case RSAQIKey:
+ if h.qi == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.qi); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertChainKey:
+ if h.x509CertChain == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.x509CertChain); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertThumbprintKey:
+ if h.x509CertThumbprint == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprint)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertThumbprintS256Key:
+ if h.x509CertThumbprintS256 == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprintS256)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509URLKey:
+ if h.x509URL == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509URL)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ default:
+ v, ok := h.privateParams[name]
+ if !ok {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, v); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ }
+ return nil
+}
+
+func (h *rsaPrivateKey) Set(name string, value any) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ return h.setNoLock(name, value)
+}
+
+func (h *rsaPrivateKey) setNoLock(name string, value any) error {
+ switch name {
+ case "kty":
+ return nil
+ case AlgorithmKey:
+ switch v := value.(type) {
+ case string, jwa.SignatureAlgorithm, jwa.KeyEncryptionAlgorithm, jwa.ContentEncryptionAlgorithm:
+ tmp, err := jwa.KeyAlgorithmFrom(v)
+ if err != nil {
+ return fmt.Errorf(`invalid algorithm for %q key: %w`, AlgorithmKey, err)
+ }
+ h.algorithm = &tmp
+ default:
+ return fmt.Errorf(`invalid type for %q key: %T`, AlgorithmKey, value)
+ }
+ return nil
+ case RSADKey:
+ if v, ok := value.([]byte); ok {
+ h.d = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, RSADKey, value)
+ case RSADPKey:
+ if v, ok := value.([]byte); ok {
+ h.dp = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, RSADPKey, value)
+ case RSADQKey:
+ if v, ok := value.([]byte); ok {
+ h.dq = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, RSADQKey, value)
+ case RSAEKey:
+ if v, ok := value.([]byte); ok {
+ h.e = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, RSAEKey, value)
+ case KeyIDKey:
+ if v, ok := value.(string); ok {
+ h.keyID = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value)
+ case KeyOpsKey:
+ var acceptor KeyOperationList
+ if err := acceptor.Accept(value); err != nil {
+ return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err)
+ }
+ h.keyOps = &acceptor
+ return nil
+ case KeyUsageKey:
+ switch v := value.(type) {
+ case KeyUsageType:
+ switch v {
+ case ForSignature, ForEncryption:
+ tmp := v.String()
+ h.keyUsage = &tmp
+ default:
+ return fmt.Errorf(`invalid key usage type %s`, v)
+ }
+ case string:
+ h.keyUsage = &v
+ default:
+ return fmt.Errorf(`invalid key usage type %s`, v)
+ }
+ case RSANKey:
+ if v, ok := value.([]byte); ok {
+ h.n = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, RSANKey, value)
+ case RSAPKey:
+ if v, ok := value.([]byte); ok {
+ h.p = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, RSAPKey, value)
+ case RSAQKey:
+ if v, ok := value.([]byte); ok {
+ h.q = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, RSAQKey, value)
+ case RSAQIKey:
+ if v, ok := value.([]byte); ok {
+ h.qi = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, RSAQIKey, value)
+ case X509CertChainKey:
+ if v, ok := value.(*cert.Chain); ok {
+ h.x509CertChain = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value)
+ case X509CertThumbprintKey:
+ if v, ok := value.(string); ok {
+ h.x509CertThumbprint = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value)
+ case X509CertThumbprintS256Key:
+ if v, ok := value.(string); ok {
+ h.x509CertThumbprintS256 = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value)
+ case X509URLKey:
+ if v, ok := value.(string); ok {
+ h.x509URL = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value)
+ default:
+ if h.privateParams == nil {
+ h.privateParams = map[string]any{}
+ }
+ h.privateParams[name] = value
+ }
+ return nil
+}
+
+func (k *rsaPrivateKey) Remove(key string) error {
+ k.mu.Lock()
+ defer k.mu.Unlock()
+ switch key {
+ case AlgorithmKey:
+ k.algorithm = nil
+ case RSADKey:
+ k.d = nil
+ case RSADPKey:
+ k.dp = nil
+ case RSADQKey:
+ k.dq = nil
+ case RSAEKey:
+ k.e = nil
+ case KeyIDKey:
+ k.keyID = nil
+ case KeyOpsKey:
+ k.keyOps = nil
+ case KeyUsageKey:
+ k.keyUsage = nil
+ case RSANKey:
+ k.n = nil
+ case RSAPKey:
+ k.p = nil
+ case RSAQKey:
+ k.q = nil
+ case RSAQIKey:
+ k.qi = nil
+ case X509CertChainKey:
+ k.x509CertChain = nil
+ case X509CertThumbprintKey:
+ k.x509CertThumbprint = nil
+ case X509CertThumbprintS256Key:
+ k.x509CertThumbprintS256 = nil
+ case X509URLKey:
+ k.x509URL = nil
+ default:
+ delete(k.privateParams, key)
+ }
+ return nil
+}
+
+func (k *rsaPrivateKey) Clone() (Key, error) {
+ key, err := cloneKey(k)
+ if err != nil {
+ return nil, fmt.Errorf(`rsaPrivateKey.Clone: %w`, err)
+ }
+ return key, nil
+}
+
+func (k *rsaPrivateKey) DecodeCtx() json.DecodeCtx {
+ k.mu.RLock()
+ defer k.mu.RUnlock()
+ return k.dc
+}
+
+func (k *rsaPrivateKey) SetDecodeCtx(dc json.DecodeCtx) {
+ k.mu.Lock()
+ defer k.mu.Unlock()
+ k.dc = dc
+}
+
+func (h *rsaPrivateKey) UnmarshalJSON(buf []byte) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ h.algorithm = nil
+ h.d = nil
+ h.dp = nil
+ h.dq = nil
+ h.e = nil
+ h.keyID = nil
+ h.keyOps = nil
+ h.keyUsage = nil
+ h.n = nil
+ h.p = nil
+ h.q = nil
+ h.qi = nil
+ h.x509CertChain = nil
+ h.x509CertThumbprint = nil
+ h.x509CertThumbprintS256 = nil
+ h.x509URL = nil
+ dec := json.NewDecoder(bytes.NewReader(buf))
+LOOP:
+ for {
+ tok, err := dec.Token()
+ if err != nil {
+ return fmt.Errorf(`error reading token: %w`, err)
+ }
+ switch tok := tok.(type) {
+ case json.Delim:
+ // Assuming we're doing everything correctly, we should ONLY
+ // get either tokens.OpenCurlyBracket or tokens.CloseCurlyBracket here.
+ if tok == tokens.CloseCurlyBracket { // End of object
+ break LOOP
+ } else if tok != tokens.OpenCurlyBracket {
+ return fmt.Errorf(`expected '%c' but got '%c'`, tokens.OpenCurlyBracket, tok)
+ }
+ case string: // Objects can only have string keys
+ switch tok {
+ case KeyTypeKey:
+ val, err := json.ReadNextStringToken(dec)
+ if err != nil {
+ return fmt.Errorf(`error reading token: %w`, err)
+ }
+ if val != jwa.RSA().String() {
+ return fmt.Errorf(`invalid kty value for RSAPublicKey (%s)`, val)
+ }
+ case AlgorithmKey:
+ var s string
+ if err := dec.Decode(&s); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+ }
+ alg, err := jwa.KeyAlgorithmFrom(s)
+ if err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+ }
+ h.algorithm = &alg
+ case RSADKey:
+ if err := json.AssignNextBytesToken(&h.d, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, RSADKey, err)
+ }
+ case RSADPKey:
+ if err := json.AssignNextBytesToken(&h.dp, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, RSADPKey, err)
+ }
+ case RSADQKey:
+ if err := json.AssignNextBytesToken(&h.dq, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, RSADQKey, err)
+ }
+ case RSAEKey:
+ if err := json.AssignNextBytesToken(&h.e, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, RSAEKey, err)
+ }
+ case KeyIDKey:
+ if err := json.AssignNextStringToken(&h.keyID, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err)
+ }
+ case KeyOpsKey:
+ var decoded KeyOperationList
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err)
+ }
+ h.keyOps = &decoded
+ case KeyUsageKey:
+ if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err)
+ }
+ case RSANKey:
+ if err := json.AssignNextBytesToken(&h.n, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, RSANKey, err)
+ }
+ case RSAPKey:
+ if err := json.AssignNextBytesToken(&h.p, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, RSAPKey, err)
+ }
+ case RSAQKey:
+ if err := json.AssignNextBytesToken(&h.q, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, RSAQKey, err)
+ }
+ case RSAQIKey:
+ if err := json.AssignNextBytesToken(&h.qi, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, RSAQIKey, err)
+ }
+ case X509CertChainKey:
+ var decoded cert.Chain
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err)
+ }
+ h.x509CertChain = &decoded
+ case X509CertThumbprintKey:
+ if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err)
+ }
+ case X509CertThumbprintS256Key:
+ if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err)
+ }
+ case X509URLKey:
+ if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err)
+ }
+ default:
+ if dc := h.dc; dc != nil {
+ if localReg := dc.Registry(); localReg != nil {
+ decoded, err := localReg.Decode(dec, tok)
+ if err == nil {
+ h.setNoLock(tok, decoded)
+ continue
+ }
+ }
+ }
+ decoded, err := registry.Decode(dec, tok)
+ if err == nil {
+ h.setNoLock(tok, decoded)
+ continue
+ }
+ return fmt.Errorf(`could not decode field %s: %w`, tok, err)
+ }
+ default:
+ return fmt.Errorf(`invalid token %T`, tok)
+ }
+ }
+ if h.d == nil {
+ return fmt.Errorf(`required field d is missing`)
+ }
+ if h.e == nil {
+ return fmt.Errorf(`required field e is missing`)
+ }
+ if h.n == nil {
+ return fmt.Errorf(`required field n is missing`)
+ }
+ return nil
+}
+
+func (h rsaPrivateKey) MarshalJSON() ([]byte, error) {
+ data := make(map[string]any)
+ fields := make([]string, 0, 16)
+ data[KeyTypeKey] = jwa.RSA()
+ fields = append(fields, KeyTypeKey)
+ if h.algorithm != nil {
+ data[AlgorithmKey] = *(h.algorithm)
+ fields = append(fields, AlgorithmKey)
+ }
+ if h.d != nil {
+ data[RSADKey] = h.d
+ fields = append(fields, RSADKey)
+ }
+ if h.dp != nil {
+ data[RSADPKey] = h.dp
+ fields = append(fields, RSADPKey)
+ }
+ if h.dq != nil {
+ data[RSADQKey] = h.dq
+ fields = append(fields, RSADQKey)
+ }
+ if h.e != nil {
+ data[RSAEKey] = h.e
+ fields = append(fields, RSAEKey)
+ }
+ if h.keyID != nil {
+ data[KeyIDKey] = *(h.keyID)
+ fields = append(fields, KeyIDKey)
+ }
+ if h.keyOps != nil {
+ data[KeyOpsKey] = *(h.keyOps)
+ fields = append(fields, KeyOpsKey)
+ }
+ if h.keyUsage != nil {
+ data[KeyUsageKey] = *(h.keyUsage)
+ fields = append(fields, KeyUsageKey)
+ }
+ if h.n != nil {
+ data[RSANKey] = h.n
+ fields = append(fields, RSANKey)
+ }
+ if h.p != nil {
+ data[RSAPKey] = h.p
+ fields = append(fields, RSAPKey)
+ }
+ if h.q != nil {
+ data[RSAQKey] = h.q
+ fields = append(fields, RSAQKey)
+ }
+ if h.qi != nil {
+ data[RSAQIKey] = h.qi
+ fields = append(fields, RSAQIKey)
+ }
+ if h.x509CertChain != nil {
+ data[X509CertChainKey] = h.x509CertChain
+ fields = append(fields, X509CertChainKey)
+ }
+ if h.x509CertThumbprint != nil {
+ data[X509CertThumbprintKey] = *(h.x509CertThumbprint)
+ fields = append(fields, X509CertThumbprintKey)
+ }
+ if h.x509CertThumbprintS256 != nil {
+ data[X509CertThumbprintS256Key] = *(h.x509CertThumbprintS256)
+ fields = append(fields, X509CertThumbprintS256Key)
+ }
+ if h.x509URL != nil {
+ data[X509URLKey] = *(h.x509URL)
+ fields = append(fields, X509URLKey)
+ }
+ for k, v := range h.privateParams {
+ data[k] = v
+ fields = append(fields, k)
+ }
+
+ sort.Strings(fields)
+ buf := pool.BytesBuffer().Get()
+ defer pool.BytesBuffer().Put(buf)
+ buf.WriteByte(tokens.OpenCurlyBracket)
+ enc := json.NewEncoder(buf)
+ for i, f := range fields {
+ if i > 0 {
+ buf.WriteRune(tokens.Comma)
+ }
+ buf.WriteRune(tokens.DoubleQuote)
+ buf.WriteString(f)
+ buf.WriteString(`":`)
+ v := data[f]
+ switch v := v.(type) {
+ case []byte:
+ buf.WriteRune(tokens.DoubleQuote)
+ buf.WriteString(base64.EncodeToString(v))
+ buf.WriteRune(tokens.DoubleQuote)
+ default:
+ if err := enc.Encode(v); err != nil {
+ return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err)
+ }
+ buf.Truncate(buf.Len() - 1)
+ }
+ }
+ buf.WriteByte(tokens.CloseCurlyBracket)
+ ret := make([]byte, buf.Len())
+ copy(ret, buf.Bytes())
+ return ret, nil
+}
+
+func (h *rsaPrivateKey) Keys() []string {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ keys := make([]string, 0, 16+len(h.privateParams))
+ keys = append(keys, KeyTypeKey)
+ if h.algorithm != nil {
+ keys = append(keys, AlgorithmKey)
+ }
+ if h.d != nil {
+ keys = append(keys, RSADKey)
+ }
+ if h.dp != nil {
+ keys = append(keys, RSADPKey)
+ }
+ if h.dq != nil {
+ keys = append(keys, RSADQKey)
+ }
+ if h.e != nil {
+ keys = append(keys, RSAEKey)
+ }
+ if h.keyID != nil {
+ keys = append(keys, KeyIDKey)
+ }
+ if h.keyOps != nil {
+ keys = append(keys, KeyOpsKey)
+ }
+ if h.keyUsage != nil {
+ keys = append(keys, KeyUsageKey)
+ }
+ if h.n != nil {
+ keys = append(keys, RSANKey)
+ }
+ if h.p != nil {
+ keys = append(keys, RSAPKey)
+ }
+ if h.q != nil {
+ keys = append(keys, RSAQKey)
+ }
+ if h.qi != nil {
+ keys = append(keys, RSAQIKey)
+ }
+ if h.x509CertChain != nil {
+ keys = append(keys, X509CertChainKey)
+ }
+ if h.x509CertThumbprint != nil {
+ keys = append(keys, X509CertThumbprintKey)
+ }
+ if h.x509CertThumbprintS256 != nil {
+ keys = append(keys, X509CertThumbprintS256Key)
+ }
+ if h.x509URL != nil {
+ keys = append(keys, X509URLKey)
+ }
+ for k := range h.privateParams {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+var rsaStandardFields KeyFilter
+
+func init() {
+ rsaStandardFields = NewFieldNameFilter(KeyTypeKey, KeyUsageKey, KeyOpsKey, AlgorithmKey, KeyIDKey, X509URLKey, X509CertChainKey, X509CertThumbprintKey, X509CertThumbprintS256Key, RSAEKey, RSANKey, RSADKey, RSADPKey, RSADQKey, RSAPKey, RSAQKey, RSAQIKey)
+}
+
+// RSAStandardFieldsFilter returns a KeyFilter that filters out standard RSA fields.
+func RSAStandardFieldsFilter() KeyFilter {
+ return rsaStandardFields
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/set.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/set.go
new file mode 100644
index 0000000000..89d8646874
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/set.go
@@ -0,0 +1,311 @@
+package jwk
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "sort"
+
+ "github.com/lestrrat-go/blackmagic"
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+ "github.com/lestrrat-go/jwx/v3/internal/pool"
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+)
+
+const keysKey = `keys` // appease linter
+
+// NewSet creates and empty `jwk.Set` object
+func NewSet() Set {
+ return &set{
+ privateParams: make(map[string]any),
+ }
+}
+
+func (s *set) Set(n string, v any) error {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ if n == keysKey {
+ vl, ok := v.([]Key)
+ if !ok {
+ return fmt.Errorf(`value for field "keys" must be []jwk.Key`)
+ }
+ s.keys = vl
+ return nil
+ }
+
+ s.privateParams[n] = v
+ return nil
+}
+
+func (s *set) Get(name string, dst any) error {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ v, ok := s.privateParams[name]
+ if !ok {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, v); err != nil {
+ return fmt.Errorf(`failed to assign value to dst: %w`, err)
+ }
+ return nil
+}
+
+func (s *set) Key(idx int) (Key, bool) {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ if idx >= 0 && idx < len(s.keys) {
+ return s.keys[idx], true
+ }
+ return nil, false
+}
+
+func (s *set) Len() int {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ return len(s.keys)
+}
+
+// indexNL is Index(), but without the locking
+func (s *set) indexNL(key Key) int {
+ for i, k := range s.keys {
+ if k == key {
+ return i
+ }
+ }
+ return -1
+}
+
+func (s *set) Index(key Key) int {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ return s.indexNL(key)
+}
+
+func (s *set) AddKey(key Key) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if reflect.ValueOf(key).IsNil() {
+ panic("nil key")
+ }
+
+ if i := s.indexNL(key); i > -1 {
+ return fmt.Errorf(`(jwk.Set).AddKey: key already exists`)
+ }
+ s.keys = append(s.keys, key)
+ return nil
+}
+
+func (s *set) Remove(name string) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ delete(s.privateParams, name)
+ return nil
+}
+
+func (s *set) RemoveKey(key Key) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ for i, k := range s.keys {
+ if k == key {
+ switch i {
+ case 0:
+ s.keys = s.keys[1:]
+ case len(s.keys) - 1:
+ s.keys = s.keys[:i]
+ default:
+ s.keys = append(s.keys[:i], s.keys[i+1:]...)
+ }
+ return nil
+ }
+ }
+ return fmt.Errorf(`(jwk.Set).RemoveKey: specified key does not exist in set`)
+}
+
+func (s *set) Clear() error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.keys = nil
+ s.privateParams = make(map[string]any)
+ return nil
+}
+
+func (s *set) Keys() []string {
+ ret := make([]string, len(s.privateParams))
+ var i int
+ for k := range s.privateParams {
+ ret[i] = k
+ i++
+ }
+ return ret
+}
+
+func (s *set) MarshalJSON() ([]byte, error) {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ buf := pool.BytesBuffer().Get()
+ defer pool.BytesBuffer().Put(buf)
+ enc := json.NewEncoder(buf)
+
+ fields := []string{keysKey}
+ for k := range s.privateParams {
+ fields = append(fields, k)
+ }
+ sort.Strings(fields)
+
+ buf.WriteByte(tokens.OpenCurlyBracket)
+ for i, field := range fields {
+ if i > 0 {
+ buf.WriteByte(tokens.Comma)
+ }
+ fmt.Fprintf(buf, `%q:`, field)
+ if field != keysKey {
+ if err := enc.Encode(s.privateParams[field]); err != nil {
+ return nil, fmt.Errorf(`failed to marshal field %q: %w`, field, err)
+ }
+ } else {
+ buf.WriteByte(tokens.OpenSquareBracket)
+ for j, k := range s.keys {
+ if j > 0 {
+ buf.WriteByte(tokens.Comma)
+ }
+ if err := enc.Encode(k); err != nil {
+ return nil, fmt.Errorf(`failed to marshal key #%d: %w`, i, err)
+ }
+ }
+ buf.WriteByte(tokens.CloseSquareBracket)
+ }
+ }
+ buf.WriteByte(tokens.CloseCurlyBracket)
+
+ ret := make([]byte, buf.Len())
+ copy(ret, buf.Bytes())
+ return ret, nil
+}
+
+func (s *set) UnmarshalJSON(data []byte) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.privateParams = make(map[string]any)
+ s.keys = nil
+
+ var options []ParseOption
+ var ignoreParseError bool
+ if dc := s.dc; dc != nil {
+ if localReg := dc.Registry(); localReg != nil {
+ options = append(options, withLocalRegistry(localReg))
+ }
+ ignoreParseError = dc.IgnoreParseError()
+ }
+
+ var sawKeysField bool
+ dec := json.NewDecoder(bytes.NewReader(data))
+LOOP:
+ for {
+ tok, err := dec.Token()
+ if err != nil {
+ return fmt.Errorf(`error reading token: %w`, err)
+ }
+
+ switch tok := tok.(type) {
+ case json.Delim:
+ // Assuming we're doing everything correctly, we should ONLY
+ // get either tokens.OpenCurlyBracket or tokens.CloseCurlyBracket here.
+ if tok == tokens.CloseCurlyBracket { // End of object
+ break LOOP
+ } else if tok != tokens.OpenCurlyBracket {
+ return fmt.Errorf(`expected '%c' but got '%c'`, tokens.OpenCurlyBracket, tok)
+ }
+ case string:
+ switch tok {
+ case "keys":
+ sawKeysField = true
+ var list []json.RawMessage
+ if err := dec.Decode(&list); err != nil {
+ return fmt.Errorf(`failed to decode "keys": %w`, err)
+ }
+
+ for i, keysrc := range list {
+ key, err := ParseKey(keysrc, options...)
+ if err != nil {
+ if !ignoreParseError {
+ return fmt.Errorf(`failed to decode key #%d in "keys": %w`, i, err)
+ }
+ continue
+ }
+ s.keys = append(s.keys, key)
+ }
+ default:
+ var v any
+ if err := dec.Decode(&v); err != nil {
+ return fmt.Errorf(`failed to decode value for key %q: %w`, tok, err)
+ }
+ s.privateParams[tok] = v
+ }
+ }
+ }
+
+ // This is really silly, but we can only detect the
+ // lack of the "keys" field after going through the
+ // entire object once
+ // Not checking for len(s.keys) == 0, because it could be
+ // an empty key set
+ if !sawKeysField {
+ key, err := ParseKey(data, options...)
+ if err != nil {
+ return fmt.Errorf(`failed to parse sole key in key set`)
+ }
+ s.keys = append(s.keys, key)
+ }
+ return nil
+}
+
+func (s *set) LookupKeyID(kid string) (Key, bool) {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ for i := range s.Len() {
+ key, ok := s.Key(i)
+ if !ok {
+ return nil, false
+ }
+ gotkid, ok := key.KeyID()
+ if ok && gotkid == kid {
+ return key, true
+ }
+ }
+ return nil, false
+}
+
+func (s *set) DecodeCtx() DecodeCtx {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return s.dc
+}
+
+func (s *set) SetDecodeCtx(dc DecodeCtx) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.dc = dc
+}
+
+func (s *set) Clone() (Set, error) {
+ s2 := &set{}
+
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ s2.keys = make([]Key, len(s.keys))
+ copy(s2.keys, s.keys)
+ return s2, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/symmetric.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/symmetric.go
new file mode 100644
index 0000000000..16427ff86f
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/symmetric.go
@@ -0,0 +1,105 @@
+package jwk
+
+import (
+ "crypto"
+ "fmt"
+ "reflect"
+
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+)
+
+func init() {
+ RegisterKeyExporter(jwa.OctetSeq(), KeyExportFunc(octetSeqToRaw))
+}
+
+func (k *symmetricKey) Import(rawKey []byte) error {
+ k.mu.Lock()
+ defer k.mu.Unlock()
+
+ if len(rawKey) == 0 {
+ return fmt.Errorf(`non-empty []byte key required`)
+ }
+
+ k.octets = rawKey
+
+ return nil
+}
+
+var symmetricConvertibleKeys = []reflect.Type{
+ reflect.TypeOf((*SymmetricKey)(nil)).Elem(),
+}
+
+func octetSeqToRaw(key Key, hint any) (any, error) {
+ extracted, err := extractEmbeddedKey(key, symmetricConvertibleKeys)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to extract embedded key: %w`, err)
+ }
+
+ switch key := extracted.(type) {
+ case SymmetricKey:
+ switch hint.(type) {
+ case *[]byte, *any:
+ default:
+ return nil, fmt.Errorf(`invalid destination object type %T for symmetric key: %w`, hint, ContinueError())
+ }
+
+ locker, ok := key.(rlocker)
+ if ok {
+ locker.rlock()
+ defer locker.runlock()
+ }
+
+ ooctets, ok := key.Octets()
+ if !ok {
+ return nil, fmt.Errorf(`jwk.SymmetricKey: missing "k" field`)
+ }
+
+ octets := make([]byte, len(ooctets))
+ copy(octets, ooctets)
+ return octets, nil
+ default:
+ return nil, ContinueError()
+ }
+}
+
+// Thumbprint returns the JWK thumbprint using the indicated
+// hashing algorithm, according to RFC 7638
+func (k *symmetricKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
+ k.mu.RLock()
+ defer k.mu.RUnlock()
+ var octets []byte
+ if err := Export(k, &octets); err != nil {
+ return nil, fmt.Errorf(`failed to export symmetric key: %w`, err)
+ }
+
+ h := hash.New()
+ fmt.Fprint(h, `{"k":"`)
+ fmt.Fprint(h, base64.EncodeToString(octets))
+ fmt.Fprint(h, `","kty":"oct"}`)
+ return h.Sum(nil), nil
+}
+
+func (k *symmetricKey) PublicKey() (Key, error) {
+ newKey := newSymmetricKey()
+
+ for _, key := range k.Keys() {
+ var v any
+ if err := k.Get(key, &v); err != nil {
+ return nil, fmt.Errorf(`failed to get field %q: %w`, key, err)
+ }
+
+ if err := newKey.Set(key, v); err != nil {
+ return nil, fmt.Errorf(`failed to set field %q: %w`, key, err)
+ }
+ }
+ return newKey, nil
+}
+
+func (k *symmetricKey) Validate() error {
+ octets, ok := k.Octets()
+ if !ok || len(octets) == 0 {
+ return NewKeyValidationError(fmt.Errorf(`jwk.SymmetricKey: missing "k" field`))
+ }
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/symmetric_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/symmetric_gen.go
new file mode 100644
index 0000000000..bfd2f8497d
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/symmetric_gen.go
@@ -0,0 +1,620 @@
+// Code generated by tools/cmd/genjwk/main.go. DO NOT EDIT.
+
+package jwk
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "sync"
+
+ "github.com/lestrrat-go/blackmagic"
+ "github.com/lestrrat-go/jwx/v3/cert"
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+ "github.com/lestrrat-go/jwx/v3/internal/pool"
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+)
+
+const (
+ SymmetricOctetsKey = "k"
+)
+
+type SymmetricKey interface {
+ Key
+ Octets() ([]byte, bool)
+}
+
+type symmetricKey struct {
+ algorithm *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4
+ keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4
+ keyOps *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3
+ keyUsage *string // https://tools.ietf.org/html/rfc7517#section-4.2
+ octets []byte
+ x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6
+ x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7
+ x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8
+ x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5
+ privateParams map[string]any
+ mu *sync.RWMutex
+ dc json.DecodeCtx
+}
+
+var _ SymmetricKey = &symmetricKey{}
+var _ Key = &symmetricKey{}
+
+func newSymmetricKey() *symmetricKey {
+ return &symmetricKey{
+ mu: &sync.RWMutex{},
+ privateParams: make(map[string]any),
+ }
+}
+
+func (h symmetricKey) KeyType() jwa.KeyType {
+ return jwa.OctetSeq()
+}
+
+func (h symmetricKey) rlock() {
+ h.mu.RLock()
+}
+
+func (h symmetricKey) runlock() {
+ h.mu.RUnlock()
+}
+
+func (h *symmetricKey) Algorithm() (jwa.KeyAlgorithm, bool) {
+ if h.algorithm != nil {
+ return *(h.algorithm), true
+ }
+ return nil, false
+}
+
+func (h *symmetricKey) KeyID() (string, bool) {
+ if h.keyID != nil {
+ return *(h.keyID), true
+ }
+ return "", false
+}
+
+func (h *symmetricKey) KeyOps() (KeyOperationList, bool) {
+ if h.keyOps != nil {
+ return *(h.keyOps), true
+ }
+ return nil, false
+}
+
+func (h *symmetricKey) KeyUsage() (string, bool) {
+ if h.keyUsage != nil {
+ return *(h.keyUsage), true
+ }
+ return "", false
+}
+
+func (h *symmetricKey) Octets() ([]byte, bool) {
+ if h.octets != nil {
+ return h.octets, true
+ }
+ return nil, false
+}
+
+func (h *symmetricKey) X509CertChain() (*cert.Chain, bool) {
+ return h.x509CertChain, true
+}
+
+func (h *symmetricKey) X509CertThumbprint() (string, bool) {
+ if h.x509CertThumbprint != nil {
+ return *(h.x509CertThumbprint), true
+ }
+ return "", false
+}
+
+func (h *symmetricKey) X509CertThumbprintS256() (string, bool) {
+ if h.x509CertThumbprintS256 != nil {
+ return *(h.x509CertThumbprintS256), true
+ }
+ return "", false
+}
+
+func (h *symmetricKey) X509URL() (string, bool) {
+ if h.x509URL != nil {
+ return *(h.x509URL), true
+ }
+ return "", false
+}
+
+func (h *symmetricKey) Has(name string) bool {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ switch name {
+ case KeyTypeKey:
+ return true
+ case AlgorithmKey:
+ return h.algorithm != nil
+ case KeyIDKey:
+ return h.keyID != nil
+ case KeyOpsKey:
+ return h.keyOps != nil
+ case KeyUsageKey:
+ return h.keyUsage != nil
+ case SymmetricOctetsKey:
+ return h.octets != nil
+ case X509CertChainKey:
+ return h.x509CertChain != nil
+ case X509CertThumbprintKey:
+ return h.x509CertThumbprint != nil
+ case X509CertThumbprintS256Key:
+ return h.x509CertThumbprintS256 != nil
+ case X509URLKey:
+ return h.x509URL != nil
+ default:
+ _, ok := h.privateParams[name]
+ return ok
+ }
+}
+
+func (h *symmetricKey) Get(name string, dst any) error {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ switch name {
+ case KeyTypeKey:
+ if err := blackmagic.AssignIfCompatible(dst, h.KeyType()); err != nil {
+ return fmt.Errorf(`symmetricKey.Get: failed to assign value for field %q to destination object: %w`, name, err)
+ }
+ case AlgorithmKey:
+ if h.algorithm == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.algorithm)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case KeyIDKey:
+ if h.keyID == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyID)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case KeyOpsKey:
+ if h.keyOps == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyOps)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case KeyUsageKey:
+ if h.keyUsage == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyUsage)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case SymmetricOctetsKey:
+ if h.octets == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.octets); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertChainKey:
+ if h.x509CertChain == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, h.x509CertChain); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertThumbprintKey:
+ if h.x509CertThumbprint == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprint)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertThumbprintS256Key:
+ if h.x509CertThumbprintS256 == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprintS256)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509URLKey:
+ if h.x509URL == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509URL)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ default:
+ v, ok := h.privateParams[name]
+ if !ok {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, v); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ }
+ return nil
+}
+
+func (h *symmetricKey) Set(name string, value any) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ return h.setNoLock(name, value)
+}
+
+func (h *symmetricKey) setNoLock(name string, value any) error {
+ switch name {
+ case "kty":
+ return nil
+ case AlgorithmKey:
+ switch v := value.(type) {
+ case string, jwa.SignatureAlgorithm, jwa.KeyEncryptionAlgorithm, jwa.ContentEncryptionAlgorithm:
+ tmp, err := jwa.KeyAlgorithmFrom(v)
+ if err != nil {
+ return fmt.Errorf(`invalid algorithm for %q key: %w`, AlgorithmKey, err)
+ }
+ h.algorithm = &tmp
+ default:
+ return fmt.Errorf(`invalid type for %q key: %T`, AlgorithmKey, value)
+ }
+ return nil
+ case KeyIDKey:
+ if v, ok := value.(string); ok {
+ h.keyID = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value)
+ case KeyOpsKey:
+ var acceptor KeyOperationList
+ if err := acceptor.Accept(value); err != nil {
+ return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err)
+ }
+ h.keyOps = &acceptor
+ return nil
+ case KeyUsageKey:
+ switch v := value.(type) {
+ case KeyUsageType:
+ switch v {
+ case ForSignature, ForEncryption:
+ tmp := v.String()
+ h.keyUsage = &tmp
+ default:
+ return fmt.Errorf(`invalid key usage type %s`, v)
+ }
+ case string:
+ h.keyUsage = &v
+ default:
+ return fmt.Errorf(`invalid key usage type %s`, v)
+ }
+ case SymmetricOctetsKey:
+ if v, ok := value.([]byte); ok {
+ h.octets = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, SymmetricOctetsKey, value)
+ case X509CertChainKey:
+ if v, ok := value.(*cert.Chain); ok {
+ h.x509CertChain = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value)
+ case X509CertThumbprintKey:
+ if v, ok := value.(string); ok {
+ h.x509CertThumbprint = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value)
+ case X509CertThumbprintS256Key:
+ if v, ok := value.(string); ok {
+ h.x509CertThumbprintS256 = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value)
+ case X509URLKey:
+ if v, ok := value.(string); ok {
+ h.x509URL = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value)
+ default:
+ if h.privateParams == nil {
+ h.privateParams = map[string]any{}
+ }
+ h.privateParams[name] = value
+ }
+ return nil
+}
+
+func (k *symmetricKey) Remove(key string) error {
+ k.mu.Lock()
+ defer k.mu.Unlock()
+ switch key {
+ case AlgorithmKey:
+ k.algorithm = nil
+ case KeyIDKey:
+ k.keyID = nil
+ case KeyOpsKey:
+ k.keyOps = nil
+ case KeyUsageKey:
+ k.keyUsage = nil
+ case SymmetricOctetsKey:
+ k.octets = nil
+ case X509CertChainKey:
+ k.x509CertChain = nil
+ case X509CertThumbprintKey:
+ k.x509CertThumbprint = nil
+ case X509CertThumbprintS256Key:
+ k.x509CertThumbprintS256 = nil
+ case X509URLKey:
+ k.x509URL = nil
+ default:
+ delete(k.privateParams, key)
+ }
+ return nil
+}
+
+func (k *symmetricKey) Clone() (Key, error) {
+ key, err := cloneKey(k)
+ if err != nil {
+ return nil, fmt.Errorf(`symmetricKey.Clone: %w`, err)
+ }
+ return key, nil
+}
+
+func (k *symmetricKey) DecodeCtx() json.DecodeCtx {
+ k.mu.RLock()
+ defer k.mu.RUnlock()
+ return k.dc
+}
+
+func (k *symmetricKey) SetDecodeCtx(dc json.DecodeCtx) {
+ k.mu.Lock()
+ defer k.mu.Unlock()
+ k.dc = dc
+}
+
+func (h *symmetricKey) UnmarshalJSON(buf []byte) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ h.algorithm = nil
+ h.keyID = nil
+ h.keyOps = nil
+ h.keyUsage = nil
+ h.octets = nil
+ h.x509CertChain = nil
+ h.x509CertThumbprint = nil
+ h.x509CertThumbprintS256 = nil
+ h.x509URL = nil
+ dec := json.NewDecoder(bytes.NewReader(buf))
+LOOP:
+ for {
+ tok, err := dec.Token()
+ if err != nil {
+ return fmt.Errorf(`error reading token: %w`, err)
+ }
+ switch tok := tok.(type) {
+ case json.Delim:
+ // Assuming we're doing everything correctly, we should ONLY
+ // get either tokens.OpenCurlyBracket or tokens.CloseCurlyBracket here.
+ if tok == tokens.CloseCurlyBracket { // End of object
+ break LOOP
+ } else if tok != tokens.OpenCurlyBracket {
+ return fmt.Errorf(`expected '%c' but got '%c'`, tokens.OpenCurlyBracket, tok)
+ }
+ case string: // Objects can only have string keys
+ switch tok {
+ case KeyTypeKey:
+ val, err := json.ReadNextStringToken(dec)
+ if err != nil {
+ return fmt.Errorf(`error reading token: %w`, err)
+ }
+ if val != jwa.OctetSeq().String() {
+ return fmt.Errorf(`invalid kty value for RSAPublicKey (%s)`, val)
+ }
+ case AlgorithmKey:
+ var s string
+ if err := dec.Decode(&s); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+ }
+ alg, err := jwa.KeyAlgorithmFrom(s)
+ if err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+ }
+ h.algorithm = &alg
+ case KeyIDKey:
+ if err := json.AssignNextStringToken(&h.keyID, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err)
+ }
+ case KeyOpsKey:
+ var decoded KeyOperationList
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err)
+ }
+ h.keyOps = &decoded
+ case KeyUsageKey:
+ if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err)
+ }
+ case SymmetricOctetsKey:
+ if err := json.AssignNextBytesToken(&h.octets, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, SymmetricOctetsKey, err)
+ }
+ case X509CertChainKey:
+ var decoded cert.Chain
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err)
+ }
+ h.x509CertChain = &decoded
+ case X509CertThumbprintKey:
+ if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err)
+ }
+ case X509CertThumbprintS256Key:
+ if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err)
+ }
+ case X509URLKey:
+ if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err)
+ }
+ default:
+ if dc := h.dc; dc != nil {
+ if localReg := dc.Registry(); localReg != nil {
+ decoded, err := localReg.Decode(dec, tok)
+ if err == nil {
+ h.setNoLock(tok, decoded)
+ continue
+ }
+ }
+ }
+ decoded, err := registry.Decode(dec, tok)
+ if err == nil {
+ h.setNoLock(tok, decoded)
+ continue
+ }
+ return fmt.Errorf(`could not decode field %s: %w`, tok, err)
+ }
+ default:
+ return fmt.Errorf(`invalid token %T`, tok)
+ }
+ }
+ if h.octets == nil {
+ return fmt.Errorf(`required field k is missing`)
+ }
+ return nil
+}
+
+func (h symmetricKey) MarshalJSON() ([]byte, error) {
+ data := make(map[string]any)
+ fields := make([]string, 0, 9)
+ data[KeyTypeKey] = jwa.OctetSeq()
+ fields = append(fields, KeyTypeKey)
+ if h.algorithm != nil {
+ data[AlgorithmKey] = *(h.algorithm)
+ fields = append(fields, AlgorithmKey)
+ }
+ if h.keyID != nil {
+ data[KeyIDKey] = *(h.keyID)
+ fields = append(fields, KeyIDKey)
+ }
+ if h.keyOps != nil {
+ data[KeyOpsKey] = *(h.keyOps)
+ fields = append(fields, KeyOpsKey)
+ }
+ if h.keyUsage != nil {
+ data[KeyUsageKey] = *(h.keyUsage)
+ fields = append(fields, KeyUsageKey)
+ }
+ if h.octets != nil {
+ data[SymmetricOctetsKey] = h.octets
+ fields = append(fields, SymmetricOctetsKey)
+ }
+ if h.x509CertChain != nil {
+ data[X509CertChainKey] = h.x509CertChain
+ fields = append(fields, X509CertChainKey)
+ }
+ if h.x509CertThumbprint != nil {
+ data[X509CertThumbprintKey] = *(h.x509CertThumbprint)
+ fields = append(fields, X509CertThumbprintKey)
+ }
+ if h.x509CertThumbprintS256 != nil {
+ data[X509CertThumbprintS256Key] = *(h.x509CertThumbprintS256)
+ fields = append(fields, X509CertThumbprintS256Key)
+ }
+ if h.x509URL != nil {
+ data[X509URLKey] = *(h.x509URL)
+ fields = append(fields, X509URLKey)
+ }
+ for k, v := range h.privateParams {
+ data[k] = v
+ fields = append(fields, k)
+ }
+
+ sort.Strings(fields)
+ buf := pool.BytesBuffer().Get()
+ defer pool.BytesBuffer().Put(buf)
+ buf.WriteByte(tokens.OpenCurlyBracket)
+ enc := json.NewEncoder(buf)
+ for i, f := range fields {
+ if i > 0 {
+ buf.WriteRune(tokens.Comma)
+ }
+ buf.WriteRune(tokens.DoubleQuote)
+ buf.WriteString(f)
+ buf.WriteString(`":`)
+ v := data[f]
+ switch v := v.(type) {
+ case []byte:
+ buf.WriteRune(tokens.DoubleQuote)
+ buf.WriteString(base64.EncodeToString(v))
+ buf.WriteRune(tokens.DoubleQuote)
+ default:
+ if err := enc.Encode(v); err != nil {
+ return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err)
+ }
+ buf.Truncate(buf.Len() - 1)
+ }
+ }
+ buf.WriteByte(tokens.CloseCurlyBracket)
+ ret := make([]byte, buf.Len())
+ copy(ret, buf.Bytes())
+ return ret, nil
+}
+
+func (h *symmetricKey) Keys() []string {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ keys := make([]string, 0, 9+len(h.privateParams))
+ keys = append(keys, KeyTypeKey)
+ if h.algorithm != nil {
+ keys = append(keys, AlgorithmKey)
+ }
+ if h.keyID != nil {
+ keys = append(keys, KeyIDKey)
+ }
+ if h.keyOps != nil {
+ keys = append(keys, KeyOpsKey)
+ }
+ if h.keyUsage != nil {
+ keys = append(keys, KeyUsageKey)
+ }
+ if h.octets != nil {
+ keys = append(keys, SymmetricOctetsKey)
+ }
+ if h.x509CertChain != nil {
+ keys = append(keys, X509CertChainKey)
+ }
+ if h.x509CertThumbprint != nil {
+ keys = append(keys, X509CertThumbprintKey)
+ }
+ if h.x509CertThumbprintS256 != nil {
+ keys = append(keys, X509CertThumbprintS256Key)
+ }
+ if h.x509URL != nil {
+ keys = append(keys, X509URLKey)
+ }
+ for k := range h.privateParams {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+var symmetricStandardFields KeyFilter
+
+func init() {
+ symmetricStandardFields = NewFieldNameFilter(KeyTypeKey, KeyUsageKey, KeyOpsKey, AlgorithmKey, KeyIDKey, X509URLKey, X509CertChainKey, X509CertThumbprintKey, X509CertThumbprintS256Key, SymmetricOctetsKey)
+}
+
+// SymmetricStandardFieldsFilter returns a KeyFilter that filters out standard Symmetric fields.
+func SymmetricStandardFieldsFilter() KeyFilter {
+ return symmetricStandardFields
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/usage.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/usage.go
new file mode 100644
index 0000000000..ed724153b8
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/usage.go
@@ -0,0 +1,74 @@
+package jwk
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+)
+
+var strictKeyUsage = atomic.Bool{}
+var keyUsageNames = map[string]struct{}{}
+var muKeyUsageName sync.RWMutex
+
+// RegisterKeyUsage registers a possible value that can be used for KeyUsageType.
+// Normally, key usage (or the "use" field in a JWK) is either "sig" or "enc",
+// but other values may be used.
+//
+// While this module only works with "sig" and "enc", it is possible that
+// systems choose to use other values. This function allows users to register
+// new values to be accepted as valid key usage types. Values are case sensitive.
+//
+// Furthermore, the check against registered values can be completely turned off
+// by setting the global option `jwk.WithStrictKeyUsage(false)`.
+func RegisterKeyUsage(v string) {
+ muKeyUsageName.Lock()
+ defer muKeyUsageName.Unlock()
+ keyUsageNames[v] = struct{}{}
+}
+
+func UnregisterKeyUsage(v string) {
+ muKeyUsageName.Lock()
+ defer muKeyUsageName.Unlock()
+ delete(keyUsageNames, v)
+}
+
+func init() {
+ strictKeyUsage.Store(true)
+ RegisterKeyUsage("sig")
+ RegisterKeyUsage("enc")
+}
+
+func isValidUsage(v string) bool {
+ // This function can return true if strictKeyUsage is false
+ if !strictKeyUsage.Load() {
+ return true
+ }
+
+ muKeyUsageName.RLock()
+ defer muKeyUsageName.RUnlock()
+ _, ok := keyUsageNames[v]
+ return ok
+}
+
+func (k KeyUsageType) String() string {
+ return string(k)
+}
+
+func (k *KeyUsageType) Accept(v any) error {
+ switch v := v.(type) {
+ case KeyUsageType:
+ if !isValidUsage(v.String()) {
+ return fmt.Errorf("invalid key usage type: %q", v)
+ }
+ *k = v
+ return nil
+ case string:
+ if !isValidUsage(v) {
+ return fmt.Errorf("invalid key usage type: %q", v)
+ }
+ *k = KeyUsageType(v)
+ return nil
+ }
+
+ return fmt.Errorf("invalid Go type for key usage type: %T", v)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/whitelist.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/whitelist.go
new file mode 100644
index 0000000000..0b0df701ae
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/whitelist.go
@@ -0,0 +1,38 @@
+package jwk
+
+import "github.com/lestrrat-go/httprc/v3"
+
+type Whitelist = httprc.Whitelist
+type WhitelistFunc = httprc.WhitelistFunc
+
+// InsecureWhitelist is an alias to httprc.InsecureWhitelist. Use
+// functions in the `httprc` package to interact with this type.
+type InsecureWhitelist = httprc.InsecureWhitelist
+
+func NewInsecureWhitelist() InsecureWhitelist {
+ return httprc.NewInsecureWhitelist()
+}
+
+// BlockAllWhitelist is an alias to httprc.BlockAllWhitelist. Use
+// functions in the `httprc` package to interact with this type.
+type BlockAllWhitelist = httprc.BlockAllWhitelist
+
+func NewBlockAllWhitelist() BlockAllWhitelist {
+ return httprc.NewBlockAllWhitelist()
+}
+
+// RegexpWhitelist is an alias to httprc.RegexpWhitelist. Use
+// functions in the `httprc` package to interact with this type.
+type RegexpWhitelist = httprc.RegexpWhitelist
+
+func NewRegexpWhitelist() *RegexpWhitelist {
+ return httprc.NewRegexpWhitelist()
+}
+
+// MapWhitelist is an alias to httprc.MapWhitelist. Use
+// functions in the `httprc` package to interact with this type.
+type MapWhitelist = httprc.MapWhitelist
+
+func NewMapWhitelist() MapWhitelist {
+ return httprc.NewMapWhitelist()
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/x509.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/x509.go
new file mode 100644
index 0000000000..c0a7c4c4d9
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/x509.go
@@ -0,0 +1,249 @@
+package jwk
+
+import (
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/lestrrat-go/blackmagic"
+ "github.com/lestrrat-go/jwx/v3/jwk/jwkbb"
+)
+
+// PEMDecoder is an interface to describe an object that can decode
+// a key from PEM encoded ASN.1 DER format.
+//
+// A PEMDecoder can be specified as an option to `jwk.Parse()` or `jwk.ParseKey()`
+// along with the `jwk.WithPEM()` option.
+type PEMDecoder interface {
+ Decode([]byte) (any, []byte, error)
+}
+
+// PEMEncoder is an interface to describe an object that can encode
+// a key into PEM encoded ASN.1 DER format.
+//
+// `jwk.Key` instances do not implement a way to encode themselves into
+// PEM format. Normally you can just use `jwk.EncodePEM()` to do this, but
+// this interface allows you to generalize the encoding process by
+// abstracting the `jwk.EncodePEM()` function using `jwk.PEMEncodeFunc`
+// along with alternate implementations, should you need them.
+type PEMEncoder interface {
+ Encode(any) (string, []byte, error)
+}
+
+type PEMEncodeFunc func(any) (string, []byte, error)
+
+func (f PEMEncodeFunc) Encode(v any) (string, []byte, error) {
+ return f(v)
+}
+
+func encodeX509(v any) (string, []byte, error) {
+ // we can't import jwk, so just use the interface
+ if key, ok := v.(Key); ok {
+ var raw any
+ if err := Export(key, &raw); err != nil {
+ return "", nil, fmt.Errorf(`failed to get raw key out of %T: %w`, key, err)
+ }
+
+ v = raw
+ }
+
+ // Try to convert it into a certificate
+ switch v := v.(type) {
+ case *rsa.PrivateKey:
+ return pmRSAPrivateKey, x509.MarshalPKCS1PrivateKey(v), nil
+ case *ecdsa.PrivateKey:
+ marshaled, err := x509.MarshalECPrivateKey(v)
+ if err != nil {
+ return "", nil, err
+ }
+ return pmECPrivateKey, marshaled, nil
+ case ed25519.PrivateKey:
+ marshaled, err := x509.MarshalPKCS8PrivateKey(v)
+ if err != nil {
+ return "", nil, err
+ }
+ return pmPrivateKey, marshaled, nil
+ case *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey:
+ marshaled, err := x509.MarshalPKIXPublicKey(v)
+ if err != nil {
+ return "", nil, err
+ }
+ return pmPublicKey, marshaled, nil
+ default:
+ return "", nil, fmt.Errorf(`unsupported type %T for ASN.1 DER encoding`, v)
+ }
+}
+
+// EncodePEM encodes the key into a PEM encoded ASN.1 DER format.
+// The key can be a jwk.Key or a raw key instance, but it must be one of
+// the types supported by `x509` package.
+//
+// Internally, it uses the same routine as `jwk.EncodeX509()`, and therefore
+// the same caveats apply
+func EncodePEM(v any) ([]byte, error) {
+ typ, marshaled, err := encodeX509(v)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to encode key in x509: %w`, err)
+ }
+
+ block := &pem.Block{
+ Type: typ,
+ Bytes: marshaled,
+ }
+ return pem.EncodeToMemory(block), nil
+}
+
+const (
+ pmPrivateKey = `PRIVATE KEY`
+ pmPublicKey = `PUBLIC KEY`
+ pmECPrivateKey = `EC PRIVATE KEY`
+ pmRSAPublicKey = `RSA PUBLIC KEY`
+ pmRSAPrivateKey = `RSA PRIVATE KEY`
+)
+
+// NewPEMDecoder returns a PEMDecoder that decodes keys in PEM encoded ASN.1 DER format.
+// You can use it as argument to `jwk.WithPEMDecoder()` option.
+//
+// The use of this function is planned to be deprecated. The plan is to replace the
+// `jwk.WithPEMDecoder()` option with globally available custom X509 decoders which
+// can be registered via `jwk.RegisterX509Decoder()` function.
+func NewPEMDecoder() PEMDecoder {
+ return pemDecoder{}
+}
+
+type pemDecoder struct{}
+
+// DecodePEM decodes a key in PEM encoded ASN.1 DER format.
+// and returns a raw key.
+func (pemDecoder) Decode(src []byte) (any, []byte, error) {
+ block, rest := pem.Decode(src)
+ if block == nil {
+ return nil, rest, fmt.Errorf(`failed to decode PEM data`)
+ }
+ var ret any
+ if err := jwkbb.DecodeX509(&ret, block); err != nil {
+ return nil, rest, err
+ }
+ return ret, rest, nil
+}
+
+// X509Decoder is an interface that describes an object that can decode
+// a PEM encoded ASN.1 DER format into a specific type of key.
+//
+// This interface is experimental, and may change in the future.
+type X509Decoder interface {
+ // DecodeX509 decodes the given PEM block into the destination object.
+ // The destination object must be a pointer to a type that can hold the
+ // decoded key, such as *rsa.PrivateKey, *ecdsa.PrivateKey, etc.
+ DecodeX509(dst any, block *pem.Block) error
+}
+
+// X509DecodeFunc is a function type that implements the X509Decoder interface.
+// It allows you to create a custom X509Decoder by providing a function
+// that takes a destination and a PEM block, and returns an error if the decoding fails.
+//
+// This interface is experimental, and may change in the future.
+type X509DecodeFunc func(dst any, block *pem.Block) error
+
+func (f X509DecodeFunc) DecodeX509(dst any, block *pem.Block) error {
+ return f(dst, block)
+}
+
+var muX509Decoders sync.Mutex
+var x509Decoders = map[any]int{}
+var x509DecoderList = []X509Decoder{}
+
+type identDefaultX509Decoder struct{}
+
+func init() {
+ RegisterX509Decoder(identDefaultX509Decoder{}, X509DecodeFunc(jwkbb.DecodeX509))
+}
+
+// RegisterX509Decoder registers a new X509Decoder that can decode PEM encoded ASN.1 DER format.
+// Because the decoder could be non-comparable, you must provide an identifier that can be used
+// as a map key to identify the decoder.
+//
+// This function is experimental, and may change in the future.
+func RegisterX509Decoder(ident any, decoder X509Decoder) {
+ if decoder == nil {
+ panic(`jwk.RegisterX509Decoder: decoder cannot be nil`)
+ }
+
+ muX509Decoders.Lock()
+ defer muX509Decoders.Unlock()
+ if _, ok := x509Decoders[ident]; ok {
+ return // already registered
+ }
+
+ x509Decoders[ident] = len(x509DecoderList)
+ x509DecoderList = append(x509DecoderList, decoder)
+}
+
+// UnregisterX509Decoder unregisters the X509Decoder identified by the given identifier.
+// If the identifier is not registered, it does nothing.
+//
+// This function is experimental, and may change in the future.
+func UnregisterX509Decoder(ident any) {
+ muX509Decoders.Lock()
+ defer muX509Decoders.Unlock()
+ idx, ok := x509Decoders[ident]
+ if !ok {
+ return // not registered
+ }
+
+ delete(x509Decoders, ident)
+
+ l := len(x509DecoderList)
+ switch idx {
+ case l - 1:
+ // if the last element, just truncate the slice
+ x509DecoderList = x509DecoderList[:l-1]
+ case 0:
+ // if the first element, just shift the slice
+ x509DecoderList = x509DecoderList[1:]
+ default:
+ // if the element is in the middle, remove it by slicing
+ // and appending the two slices together
+ x509DecoderList = append(x509DecoderList[:idx], x509DecoderList[idx+1:]...)
+ }
+}
+
+// decodeX509 decodes a PEM encoded ASN.1 DER format into the given destination.
+// It tries all registered X509 decoders until one of them succeeds.
+// If no decoder can handle the PEM block, it returns an error.
+func decodeX509(dst any, src []byte) error {
+ block, _ := pem.Decode(src)
+ if block == nil {
+ return fmt.Errorf(`failed to decode PEM data`)
+ }
+
+ var errs []error
+ for _, d := range x509DecoderList {
+ if err := d.DecodeX509(dst, block); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ // successfully decoded
+ return nil
+ }
+
+ return fmt.Errorf(`failed to decode X509 data using any of the decoders: %w`, errors.Join(errs...))
+}
+
+func decodeX509WithPEMDEcoder(dst any, src []byte, decoder PEMDecoder) error {
+ ret, _, err := decoder.Decode(src)
+ if err != nil {
+ return fmt.Errorf(`failed to decode PEM data: %w`, err)
+ }
+
+ if err := blackmagic.AssignIfCompatible(dst, ret); err != nil {
+ return fmt.Errorf(`failed to assign decoded key to destination: %w`, err)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jws/BUILD.bazel
new file mode 100644
index 0000000000..920d3f87b1
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/BUILD.bazel
@@ -0,0 +1,76 @@
+load("@rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "jws",
+ srcs = [
+ "errors.go",
+ "filter.go",
+ "headers.go",
+ "headers_gen.go",
+ "interface.go",
+ "io.go",
+ "jws.go",
+ "key_provider.go",
+ "legacy.go",
+ "message.go",
+ "options.go",
+ "options_gen.go",
+ "signer.go",
+ "sign_context.go",
+ "signature_builder.go",
+ "verifier.go",
+ "verify_context.go",
+ ],
+ importpath = "github.com/lestrrat-go/jwx/v3/jws",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//cert",
+ "//internal/base64",
+ "//internal/ecutil",
+ "//internal/json",
+ "//internal/jwxio",
+ "//internal/tokens",
+ "//internal/keyconv",
+ "//internal/pool",
+ "//jwa",
+ "//jwk",
+ "//jws/internal/keytype",
+ "//jws/jwsbb",
+ "//jws/legacy",
+ "//transform",
+ "@com_github_lestrrat_go_blackmagic//:blackmagic",
+ "@com_github_lestrrat_go_option_v2//:option",
+ ],
+)
+
+go_test(
+ name = "jws_test",
+ srcs = [
+ "es256k_test.go",
+ "filter_test.go",
+ "headers_test.go",
+ "jws_test.go",
+ "message_test.go",
+ "options_gen_test.go",
+ "signer_test.go",
+ ],
+ embed = [":jws"],
+ deps = [
+ "//cert",
+ "//internal/base64",
+ "//internal/ecutil",
+ "//internal/json",
+ "//internal/jwxtest",
+ "//jwa",
+ "//jwk",
+ "//jwt",
+ "@com_github_lestrrat_go_httprc_v3//:httprc",
+ "@com_github_stretchr_testify//require",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":jws",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/README.md b/vendor/github.com/lestrrat-go/jwx/v3/jws/README.md
new file mode 100644
index 0000000000..29ca7218e4
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/README.md
@@ -0,0 +1,111 @@
+# JWS [](https://pkg.go.dev/github.com/lestrrat-go/jwx/v3/jws)
+
+Package jws implements JWS as described in [RFC7515](https://tools.ietf.org/html/rfc7515) and [RFC7797](https://tools.ietf.org/html/rfc7797)
+
+* Parse and generate compact or JSON serializations
+* Sign and verify arbitrary payload
+* Use any of the keys supported in [github.com/lestrrat-go/jwx/v3/jwk](../jwk)
+* Add arbitrary fields in the JWS object
+* Ability to add/replace existing signature methods
+* Respect "b64" settings for RFC7797
+
+How-to style documentation can be found in the [docs directory](../docs).
+
+Examples are located in the examples directory ([jws_example_test.go](../examples/jws_example_test.go))
+
+Supported signature algorithms:
+
+| Algorithm | Supported? | Constant in [jwa](../jwa) |
+|:----------------------------------------|:-----------|:-------------------------|
+| HMAC using SHA-256 | YES | jwa.HS256 |
+| HMAC using SHA-384 | YES | jwa.HS384 |
+| HMAC using SHA-512 | YES | jwa.HS512 |
+| RSASSA-PKCS-v1.5 using SHA-256 | YES | jwa.RS256 |
+| RSASSA-PKCS-v1.5 using SHA-384 | YES | jwa.RS384 |
+| RSASSA-PKCS-v1.5 using SHA-512 | YES | jwa.RS512 |
+| ECDSA using P-256 and SHA-256 | YES | jwa.ES256 |
+| ECDSA using P-384 and SHA-384 | YES | jwa.ES384 |
+| ECDSA using P-521 and SHA-512 | YES | jwa.ES512 |
+| ECDSA using secp256k1 and SHA-256 (2) | YES | jwa.ES256K |
+| RSASSA-PSS using SHA256 and MGF1-SHA256 | YES | jwa.PS256 |
+| RSASSA-PSS using SHA384 and MGF1-SHA384 | YES | jwa.PS384 |
+| RSASSA-PSS using SHA512 and MGF1-SHA512 | YES | jwa.PS512 |
+| EdDSA (1) | YES | jwa.EdDSA |
+
+* Note 1: Experimental
+* Note 2: Experimental, and must be toggled using `-tags jwx_es256k` build tag
+
+# SYNOPSIS
+
+## Sign and verify arbitrary data
+
+```go
+import(
+ "crypto/rand"
+ "crypto/rsa"
+ "log"
+
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jws"
+)
+
+func main() {
+ privkey, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ log.Printf("failed to generate private key: %s", err)
+ return
+ }
+
+ buf, err := jws.Sign([]byte("Lorem ipsum"), jws.WithKey(jwa.RS256, privkey))
+ if err != nil {
+ log.Printf("failed to created JWS message: %s", err)
+ return
+ }
+
+ // When you receive a JWS message, you can verify the signature
+ // and grab the payload sent in the message in one go:
+ verified, err := jws.Verify(buf, jws.WithKey(jwa.RS256, &privkey.PublicKey))
+ if err != nil {
+ log.Printf("failed to verify message: %s", err)
+ return
+ }
+
+ log.Printf("signed message verified! -> %s", verified)
+}
+```
+
+## Programmatically manipulate `jws.Message`
+
+```go
+func ExampleMessage() {
+ // initialization for the following variables have been omitted.
+ // please see jws_example_test.go for details
+ var decodedPayload, decodedSig1, decodedSig2 []byte
+ var public1, protected1, public2, protected2 jws.Header
+
+ // Construct a message. DO NOT use values that are base64 encoded
+ m := jws.NewMessage().
+ SetPayload(decodedPayload).
+ AppendSignature(
+ jws.NewSignature().
+ SetSignature(decodedSig1).
+ SetProtectedHeaders(public1).
+ SetPublicHeaders(protected1),
+ ).
+ AppendSignature(
+ jws.NewSignature().
+ SetSignature(decodedSig2).
+ SetProtectedHeaders(public2).
+ SetPublicHeaders(protected2),
+ )
+
+ buf, err := json.MarshalIndent(m, "", " ")
+ if err != nil {
+ fmt.Printf("%s\n", err)
+ return
+ }
+
+ _ = buf
+}
+```
+
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/errors.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/errors.go
new file mode 100644
index 0000000000..d5e1762a6a
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/errors.go
@@ -0,0 +1,112 @@
+package jws
+
+import (
+ "fmt"
+)
+
+type signError struct {
+ error
+}
+
+var errDefaultSignError = signerr(`unknown error`)
+
+// SignError returns an error that can be passed to `errors.Is` to check if the error is a sign error.
+func SignError() error {
+ return errDefaultSignError
+}
+
+func (e signError) Unwrap() error {
+ return e.error
+}
+
+func (signError) Is(err error) bool {
+ _, ok := err.(signError)
+ return ok
+}
+
+func signerr(f string, args ...any) error {
+ return signError{fmt.Errorf(`jws.Sign: `+f, args...)}
+}
+
+// This error is returned when jws.Verify fails, but note that there's another type of
+// message that can be returned by jws.Verify, which is `errVerification`.
+type verifyError struct {
+ error
+}
+
+var errDefaultVerifyError = verifyerr(`unknown error`)
+
+// VerifyError returns an error that can be passed to `errors.Is` to check if the error is a verify error.
+func VerifyError() error {
+ return errDefaultVerifyError
+}
+
+func (e verifyError) Unwrap() error {
+ return e.error
+}
+
+func (verifyError) Is(err error) bool {
+ _, ok := err.(verifyError)
+ return ok
+}
+
+func verifyerr(f string, args ...any) error {
+ return verifyError{fmt.Errorf(`jws.Verify: `+f, args...)}
+}
+
+// verificationError is returned when the actual _verification_ of the key/payload fails.
+type verificationError struct {
+ error
+}
+
+var errDefaultVerificationError = verificationError{fmt.Errorf(`unknown verification error`)}
+
+// VerificationError returns an error that can be passed to `errors.Is` to check if the error is a verification error.
+func VerificationError() error {
+ return errDefaultVerificationError
+}
+
+func (e verificationError) Unwrap() error {
+ return e.error
+}
+
+func (verificationError) Is(err error) bool {
+ _, ok := err.(verificationError)
+ return ok
+}
+
+type parseError struct {
+ error
+}
+
+var errDefaultParseError = parseerr(`unknown error`)
+
+// ParseError returns an error that can be passed to `errors.Is` to check if the error is a parse error.
+func ParseError() error {
+ return errDefaultParseError
+}
+
+func (e parseError) Unwrap() error {
+ return e.error
+}
+
+func (parseError) Is(err error) bool {
+ _, ok := err.(parseError)
+ return ok
+}
+
+func bparseerr(prefix string, f string, args ...any) error {
+ return parseError{fmt.Errorf(prefix+": "+f, args...)}
+}
+
+func parseerr(f string, args ...any) error {
+ return bparseerr(`jws.Parse`, f, args...)
+}
+
+func sparseerr(f string, args ...any) error {
+ return bparseerr(`jws.ParseString`, f, args...)
+}
+
+func rparseerr(f string, args ...any) error {
+ return bparseerr(`jws.ParseReader`, f, args...)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/es256k.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/es256k.go
new file mode 100644
index 0000000000..3b68c46144
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/es256k.go
@@ -0,0 +1,13 @@
+//go:build jwx_es256k
+// +build jwx_es256k
+
+package jws
+
+import (
+ "github.com/lestrrat-go/jwx/v3/jwa"
+)
+
+func init() {
+ // Register ES256K to EC algorithm family
+ addAlgorithmForKeyType(jwa.EC(), jwa.ES256K())
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/filter.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/filter.go
new file mode 100644
index 0000000000..9351ab870b
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/filter.go
@@ -0,0 +1,36 @@
+package jws
+
+import (
+ "github.com/lestrrat-go/jwx/v3/transform"
+)
+
+// HeaderFilter is an interface that allows users to filter JWS header fields.
+// It provides two methods: Filter and Reject; Filter returns a new header with only
+// the fields that match the filter criteria, while Reject returns a new header with
+// only the fields that DO NOT match the filter.
+//
+// EXPERIMENTAL: This API is experimental and its interface and behavior is
+// subject to change in future releases. This API is not subject to semver
+// compatibility guarantees.
+type HeaderFilter interface {
+ Filter(header Headers) (Headers, error)
+ Reject(header Headers) (Headers, error)
+}
+
+// StandardHeadersFilter returns a HeaderFilter that filters out standard JWS header fields.
+//
+// You can use this filter to create headers that either only have standard fields
+// or only custom fields.
+//
+// If you need to configure the filter more precisely, consider
+// using the HeaderNameFilter directly.
+func StandardHeadersFilter() HeaderFilter {
+ return stdHeadersFilter
+}
+
+var stdHeadersFilter = NewHeaderNameFilter(stdHeaderNames...)
+
+// NewHeaderNameFilter creates a new HeaderNameFilter with the specified field names.
+func NewHeaderNameFilter(names ...string) HeaderFilter {
+ return transform.NewNameBasedFilter[Headers](names...)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/headers.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/headers.go
new file mode 100644
index 0000000000..45f8e8959e
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/headers.go
@@ -0,0 +1,52 @@
+package jws
+
+import (
+ "fmt"
+)
+
+func (h *stdHeaders) Copy(dst Headers) error {
+ for _, k := range h.Keys() {
+ var v any
+ if err := h.Get(k, &v); err != nil {
+ return fmt.Errorf(`failed to get header %q: %w`, k, err)
+ }
+ if err := dst.Set(k, v); err != nil {
+ return fmt.Errorf(`failed to set header %q: %w`, k, err)
+ }
+ }
+ return nil
+}
+
+// mergeHeaders merges two headers, and works even if the first Header
+// object is nil. This is not exported because ATM it felt like this
+// function is not frequently used, and MergeHeaders seemed a clunky name
+func mergeHeaders(h1, h2 Headers) (Headers, error) {
+ h3 := NewHeaders()
+
+ if h1 != nil {
+ if err := h1.Copy(h3); err != nil {
+ return nil, fmt.Errorf(`failed to copy headers from first Header: %w`, err)
+ }
+ }
+
+ if h2 != nil {
+ if err := h2.Copy(h3); err != nil {
+ return nil, fmt.Errorf(`failed to copy headers from second Header: %w`, err)
+ }
+ }
+
+ return h3, nil
+}
+
+func (h *stdHeaders) Merge(h2 Headers) (Headers, error) {
+ return mergeHeaders(h, h2)
+}
+
+// Clone creates a deep copy of the header
+func (h *stdHeaders) Clone() (Headers, error) {
+ dst := NewHeaders()
+ if err := h.Copy(dst); err != nil {
+ return nil, fmt.Errorf(`failed to copy header: %w`, err)
+ }
+ return dst, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/headers_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/headers_gen.go
new file mode 100644
index 0000000000..8465eda2b1
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/headers_gen.go
@@ -0,0 +1,704 @@
+// Code generated by tools/cmd/genjws/main.go. DO NOT EDIT.
+
+package jws
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "sync"
+
+ "github.com/lestrrat-go/blackmagic"
+ "github.com/lestrrat-go/jwx/v3/cert"
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+ "github.com/lestrrat-go/jwx/v3/internal/pool"
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jwk"
+)
+
+const (
+ AlgorithmKey = "alg"
+ ContentTypeKey = "cty"
+ CriticalKey = "crit"
+ JWKKey = "jwk"
+ JWKSetURLKey = "jku"
+ KeyIDKey = "kid"
+ TypeKey = "typ"
+ X509CertChainKey = "x5c"
+ X509CertThumbprintKey = "x5t"
+ X509CertThumbprintS256Key = "x5t#S256"
+ X509URLKey = "x5u"
+)
+
+// Headers describe a standard JWS Header set. It is part of the JWS message
+// and is used to represet both Public or Protected headers, which in turn
+// can be found in each Signature object. If you are not sure how this works,
+// it is strongly recommended that you read RFC7515, especially the section
+// that describes the full JSON serialization format of JWS messages.
+//
+// In most cases, you likely want to use the protected headers, as this is part of the signed content.
+type Headers interface {
+ Algorithm() (jwa.SignatureAlgorithm, bool)
+ ContentType() (string, bool)
+ Critical() ([]string, bool)
+ JWK() (jwk.Key, bool)
+ JWKSetURL() (string, bool)
+ KeyID() (string, bool)
+ Type() (string, bool)
+ X509CertChain() (*cert.Chain, bool)
+ X509CertThumbprint() (string, bool)
+ X509CertThumbprintS256() (string, bool)
+ X509URL() (string, bool)
+ Copy(Headers) error
+ Merge(Headers) (Headers, error)
+ Clone() (Headers, error)
+ // Get is used to extract the value of any field, including non-standard fields, out of the header.
+ //
+ // The first argument is the name of the field. The second argument is a pointer
+ // to a variable that will receive the value of the field. The method returns
+ // an error if the field does not exist, or if the value cannot be assigned to
+ // the destination variable. Note that a field is considered to "exist" even if
+ // the value is empty-ish (e.g. 0, false, ""), as long as it is explicitly set.
+ Get(string, any) error
+ Set(string, any) error
+ Remove(string) error
+ // Has returns true if the specified header has a value, even if
+ // the value is empty-ish (e.g. 0, false, "") as long as it has been
+ // explicitly set.
+ Has(string) bool
+ Keys() []string
+}
+
+// stdHeaderNames is a list of all standard header names defined in the JWS specification.
+var stdHeaderNames = []string{AlgorithmKey, ContentTypeKey, CriticalKey, JWKKey, JWKSetURLKey, KeyIDKey, TypeKey, X509CertChainKey, X509CertThumbprintKey, X509CertThumbprintS256Key, X509URLKey}
+
+type stdHeaders struct {
+ algorithm *jwa.SignatureAlgorithm // https://tools.ietf.org/html/rfc7515#section-4.1.1
+ contentType *string // https://tools.ietf.org/html/rfc7515#section-4.1.10
+ critical []string // https://tools.ietf.org/html/rfc7515#section-4.1.11
+ jwk jwk.Key // https://tools.ietf.org/html/rfc7515#section-4.1.3
+ jwkSetURL *string // https://tools.ietf.org/html/rfc7515#section-4.1.2
+ keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4
+ typ *string // https://tools.ietf.org/html/rfc7515#section-4.1.9
+ x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6
+ x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7
+ x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8
+ x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5
+ privateParams map[string]any
+ mu *sync.RWMutex
+ dc DecodeCtx
+ raw []byte // stores the raw version of the header so it can be used later
+}
+
+func NewHeaders() Headers {
+ return &stdHeaders{
+ mu: &sync.RWMutex{},
+ }
+}
+
+func (h *stdHeaders) Algorithm() (jwa.SignatureAlgorithm, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ if h.algorithm == nil {
+ return jwa.EmptySignatureAlgorithm(), false
+ }
+ return *(h.algorithm), true
+}
+
+func (h *stdHeaders) ContentType() (string, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ if h.contentType == nil {
+ return "", false
+ }
+ return *(h.contentType), true
+}
+
+func (h *stdHeaders) Critical() ([]string, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ return h.critical, true
+}
+
+func (h *stdHeaders) JWK() (jwk.Key, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ return h.jwk, true
+}
+
+func (h *stdHeaders) JWKSetURL() (string, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ if h.jwkSetURL == nil {
+ return "", false
+ }
+ return *(h.jwkSetURL), true
+}
+
+func (h *stdHeaders) KeyID() (string, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ if h.keyID == nil {
+ return "", false
+ }
+ return *(h.keyID), true
+}
+
+func (h *stdHeaders) Type() (string, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ if h.typ == nil {
+ return "", false
+ }
+ return *(h.typ), true
+}
+
+func (h *stdHeaders) X509CertChain() (*cert.Chain, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ return h.x509CertChain, true
+}
+
+func (h *stdHeaders) X509CertThumbprint() (string, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ if h.x509CertThumbprint == nil {
+ return "", false
+ }
+ return *(h.x509CertThumbprint), true
+}
+
+func (h *stdHeaders) X509CertThumbprintS256() (string, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ if h.x509CertThumbprintS256 == nil {
+ return "", false
+ }
+ return *(h.x509CertThumbprintS256), true
+}
+
+func (h *stdHeaders) X509URL() (string, bool) {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ if h.x509URL == nil {
+ return "", false
+ }
+ return *(h.x509URL), true
+}
+
+func (h *stdHeaders) clear() {
+ h.algorithm = nil
+ h.contentType = nil
+ h.critical = nil
+ h.jwk = nil
+ h.jwkSetURL = nil
+ h.keyID = nil
+ h.typ = nil
+ h.x509CertChain = nil
+ h.x509CertThumbprint = nil
+ h.x509CertThumbprintS256 = nil
+ h.x509URL = nil
+ h.privateParams = nil
+ h.raw = nil
+}
+
+func (h *stdHeaders) DecodeCtx() DecodeCtx {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ return h.dc
+}
+
+func (h *stdHeaders) SetDecodeCtx(dc DecodeCtx) {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ h.dc = dc
+}
+
+func (h *stdHeaders) rawBuffer() []byte {
+ return h.raw
+}
+
+func (h *stdHeaders) PrivateParams() map[string]any {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ return h.privateParams
+}
+
+func (h *stdHeaders) Has(name string) bool {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ switch name {
+ case AlgorithmKey:
+ return h.algorithm != nil
+ case ContentTypeKey:
+ return h.contentType != nil
+ case CriticalKey:
+ return h.critical != nil
+ case JWKKey:
+ return h.jwk != nil
+ case JWKSetURLKey:
+ return h.jwkSetURL != nil
+ case KeyIDKey:
+ return h.keyID != nil
+ case TypeKey:
+ return h.typ != nil
+ case X509CertChainKey:
+ return h.x509CertChain != nil
+ case X509CertThumbprintKey:
+ return h.x509CertThumbprint != nil
+ case X509CertThumbprintS256Key:
+ return h.x509CertThumbprintS256 != nil
+ case X509URLKey:
+ return h.x509URL != nil
+ default:
+ _, ok := h.privateParams[name]
+ return ok
+ }
+}
+
+func (h *stdHeaders) Get(name string, dst any) error {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ switch name {
+ case AlgorithmKey:
+ if h.algorithm == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.algorithm)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case ContentTypeKey:
+ if h.contentType == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.contentType)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case CriticalKey:
+ if h.critical == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst,
+ h.critical); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case JWKKey:
+ if h.jwk == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst,
+ h.jwk); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case JWKSetURLKey:
+ if h.jwkSetURL == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.jwkSetURL)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case KeyIDKey:
+ if h.keyID == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.keyID)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case TypeKey:
+ if h.typ == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.typ)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertChainKey:
+ if h.x509CertChain == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst,
+ h.x509CertChain); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertThumbprintKey:
+ if h.x509CertThumbprint == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprint)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509CertThumbprintS256Key:
+ if h.x509CertThumbprintS256 == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprintS256)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ case X509URLKey:
+ if h.x509URL == nil {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(h.x509URL)); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ return nil
+ default:
+ v, ok := h.privateParams[name]
+ if !ok {
+ return fmt.Errorf(`field %q not found`, name)
+ }
+ if err := blackmagic.AssignIfCompatible(dst, v); err != nil {
+ return fmt.Errorf(`failed to assign value for field %q: %w`, name, err)
+ }
+ }
+ return nil
+}
+
+func (h *stdHeaders) Set(name string, value any) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ return h.setNoLock(name, value)
+}
+
+func (h *stdHeaders) setNoLock(name string, value any) error {
+ switch name {
+ case AlgorithmKey:
+ alg, err := jwa.KeyAlgorithmFrom(value)
+ if err != nil {
+ return fmt.Errorf("invalid value for %s key: %w", AlgorithmKey, err)
+ }
+ if salg, ok := alg.(jwa.SignatureAlgorithm); ok {
+ h.algorithm = &salg
+ return nil
+ }
+ return fmt.Errorf("expecte jwa.SignatureAlgorithm, received %T", alg)
+ case ContentTypeKey:
+ if v, ok := value.(string); ok {
+ h.contentType = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, ContentTypeKey, value)
+ case CriticalKey:
+ if v, ok := value.([]string); ok {
+ h.critical = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, CriticalKey, value)
+ case JWKKey:
+ if v, ok := value.(jwk.Key); ok {
+ h.jwk = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, JWKKey, value)
+ case JWKSetURLKey:
+ if v, ok := value.(string); ok {
+ h.jwkSetURL = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, JWKSetURLKey, value)
+ case KeyIDKey:
+ if v, ok := value.(string); ok {
+ h.keyID = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value)
+ case TypeKey:
+ if v, ok := value.(string); ok {
+ h.typ = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, TypeKey, value)
+ case X509CertChainKey:
+ if v, ok := value.(*cert.Chain); ok {
+ h.x509CertChain = v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value)
+ case X509CertThumbprintKey:
+ if v, ok := value.(string); ok {
+ h.x509CertThumbprint = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value)
+ case X509CertThumbprintS256Key:
+ if v, ok := value.(string); ok {
+ h.x509CertThumbprintS256 = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value)
+ case X509URLKey:
+ if v, ok := value.(string); ok {
+ h.x509URL = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value)
+ default:
+ if h.privateParams == nil {
+ h.privateParams = map[string]any{}
+ }
+ h.privateParams[name] = value
+ }
+ return nil
+}
+
+func (h *stdHeaders) Remove(key string) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ switch key {
+ case AlgorithmKey:
+ h.algorithm = nil
+ case ContentTypeKey:
+ h.contentType = nil
+ case CriticalKey:
+ h.critical = nil
+ case JWKKey:
+ h.jwk = nil
+ case JWKSetURLKey:
+ h.jwkSetURL = nil
+ case KeyIDKey:
+ h.keyID = nil
+ case TypeKey:
+ h.typ = nil
+ case X509CertChainKey:
+ h.x509CertChain = nil
+ case X509CertThumbprintKey:
+ h.x509CertThumbprint = nil
+ case X509CertThumbprintS256Key:
+ h.x509CertThumbprintS256 = nil
+ case X509URLKey:
+ h.x509URL = nil
+ default:
+ delete(h.privateParams, key)
+ }
+ return nil
+}
+
+func (h *stdHeaders) UnmarshalJSON(buf []byte) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ h.clear()
+ dec := json.NewDecoder(bytes.NewReader(buf))
+LOOP:
+ for {
+ tok, err := dec.Token()
+ if err != nil {
+ return fmt.Errorf(`error reading token: %w`, err)
+ }
+ switch tok := tok.(type) {
+ case json.Delim:
+ // Assuming we're doing everything correctly, we should ONLY
+ // get either tokens.OpenCurlyBracket or tokens.CloseCurlyBracket here.
+ if tok == tokens.CloseCurlyBracket { // End of object
+ break LOOP
+ } else if tok != tokens.OpenCurlyBracket {
+ return fmt.Errorf(`expected '%c' but got '%c'`, tokens.OpenCurlyBracket, tok)
+ }
+ case string: // Objects can only have string keys
+ switch tok {
+ case AlgorithmKey:
+ var decoded jwa.SignatureAlgorithm
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+ }
+ h.algorithm = &decoded
+ case ContentTypeKey:
+ if err := json.AssignNextStringToken(&h.contentType, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, ContentTypeKey, err)
+ }
+ case CriticalKey:
+ var decoded []string
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, CriticalKey, err)
+ }
+ h.critical = decoded
+ case JWKKey:
+ var buf json.RawMessage
+ if err := dec.Decode(&buf); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, JWKKey, err)
+ }
+ key, err := jwk.ParseKey(buf)
+ if err != nil {
+ return fmt.Errorf(`failed to parse JWK for key %s: %w`, JWKKey, err)
+ }
+ h.jwk = key
+ case JWKSetURLKey:
+ if err := json.AssignNextStringToken(&h.jwkSetURL, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, JWKSetURLKey, err)
+ }
+ case KeyIDKey:
+ if err := json.AssignNextStringToken(&h.keyID, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err)
+ }
+ case TypeKey:
+ if err := json.AssignNextStringToken(&h.typ, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, TypeKey, err)
+ }
+ case X509CertChainKey:
+ var decoded cert.Chain
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err)
+ }
+ h.x509CertChain = &decoded
+ case X509CertThumbprintKey:
+ if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err)
+ }
+ case X509CertThumbprintS256Key:
+ if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err)
+ }
+ case X509URLKey:
+ if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err)
+ }
+ default:
+ decoded, err := registry.Decode(dec, tok)
+ if err != nil {
+ return err
+ }
+ h.setNoLock(tok, decoded)
+ }
+ default:
+ return fmt.Errorf(`invalid token %T`, tok)
+ }
+ }
+ h.raw = buf
+ return nil
+}
+
+func (h *stdHeaders) Keys() []string {
+ h.mu.RLock()
+ defer h.mu.RUnlock()
+ keys := make([]string, 0, 11+len(h.privateParams))
+ if h.algorithm != nil {
+ keys = append(keys, AlgorithmKey)
+ }
+ if h.contentType != nil {
+ keys = append(keys, ContentTypeKey)
+ }
+ if h.critical != nil {
+ keys = append(keys, CriticalKey)
+ }
+ if h.jwk != nil {
+ keys = append(keys, JWKKey)
+ }
+ if h.jwkSetURL != nil {
+ keys = append(keys, JWKSetURLKey)
+ }
+ if h.keyID != nil {
+ keys = append(keys, KeyIDKey)
+ }
+ if h.typ != nil {
+ keys = append(keys, TypeKey)
+ }
+ if h.x509CertChain != nil {
+ keys = append(keys, X509CertChainKey)
+ }
+ if h.x509CertThumbprint != nil {
+ keys = append(keys, X509CertThumbprintKey)
+ }
+ if h.x509CertThumbprintS256 != nil {
+ keys = append(keys, X509CertThumbprintS256Key)
+ }
+ if h.x509URL != nil {
+ keys = append(keys, X509URLKey)
+ }
+ for k := range h.privateParams {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+func (h stdHeaders) MarshalJSON() ([]byte, error) {
+ h.mu.RLock()
+ data := make(map[string]any)
+ keys := make([]string, 0, 11+len(h.privateParams))
+ if h.algorithm != nil {
+ data[AlgorithmKey] = *(h.algorithm)
+ keys = append(keys, AlgorithmKey)
+ }
+ if h.contentType != nil {
+ data[ContentTypeKey] = *(h.contentType)
+ keys = append(keys, ContentTypeKey)
+ }
+ if h.critical != nil {
+ data[CriticalKey] = h.critical
+ keys = append(keys, CriticalKey)
+ }
+ if h.jwk != nil {
+ data[JWKKey] = h.jwk
+ keys = append(keys, JWKKey)
+ }
+ if h.jwkSetURL != nil {
+ data[JWKSetURLKey] = *(h.jwkSetURL)
+ keys = append(keys, JWKSetURLKey)
+ }
+ if h.keyID != nil {
+ data[KeyIDKey] = *(h.keyID)
+ keys = append(keys, KeyIDKey)
+ }
+ if h.typ != nil {
+ data[TypeKey] = *(h.typ)
+ keys = append(keys, TypeKey)
+ }
+ if h.x509CertChain != nil {
+ data[X509CertChainKey] = h.x509CertChain
+ keys = append(keys, X509CertChainKey)
+ }
+ if h.x509CertThumbprint != nil {
+ data[X509CertThumbprintKey] = *(h.x509CertThumbprint)
+ keys = append(keys, X509CertThumbprintKey)
+ }
+ if h.x509CertThumbprintS256 != nil {
+ data[X509CertThumbprintS256Key] = *(h.x509CertThumbprintS256)
+ keys = append(keys, X509CertThumbprintS256Key)
+ }
+ if h.x509URL != nil {
+ data[X509URLKey] = *(h.x509URL)
+ keys = append(keys, X509URLKey)
+ }
+ for k, v := range h.privateParams {
+ data[k] = v
+ keys = append(keys, k)
+ }
+ h.mu.RUnlock()
+ sort.Strings(keys)
+ buf := pool.BytesBuffer().Get()
+ defer pool.BytesBuffer().Put(buf)
+ enc := json.NewEncoder(buf)
+ buf.WriteByte(tokens.OpenCurlyBracket)
+ for i, k := range keys {
+ if i > 0 {
+ buf.WriteRune(tokens.Comma)
+ }
+ buf.WriteRune(tokens.DoubleQuote)
+ buf.WriteString(k)
+ buf.WriteString(`":`)
+ switch v := data[k].(type) {
+ case []byte:
+ buf.WriteRune(tokens.DoubleQuote)
+ buf.WriteString(base64.EncodeToString(v))
+ buf.WriteRune(tokens.DoubleQuote)
+ default:
+ if err := enc.Encode(v); err != nil {
+ return nil, fmt.Errorf(`failed to encode value for field %s: %w`, k, err)
+ }
+ buf.Truncate(buf.Len() - 1)
+ }
+ }
+ buf.WriteByte(tokens.CloseCurlyBracket)
+ ret := make([]byte, buf.Len())
+ copy(ret, buf.Bytes())
+ return ret, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/interface.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/interface.go
new file mode 100644
index 0000000000..e3ad296844
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/interface.go
@@ -0,0 +1,80 @@
+package jws
+
+import (
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+ "github.com/lestrrat-go/jwx/v3/jws/legacy"
+)
+
+type Signer = legacy.Signer
+type Verifier = legacy.Verifier
+type HMACSigner = legacy.HMACSigner
+type HMACVerifier = legacy.HMACVerifier
+
+// Base64Encoder is an interface that can be used when encoding JWS message
+// components to base64. This is useful when you want to use a non-standard
+// base64 encoder while generating or verifying signatures. By default JWS
+// uses raw url base64 encoding (without padding), but there are apparently
+// some cases where you may want to use a base64 encoders that uses padding.
+//
+// For example, apparently AWS ALB User Claims is provided in JWT format,
+// but it uses a base64 encoding with padding.
+type Base64Encoder = base64.Encoder
+
+type DecodeCtx interface {
+ CollectRaw() bool
+}
+
+// Message represents a full JWS encoded message. Flattened serialization
+// is not supported as a struct, but rather it's represented as a
+// Message struct with only one `signature` element.
+//
+// Do not expect to use the Message object to verify or construct a
+// signed payload with. You should only use this when you want to actually
+// programmatically view the contents of the full JWS payload.
+//
+// As of this version, there is one big incompatibility when using Message
+// objects to convert between compact and JSON representations.
+// The protected header is sometimes encoded differently from the original
+// message and the JSON serialization that we use in Go.
+//
+// For example, the protected header `eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9`
+// decodes to
+//
+// {"typ":"JWT",
+// "alg":"HS256"}
+//
+// However, when we parse this into a message, we create a jws.Header object,
+// which, when we marshal into a JSON object again, becomes
+//
+// {"typ":"JWT","alg":"HS256"}
+//
+// Notice that serialization lacks a line break and a space between `"JWT",`
+// and `"alg"`. This causes a problem when verifying the signatures AFTER
+// a compact JWS message has been unmarshaled into a jws.Message.
+//
+// jws.Verify() doesn't go through this step, and therefore this does not
+// manifest itself. However, you may see this discrepancy when you manually
+// go through these conversions, and/or use the `jwx` tool like so:
+//
+// jwx jws parse message.jws | jwx jws verify --key somekey.jwk --stdin
+//
+// In this scenario, the first `jwx jws parse` outputs a parsed jws.Message
+// which is marshaled into JSON. At this point the message's protected
+// headers and the signatures don't match.
+//
+// To sign and verify, use the appropriate `Sign()` and `Verify()` functions.
+type Message struct {
+ dc DecodeCtx
+ payload []byte
+ signatures []*Signature
+ b64 bool // true if payload should be base64 encoded
+}
+
+type Signature struct {
+ encoder Base64Encoder
+ dc DecodeCtx
+ headers Headers // Unprotected Headers
+ protected Headers // Protected Headers
+ signature []byte // Signature
+ detached bool
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/internal/keytype/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jws/internal/keytype/BUILD.bazel
new file mode 100644
index 0000000000..eb8bd94acb
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/internal/keytype/BUILD.bazel
@@ -0,0 +1,11 @@
+load("@rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "keytype",
+ srcs = ["keytype.go"],
+ importpath = "github.com/lestrrat-go/jwx/v3/jws/internal/keytype",
+ visibility = ["//jws:__subpackages__"],
+ deps = [
+ "//jwk",
+ ],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/internal/keytype/keytype.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/internal/keytype/keytype.go
new file mode 100644
index 0000000000..6b57ed10d8
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/internal/keytype/keytype.go
@@ -0,0 +1,57 @@
+package keytype
+
+import (
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+
+ "github.com/lestrrat-go/jwx/v3/jwk"
+)
+
+// Because the keys defined in github.com/lestrrat-go/jwx/jwk may also implement
+// crypto.Signer, it would be possible for to mix up key types when signing/verifying
+// for example, when we specify jws.WithKey(jwa.RSA256, cryptoSigner), the cryptoSigner
+// can be for RSA, or any other type that implements crypto.Signer... even if it's for the
+// wrong algorithm.
+//
+// These functions are there to differentiate between the valid KNOWN key types.
+// For any other key type that is outside of the Go std library and our own code,
+// we must rely on the user to be vigilant.
+//
+// Notes: symmetric keys are obviously not part of this. for v2 OKP keys,
+// x25519 does not implement Sign()
+func IsValidRSAKey(key any) bool {
+ switch key.(type) {
+ case
+ ecdsa.PrivateKey, *ecdsa.PrivateKey,
+ ed25519.PrivateKey,
+ jwk.ECDSAPrivateKey, jwk.OKPPrivateKey:
+ // these are NOT ok
+ return false
+ }
+ return true
+}
+
+func IsValidECDSAKey(key any) bool {
+ switch key.(type) {
+ case
+ ed25519.PrivateKey,
+ rsa.PrivateKey, *rsa.PrivateKey,
+ jwk.RSAPrivateKey, jwk.OKPPrivateKey:
+ // these are NOT ok
+ return false
+ }
+ return true
+}
+
+func IsValidEDDSAKey(key any) bool {
+ switch key.(type) {
+ case
+ ecdsa.PrivateKey, *ecdsa.PrivateKey,
+ rsa.PrivateKey, *rsa.PrivateKey,
+ jwk.RSAPrivateKey, jwk.ECDSAPrivateKey:
+ // these are NOT ok
+ return false
+ }
+ return true
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/io.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/io.go
new file mode 100644
index 0000000000..77a084cfda
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/io.go
@@ -0,0 +1,36 @@
+// Code generated by tools/cmd/genreadfile/main.go. DO NOT EDIT.
+
+package jws
+
+import (
+ "fmt"
+ "io/fs"
+ "os"
+)
+
+type sysFS struct{}
+
+func (sysFS) Open(path string) (fs.File, error) {
+ return os.Open(path)
+}
+
+func ReadFile(path string, options ...ReadFileOption) (*Message, error) {
+
+ var srcFS fs.FS = sysFS{}
+ for _, option := range options {
+ switch option.Ident() {
+ case identFS{}:
+ if err := option.Value(&srcFS); err != nil {
+ return nil, fmt.Errorf("failed to set fs.FS: %w", err)
+ }
+ }
+ }
+
+ f, err := srcFS.Open(path)
+ if err != nil {
+ return nil, err
+ }
+
+ defer f.Close()
+ return ParseReader(f)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jws.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jws.go
new file mode 100644
index 0000000000..1fa77438b9
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jws.go
@@ -0,0 +1,662 @@
+//go:generate ../tools/cmd/genjws.sh
+
+// Package jws implements the digital signature on JSON based data
+// structures as described in https://tools.ietf.org/html/rfc7515
+//
+// If you do not care about the details, the only things that you
+// would need to use are the following functions:
+//
+// jws.Sign(payload, jws.WithKey(algorithm, key))
+// jws.Verify(serialized, jws.WithKey(algorithm, key))
+//
+// To sign, simply use `jws.Sign`. `payload` is a []byte buffer that
+// contains whatever data you want to sign. `alg` is one of the
+// jwa.SignatureAlgorithm constants from package jwa. For RSA and
+// ECDSA family of algorithms, you will need to prepare a private key.
+// For HMAC family, you just need a []byte value. The `jws.Sign`
+// function will return the encoded JWS message on success.
+//
+// To verify, use `jws.Verify`. It will parse the `encodedjws` buffer
+// and verify the result using `algorithm` and `key`. Upon successful
+// verification, the original payload is returned, so you can work on it.
+//
+// As a sidenote, consider using github.com/lestrrat-go/htmsig if you
+// looking for HTTP Message Signatures (RFC9421) -- it uses the same
+// underlying signing/verification mechanisms as this module.
+package jws
+
+import (
+ "bufio"
+ "crypto/ecdh"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+ "github.com/lestrrat-go/jwx/v3/internal/jwxio"
+ "github.com/lestrrat-go/jwx/v3/internal/pool"
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jwk"
+ "github.com/lestrrat-go/jwx/v3/jws/jwsbb"
+)
+
+var registry = json.NewRegistry()
+
+var signers = make(map[jwa.SignatureAlgorithm]Signer)
+var muSigner = &sync.Mutex{}
+
+func removeSigner(alg jwa.SignatureAlgorithm) {
+ muSigner.Lock()
+ defer muSigner.Unlock()
+ delete(signers, alg)
+}
+
+type defaultSigner struct {
+ alg jwa.SignatureAlgorithm
+}
+
+func (s defaultSigner) Algorithm() jwa.SignatureAlgorithm {
+ return s.alg
+}
+
+func (s defaultSigner) Sign(key any, payload []byte) ([]byte, error) {
+ return jwsbb.Sign(key, s.alg.String(), payload, nil)
+}
+
+type signerAdapter struct {
+ signer Signer
+}
+
+func (s signerAdapter) Algorithm() jwa.SignatureAlgorithm {
+ return s.signer.Algorithm()
+}
+
+func (s signerAdapter) Sign(key any, payload []byte) ([]byte, error) {
+ return s.signer.Sign(payload, key)
+}
+
+const (
+ fmtInvalid = 1 << iota
+ fmtCompact
+ fmtJSON
+ fmtJSONPretty
+ fmtMax
+)
+
+// silence linters
+var _ = fmtInvalid
+var _ = fmtMax
+
+func validateKeyBeforeUse(key any) error {
+ jwkKey, ok := key.(jwk.Key)
+ if !ok {
+ converted, err := jwk.Import(key)
+ if err != nil {
+ return fmt.Errorf(`could not convert key of type %T to jwk.Key for validation: %w`, key, err)
+ }
+ jwkKey = converted
+ }
+ return jwkKey.Validate()
+}
+
+// Sign generates a JWS message for the given payload and returns
+// it in serialized form, which can be in either compact or
+// JSON format. Default is compact.
+//
+// You must pass at least one key to `jws.Sign()` by using `jws.WithKey()`
+// option.
+//
+// jws.Sign(payload, jws.WithKey(alg, key))
+// jws.Sign(payload, jws.WithJSON(), jws.WithKey(alg1, key1), jws.WithKey(alg2, key2))
+//
+// Note that in the second example the `jws.WithJSON()` option is
+// specified as well. This is because the compact serialization
+// format does not support multiple signatures, and users must
+// specifically ask for the JSON serialization format.
+//
+// Read the documentation for `jws.WithKey()` to learn more about the
+// possible values that can be used for `alg` and `key`.
+//
+// You may create JWS messages with the "none" (jwa.NoSignature) algorithm
+// if you use the `jws.WithInsecureNoSignature()` option. This option
+// can be combined with one or more signature keys, as well as the
+// `jws.WithJSON()` option to generate multiple signatures (though
+// the usefulness of such constructs is highly debatable)
+//
+// Note that this library does not allow you to successfully call `jws.Verify()` on
+// signatures with the "none" algorithm. To parse these, use `jws.Parse()` instead.
+//
+// If you want to use a detached payload, use `jws.WithDetachedPayload()` as
+// one of the options. When you use this option, you must always set the
+// first parameter (`payload`) to `nil`, or the function will return an error
+//
+// You may also want to look at how to pass protected headers to the
+// signing process, as you will likely be required to set the `b64` field
+// when using detached payload.
+//
+// Look for options that return `jws.SignOption` or `jws.SignVerifyOption`
+// for a complete list of options that can be passed to this function.
+//
+// You can use `errors.Is` with `jws.SignError()` to check if an error is from this function.
+func Sign(payload []byte, options ...SignOption) ([]byte, error) {
+ sc := signContextPool.Get()
+ defer signContextPool.Put(sc)
+
+ sc.payload = payload
+
+ if err := sc.ProcessOptions(options); err != nil {
+ return nil, signerr(`failed to process options: %w`, err)
+ }
+
+ lsigner := len(sc.sigbuilders)
+ if lsigner == 0 {
+ return nil, signerr(`no signers available. Specify an algorithm and a key using jws.WithKey()`)
+ }
+
+ // Design note: while we could have easily set format = fmtJSON when
+ // lsigner > 1, I believe the decision to change serialization formats
+ // must be explicitly stated by the caller. Otherwise, I'm pretty sure
+ // there would be people filing issues saying "I get JSON when I expected
+ // compact serialization".
+ //
+ // Therefore, instead of making implicit format conversions, we force the
+ // user to spell it out as `jws.Sign(..., jws.WithJSON(), jws.WithKey(...), jws.WithKey(...))`
+ if sc.format == fmtCompact && lsigner != 1 {
+ return nil, signerr(`cannot have multiple signers (keys) specified for compact serialization. Use only one jws.WithKey()`)
+ }
+
+ // Create a Message object with all the bits and bobs, and we'll
+ // serialize it in the end
+ var result Message
+
+ if err := sc.PopulateMessage(&result); err != nil {
+ return nil, signerr(`failed to populate message: %w`, err)
+ }
+ switch sc.format {
+ case fmtJSON:
+ return json.Marshal(result)
+ case fmtJSONPretty:
+ return json.MarshalIndent(result, "", " ")
+ case fmtCompact:
+ // Take the only signature object, and convert it into a Compact
+ // serialization format
+ var compactOpts []CompactOption
+ if sc.detached {
+ compactOpts = append(compactOpts, WithDetached(true))
+ }
+ for _, option := range options {
+ if copt, ok := option.(CompactOption); ok {
+ compactOpts = append(compactOpts, copt)
+ }
+ }
+ return Compact(&result, compactOpts...)
+ default:
+ return nil, signerr(`invalid serialization format`)
+ }
+}
+
+var allowNoneWhitelist = jwk.WhitelistFunc(func(string) bool {
+ return false
+})
+
+// Verify checks if the given JWS message is verifiable using `alg` and `key`.
+// `key` may be a "raw" key (e.g. rsa.PublicKey) or a jwk.Key
+//
+// If the verification is successful, `err` is nil, and the content of the
+// payload that was signed is returned. If you need more fine-grained
+// control of the verification process, manually generate a
+// `Verifier` in `verify` subpackage, and call `Verify` method on it.
+// If you need to access signatures and JOSE headers in a JWS message,
+// use `Parse` function to get `Message` object.
+//
+// Because the use of "none" (jwa.NoSignature) algorithm is strongly discouraged,
+// this function DOES NOT consider it a success when `{"alg":"none"}` is
+// encountered in the message (it would also be counterintuitive when the code says
+// it _verified_ something when in fact it did no such thing). If you want to
+// accept messages with "none" signature algorithm, use `jws.Parse` to get the
+// raw JWS message.
+//
+// The error returned by this function is of type can be checked against
+// `jws.VerifyError()` and `jws.VerificationError()`. The latter is returned
+// when the verification process itself fails (e.g. invalid signature, wrong key),
+// while the former is returned when any other part of the `jws.Verify()`
+// function fails.
+func Verify(buf []byte, options ...VerifyOption) ([]byte, error) {
+ vc := verifyContextPool.Get()
+ defer verifyContextPool.Put(vc)
+
+ if err := vc.ProcessOptions(options); err != nil {
+ return nil, verifyerr(`failed to process options: %w`, err)
+ }
+
+ return vc.VerifyMessage(buf)
+}
+
+// get the value of b64 header field.
+// If the field does not exist, returns true (default)
+// Otherwise return the value specified by the header field.
+func getB64Value(hdr Headers) bool {
+ var b64 bool
+ if err := hdr.Get("b64", &b64); err != nil {
+ return true // default
+ }
+
+ return b64
+}
+
+// Parse parses contents from the given source and creates a jws.Message
+// struct. By default the input can be in either compact or full JSON serialization.
+//
+// You may pass `jws.WithJSON()` and/or `jws.WithCompact()` to specify
+// explicitly which format to use. If neither or both is specified, the function
+// will attempt to autodetect the format. If one or the other is specified,
+// only the specified format will be attempted.
+//
+// On error, returns a jws.ParseError.
+func Parse(src []byte, options ...ParseOption) (*Message, error) {
+ var formats int
+ for _, option := range options {
+ switch option.Ident() {
+ case identSerialization{}:
+ var v int
+ if err := option.Value(&v); err != nil {
+ return nil, parseerr(`failed to retrieve serialization option value: %w`, err)
+ }
+ switch v {
+ case fmtJSON:
+ formats |= fmtJSON
+ case fmtCompact:
+ formats |= fmtCompact
+ }
+ }
+ }
+
+ // if format is 0 or both JSON/Compact, auto detect
+ if v := formats & (fmtJSON | fmtCompact); v == 0 || v == fmtJSON|fmtCompact {
+ CHECKLOOP:
+ for i := range src {
+ r := rune(src[i])
+ if r >= utf8.RuneSelf {
+ r, _ = utf8.DecodeRune(src)
+ }
+ if !unicode.IsSpace(r) {
+ if r == tokens.OpenCurlyBracket {
+ formats = fmtJSON
+ } else {
+ formats = fmtCompact
+ }
+ break CHECKLOOP
+ }
+ }
+ }
+
+ if formats&fmtCompact == fmtCompact {
+ msg, err := parseCompact(src)
+ if err != nil {
+ return nil, parseerr(`failed to parse compact format: %w`, err)
+ }
+ return msg, nil
+ } else if formats&fmtJSON == fmtJSON {
+ msg, err := parseJSON(src)
+ if err != nil {
+ return nil, parseerr(`failed to parse JSON format: %w`, err)
+ }
+ return msg, nil
+ }
+
+ return nil, parseerr(`invalid byte sequence`)
+}
+
+// ParseString parses contents from the given source and creates a jws.Message
+// struct. The input can be in either compact or full JSON serialization.
+//
+// On error, returns a jws.ParseError.
+func ParseString(src string) (*Message, error) {
+ msg, err := Parse([]byte(src))
+ if err != nil {
+ return nil, sparseerr(`failed to parse string: %w`, err)
+ }
+ return msg, nil
+}
+
+// ParseReader parses contents from the given source and creates a jws.Message
+// struct. The input can be in either compact or full JSON serialization.
+//
+// On error, returns a jws.ParseError.
+func ParseReader(src io.Reader) (*Message, error) {
+ data, err := jwxio.ReadAllFromFiniteSource(src)
+ if err == nil {
+ return Parse(data)
+ }
+
+ if !errors.Is(err, jwxio.NonFiniteSourceError()) {
+ return nil, rparseerr(`failed to read from finite source: %w`, err)
+ }
+
+ rdr := bufio.NewReader(src)
+ var first rune
+ for {
+ r, _, err := rdr.ReadRune()
+ if err != nil {
+ return nil, rparseerr(`failed to read rune: %w`, err)
+ }
+ if !unicode.IsSpace(r) {
+ first = r
+ if err := rdr.UnreadRune(); err != nil {
+ return nil, rparseerr(`failed to unread rune: %w`, err)
+ }
+
+ break
+ }
+ }
+
+ var parser func(io.Reader) (*Message, error)
+ if first == tokens.OpenCurlyBracket {
+ parser = parseJSONReader
+ } else {
+ parser = parseCompactReader
+ }
+
+ m, err := parser(rdr)
+ if err != nil {
+ return nil, rparseerr(`failed to parse reader: %w`, err)
+ }
+
+ return m, nil
+}
+
+func parseJSONReader(src io.Reader) (result *Message, err error) {
+ var m Message
+ if err := json.NewDecoder(src).Decode(&m); err != nil {
+ return nil, fmt.Errorf(`failed to unmarshal jws message: %w`, err)
+ }
+ return &m, nil
+}
+
+func parseJSON(data []byte) (result *Message, err error) {
+ var m Message
+ if err := json.Unmarshal(data, &m); err != nil {
+ return nil, fmt.Errorf(`failed to unmarshal jws message: %w`, err)
+ }
+ return &m, nil
+}
+
+// SplitCompact splits a JWS in compact format and returns its three parts
+// separately: protected headers, payload and signature.
+// On error, returns a jws.ParseError.
+//
+// This function will be deprecated in v4. It is a low-level API, and
+// thus will be available in the `jwsbb` package.
+func SplitCompact(src []byte) ([]byte, []byte, []byte, error) {
+ hdr, payload, signature, err := jwsbb.SplitCompact(src)
+ if err != nil {
+ return nil, nil, nil, parseerr(`%w`, err)
+ }
+ return hdr, payload, signature, nil
+}
+
+// SplitCompactString splits a JWT and returns its three parts
+// separately: protected headers, payload and signature.
+// On error, returns a jws.ParseError.
+//
+// This function will be deprecated in v4. It is a low-level API, and
+// thus will be available in the `jwsbb` package.
+func SplitCompactString(src string) ([]byte, []byte, []byte, error) {
+ hdr, payload, signature, err := jwsbb.SplitCompactString(src)
+ if err != nil {
+ return nil, nil, nil, parseerr(`%w`, err)
+ }
+ return hdr, payload, signature, nil
+}
+
+// SplitCompactReader splits a JWT and returns its three parts
+// separately: protected headers, payload and signature.
+// On error, returns a jws.ParseError.
+//
+// This function will be deprecated in v4. It is a low-level API, and
+// thus will be available in the `jwsbb` package.
+func SplitCompactReader(rdr io.Reader) ([]byte, []byte, []byte, error) {
+ hdr, payload, signature, err := jwsbb.SplitCompactReader(rdr)
+ if err != nil {
+ return nil, nil, nil, parseerr(`%w`, err)
+ }
+ return hdr, payload, signature, nil
+}
+
+// parseCompactReader parses a JWS value serialized via compact serialization.
+func parseCompactReader(rdr io.Reader) (m *Message, err error) {
+ protected, payload, signature, err := SplitCompactReader(rdr)
+ if err != nil {
+ return nil, fmt.Errorf(`invalid compact serialization format: %w`, err)
+ }
+ return parse(protected, payload, signature)
+}
+
+func parseCompact(data []byte) (m *Message, err error) {
+ protected, payload, signature, err := SplitCompact(data)
+ if err != nil {
+ return nil, fmt.Errorf(`invalid compact serialization format: %w`, err)
+ }
+ return parse(protected, payload, signature)
+}
+
+func parse(protected, payload, signature []byte) (*Message, error) {
+ decodedHeader, err := base64.Decode(protected)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to decode protected headers: %w`, err)
+ }
+
+ hdr := NewHeaders()
+ if err := json.Unmarshal(decodedHeader, hdr); err != nil {
+ return nil, fmt.Errorf(`failed to parse JOSE headers: %w`, err)
+ }
+
+ var decodedPayload []byte
+ b64 := getB64Value(hdr)
+ if !b64 {
+ decodedPayload = payload
+ } else {
+ v, err := base64.Decode(payload)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to decode payload: %w`, err)
+ }
+ decodedPayload = v
+ }
+
+ decodedSignature, err := base64.Decode(signature)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to decode signature: %w`, err)
+ }
+
+ var msg Message
+ msg.payload = decodedPayload
+ msg.signatures = append(msg.signatures, &Signature{
+ protected: hdr,
+ signature: decodedSignature,
+ })
+ msg.b64 = b64
+ return &msg, nil
+}
+
+type CustomDecoder = json.CustomDecoder
+type CustomDecodeFunc = json.CustomDecodeFunc
+
+// RegisterCustomField allows users to specify that a private field
+// be decoded as an instance of the specified type. This option has
+// a global effect.
+//
+// For example, suppose you have a custom field `x-birthday`, which
+// you want to represent as a string formatted in RFC3339 in JSON,
+// but want it back as `time.Time`.
+//
+// In such case you would register a custom field as follows
+//
+// jws.RegisterCustomField(`x-birthday`, time.Time{})
+//
+// Then you can use a `time.Time` variable to extract the value
+// of `x-birthday` field, instead of having to use `any`
+// and later convert it to `time.Time`
+//
+// var bday time.Time
+// _ = hdr.Get(`x-birthday`, &bday)
+//
+// If you need a more fine-tuned control over the decoding process,
+// you can register a `CustomDecoder`. For example, below shows
+// how to register a decoder that can parse RFC1123 format string:
+//
+// jws.RegisterCustomField(`x-birthday`, jws.CustomDecodeFunc(func(data []byte) (any, error) {
+// return time.Parse(time.RFC1123, string(data))
+// }))
+//
+// Please note that use of custom fields can be problematic if you
+// are using a library that does not implement MarshalJSON/UnmarshalJSON
+// and you try to roundtrip from an object to JSON, and then back to an object.
+// For example, in the above example, you can _parse_ time values formatted
+// in the format specified in RFC822, but when you convert an object into
+// JSON, it will be formatted in RFC3339, because that's what `time.Time`
+// likes to do. To avoid this, it's always better to use a custom type
+// that wraps your desired type (in this case `time.Time`) and implement
+// MarshalJSON and UnmashalJSON.
+func RegisterCustomField(name string, object any) {
+ registry.Register(name, object)
+}
+
+// Helpers for signature verification
+var rawKeyToKeyType = make(map[reflect.Type]jwa.KeyType)
+var keyTypeToAlgorithms = make(map[jwa.KeyType][]jwa.SignatureAlgorithm)
+
+func init() {
+ rawKeyToKeyType[reflect.TypeOf([]byte(nil))] = jwa.OctetSeq()
+ rawKeyToKeyType[reflect.TypeOf(ed25519.PublicKey(nil))] = jwa.OKP()
+ rawKeyToKeyType[reflect.TypeOf(rsa.PublicKey{})] = jwa.RSA()
+ rawKeyToKeyType[reflect.TypeOf((*rsa.PublicKey)(nil))] = jwa.RSA()
+ rawKeyToKeyType[reflect.TypeOf(ecdsa.PublicKey{})] = jwa.EC()
+ rawKeyToKeyType[reflect.TypeOf((*ecdsa.PublicKey)(nil))] = jwa.EC()
+
+ addAlgorithmForKeyType(jwa.OKP(), jwa.EdDSA())
+ for _, alg := range []jwa.SignatureAlgorithm{jwa.HS256(), jwa.HS384(), jwa.HS512()} {
+ addAlgorithmForKeyType(jwa.OctetSeq(), alg)
+ }
+ for _, alg := range []jwa.SignatureAlgorithm{jwa.RS256(), jwa.RS384(), jwa.RS512(), jwa.PS256(), jwa.PS384(), jwa.PS512()} {
+ addAlgorithmForKeyType(jwa.RSA(), alg)
+ }
+ for _, alg := range []jwa.SignatureAlgorithm{jwa.ES256(), jwa.ES384(), jwa.ES512()} {
+ addAlgorithmForKeyType(jwa.EC(), alg)
+ }
+}
+
+func addAlgorithmForKeyType(kty jwa.KeyType, alg jwa.SignatureAlgorithm) {
+ keyTypeToAlgorithms[kty] = append(keyTypeToAlgorithms[kty], alg)
+}
+
+// AlgorithmsForKey returns the possible signature algorithms that can
+// be used for a given key. It only takes in consideration keys/algorithms
+// for verification purposes, as this is the only usage where one may need
+// dynamically figure out which method to use.
+func AlgorithmsForKey(key any) ([]jwa.SignatureAlgorithm, error) {
+ var kty jwa.KeyType
+ switch key := key.(type) {
+ case jwk.Key:
+ kty = key.KeyType()
+ case rsa.PublicKey, *rsa.PublicKey, rsa.PrivateKey, *rsa.PrivateKey:
+ kty = jwa.RSA()
+ case ecdsa.PublicKey, *ecdsa.PublicKey, ecdsa.PrivateKey, *ecdsa.PrivateKey:
+ kty = jwa.EC()
+ case ed25519.PublicKey, ed25519.PrivateKey, *ecdh.PublicKey, ecdh.PublicKey, *ecdh.PrivateKey, ecdh.PrivateKey:
+ kty = jwa.OKP()
+ case []byte:
+ kty = jwa.OctetSeq()
+ default:
+ return nil, fmt.Errorf(`unknown key type %T`, key)
+ }
+
+ algs, ok := keyTypeToAlgorithms[kty]
+ if !ok {
+ return nil, fmt.Errorf(`unregistered key type %q`, kty)
+ }
+ return algs, nil
+}
+
+func Settings(options ...GlobalOption) {
+ for _, option := range options {
+ switch option.Ident() {
+ case identLegacySigners{}:
+ enableLegacySigners()
+ }
+ }
+}
+
+// VerifyCompactFast is a fast path verification function for JWS messages
+// in compact serialization format.
+//
+// This function is considered experimental, and may change or be removed
+// in the future.
+//
+// VerifyCompactFast performs signature verification on a JWS compact
+// serialization without fully parsing the message into a jws.Message object.
+// This makes it more efficient for cases where you only need to verify
+// the signature and extract the payload, without needing access to headers
+// or other JWS metadata.
+//
+// Returns the original payload that was signed if verification succeeds.
+//
+// Unlike jws.Verify(), this function requires you to specify the
+// algorithm explicitly rather than extracting it from the JWS headers.
+// This can be useful for performance-critical applications where the
+// algorithm is known in advance.
+//
+// Since this function avoids doing many checks that jws.Verify would perform,
+// you must ensure to perform the necessary checks including ensuring that algorithm is safe to use for your payload yourself.
+func VerifyCompactFast(key any, compact []byte, alg jwa.SignatureAlgorithm) ([]byte, error) {
+ algstr := alg.String()
+
+ // Split the serialized JWT into its components
+ hdr, payload, encodedSig, err := jwsbb.SplitCompact(compact)
+ if err != nil {
+ return nil, fmt.Errorf("jwt.verifyFast: failed to split compact: %w", err)
+ }
+
+ signature, err := base64.Decode(encodedSig)
+ if err != nil {
+ return nil, fmt.Errorf("jwt.verifyFast: failed to decode signature: %w", err)
+ }
+
+ // Instead of appending, copy the data from hdr/payload
+ lvb := len(hdr) + 1 + len(payload)
+ verifyBuf := pool.ByteSlice().GetCapacity(lvb)
+ verifyBuf = verifyBuf[:lvb]
+ copy(verifyBuf, hdr)
+ verifyBuf[len(hdr)] = tokens.Period
+ copy(verifyBuf[len(hdr)+1:], payload)
+ defer pool.ByteSlice().Put(verifyBuf)
+
+ // Verify the signature
+ if verifier2, err := VerifierFor(alg); err == nil {
+ if err := verifier2.Verify(key, verifyBuf, signature); err != nil {
+ return nil, verifyError{verificationError{fmt.Errorf("jwt.VerifyCompact: signature verification failed for %s: %w", algstr, err)}}
+ }
+ } else {
+ legacyVerifier, err := NewVerifier(alg)
+ if err != nil {
+ return nil, verifyerr("jwt.VerifyCompact: failed to create verifier for %s: %w", algstr, err)
+ }
+ if err := legacyVerifier.Verify(verifyBuf, signature, key); err != nil {
+ return nil, verifyError{verificationError{fmt.Errorf("jwt.VerifyCompact: signature verification failed for %s: %w", algstr, err)}}
+ }
+ }
+
+ decoded, err := base64.Decode(payload)
+ if err != nil {
+ return nil, verifyerr("jwt.VerifyCompact: failed to decode payload: %w", err)
+ }
+ return decoded, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/BUILD.bazel
new file mode 100644
index 0000000000..0799e81110
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/BUILD.bazel
@@ -0,0 +1,38 @@
+load("@rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "jwsbb",
+ srcs = [
+ "crypto_signer.go",
+ "ecdsa.go",
+ "eddsa.go",
+ "format.go",
+ "hmac.go",
+ "jwsbb.go",
+ "rsa.go",
+ "sign.go",
+ "verify.go",
+ ],
+ importpath = "github.com/lestrrat-go/jwx/v3/jws/jwsbb",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//internal/base64",
+ "//internal/ecutil",
+ "//internal/jwxio",
+ "//internal/keyconv",
+ "//internal/pool",
+ "//internal/tokens",
+ "//jws/internal/keytype",
+ "@com_github_lestrrat_go_dsig//:dsig",
+ ],
+)
+
+go_test(
+ name = "jwsbb_test",
+ srcs = ["jwsbb_test.go"],
+ embed = [":jwsbb"],
+ deps = [
+ "//internal/base64",
+ "@com_github_stretchr_testify//require",
+ ],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/crypto_signer.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/crypto_signer.go
new file mode 100644
index 0000000000..bd6132a4c5
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/crypto_signer.go
@@ -0,0 +1,45 @@
+package jwsbb
+
+import (
+ "crypto"
+ "crypto/rand"
+ "fmt"
+ "io"
+)
+
+// cryptosign is a low-level function that signs a payload using a crypto.Signer.
+// If hash is crypto.Hash(0), the payload is signed directly without hashing.
+// Otherwise, the payload is hashed using the specified hash function before signing.
+//
+// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader.
+func cryptosign(signer crypto.Signer, payload []byte, hash crypto.Hash, opts crypto.SignerOpts, rr io.Reader) ([]byte, error) {
+ if rr == nil {
+ rr = rand.Reader
+ }
+
+ var digest []byte
+ if hash == crypto.Hash(0) {
+ digest = payload
+ } else {
+ h := hash.New()
+ if _, err := h.Write(payload); err != nil {
+ return nil, fmt.Errorf(`failed to write payload to hash: %w`, err)
+ }
+ digest = h.Sum(nil)
+ }
+ return signer.Sign(rr, digest, opts)
+}
+
+// SignCryptoSigner generates a signature using a crypto.Signer interface.
+// This function can be used for hardware security modules, smart cards,
+// and other implementations of the crypto.Signer interface.
+//
+// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader.
+//
+// Returns the signature bytes or an error if signing fails.
+func SignCryptoSigner(signer crypto.Signer, raw []byte, h crypto.Hash, opts crypto.SignerOpts, rr io.Reader) ([]byte, error) {
+ if signer == nil {
+ return nil, fmt.Errorf("jwsbb.SignCryptoSignerRaw: signer is nil")
+ }
+ return cryptosign(signer, raw, h, opts, rr)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/ecdsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/ecdsa.go
new file mode 100644
index 0000000000..1eb492ee7b
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/ecdsa.go
@@ -0,0 +1,179 @@
+package jwsbb
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "encoding/asn1"
+ "fmt"
+ "io"
+ "math/big"
+
+ "github.com/lestrrat-go/dsig"
+ "github.com/lestrrat-go/jwx/v3/internal/ecutil"
+)
+
+// ecdsaHashToDsigAlgorithm maps ECDSA hash functions to dsig algorithm constants
+func ecdsaHashToDsigAlgorithm(h crypto.Hash) (string, error) {
+ switch h {
+ case crypto.SHA256:
+ return dsig.ECDSAWithP256AndSHA256, nil
+ case crypto.SHA384:
+ return dsig.ECDSAWithP384AndSHA384, nil
+ case crypto.SHA512:
+ return dsig.ECDSAWithP521AndSHA512, nil
+ default:
+ return "", fmt.Errorf("unsupported ECDSA hash function: %v", h)
+ }
+}
+
+// UnpackASN1ECDSASignature unpacks an ASN.1 encoded ECDSA signature into r and s values.
+// This is typically used when working with crypto.Signer interfaces that return ASN.1 encoded signatures.
+func UnpackASN1ECDSASignature(signed []byte, r, s *big.Int) error {
+ // Okay, this is silly, but hear me out. When we use the
+ // crypto.Signer interface, the PrivateKey is hidden.
+ // But we need some information about the key (its bit size).
+ //
+ // So while silly, we're going to have to make another call
+ // here and fetch the Public key.
+ // (This probably means that this information should be cached somewhere)
+ var p struct {
+ R *big.Int // TODO: get this from a pool?
+ S *big.Int
+ }
+ if _, err := asn1.Unmarshal(signed, &p); err != nil {
+ return fmt.Errorf(`failed to unmarshal ASN1 encoded signature: %w`, err)
+ }
+
+ r.Set(p.R)
+ s.Set(p.S)
+ return nil
+}
+
+// UnpackECDSASignature unpacks a JWS-format ECDSA signature into r and s values.
+// The signature should be in the format specified by RFC 7515 (r||s as fixed-length byte arrays).
+func UnpackECDSASignature(signature []byte, pubkey *ecdsa.PublicKey, r, s *big.Int) error {
+ keySize := ecutil.CalculateKeySize(pubkey.Curve)
+ if len(signature) != keySize*2 {
+ return fmt.Errorf(`invalid signature length for curve %q`, pubkey.Curve.Params().Name)
+ }
+
+ r.SetBytes(signature[:keySize])
+ s.SetBytes(signature[keySize:])
+
+ return nil
+}
+
+// PackECDSASignature packs the r and s values from an ECDSA signature into a JWS-format byte slice.
+// The output format follows RFC 7515: r||s as fixed-length byte arrays.
+func PackECDSASignature(r *big.Int, sbig *big.Int, curveBits int) ([]byte, error) {
+ keyBytes := curveBits / 8
+ if curveBits%8 > 0 {
+ keyBytes++
+ }
+
+ // Serialize r and s into fixed-length bytes
+ rBytes := r.Bytes()
+ rBytesPadded := make([]byte, keyBytes)
+ copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+ sBytes := sbig.Bytes()
+ sBytesPadded := make([]byte, keyBytes)
+ copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+ // Output as r||s
+ return append(rBytesPadded, sBytesPadded...), nil
+}
+
+// SignECDSA generates an ECDSA signature for the given payload using the specified private key and hash.
+// The raw parameter should be the pre-computed signing input (typically header.payload).
+//
+// rr is an io.Reader that provides randomness for signing. if rr is nil, it defaults to rand.Reader.
+//
+// This function is now a thin wrapper around dsig.SignECDSA. For new projects, you should
+// consider using dsig instead of this function.
+func SignECDSA(key *ecdsa.PrivateKey, payload []byte, h crypto.Hash, rr io.Reader) ([]byte, error) {
+ dsigAlg, err := ecdsaHashToDsigAlgorithm(h)
+ if err != nil {
+ return nil, fmt.Errorf("jwsbb.SignECDSA: %w", err)
+ }
+
+ return dsig.Sign(key, dsigAlg, payload, rr)
+}
+
+// SignECDSACryptoSigner generates an ECDSA signature using a crypto.Signer interface.
+// This function works with hardware security modules and other crypto.Signer implementations.
+// The signature is converted from ASN.1 format to JWS format (r||s).
+//
+// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader.
+func SignECDSACryptoSigner(signer crypto.Signer, raw []byte, h crypto.Hash, rr io.Reader) ([]byte, error) {
+ signed, err := SignCryptoSigner(signer, raw, h, h, rr)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to sign payload using crypto.Signer: %w`, err)
+ }
+
+ return signECDSACryptoSigner(signer, signed)
+}
+
+func signECDSACryptoSigner(signer crypto.Signer, signed []byte) ([]byte, error) {
+ cpub := signer.Public()
+ pubkey, ok := cpub.(*ecdsa.PublicKey)
+ if !ok {
+ return nil, fmt.Errorf(`expected *ecdsa.PublicKey, got %T`, pubkey)
+ }
+ curveBits := pubkey.Curve.Params().BitSize
+
+ var r, s big.Int
+ if err := UnpackASN1ECDSASignature(signed, &r, &s); err != nil {
+ return nil, fmt.Errorf(`failed to unpack ASN1 encoded signature: %w`, err)
+ }
+
+ return PackECDSASignature(&r, &s, curveBits)
+}
+
+func ecdsaVerify(key *ecdsa.PublicKey, buf []byte, h crypto.Hash, r, s *big.Int) error {
+ hasher := h.New()
+ hasher.Write(buf)
+ digest := hasher.Sum(nil)
+ if !ecdsa.Verify(key, digest, r, s) {
+ return fmt.Errorf("jwsbb.ECDSAVerifier: invalid ECDSA signature")
+ }
+ return nil
+}
+
+// VerifyECDSA verifies an ECDSA signature for the given payload.
+// This function verifies the signature using the specified public key and hash algorithm.
+// The payload parameter should be the pre-computed signing input (typically header.payload).
+//
+// This function is now a thin wrapper around dsig.VerifyECDSA. For new projects, you should
+// consider using dsig instead of this function.
+func VerifyECDSA(key *ecdsa.PublicKey, payload, signature []byte, h crypto.Hash) error {
+ dsigAlg, err := ecdsaHashToDsigAlgorithm(h)
+ if err != nil {
+ return fmt.Errorf("jwsbb.VerifyECDSA: %w", err)
+ }
+
+ return dsig.Verify(key, dsigAlg, payload, signature)
+}
+
+// VerifyECDSACryptoSigner verifies an ECDSA signature for crypto.Signer implementations.
+// This function is useful for verifying signatures created by hardware security modules
+// or other implementations of the crypto.Signer interface.
+// The payload parameter should be the pre-computed signing input (typically header.payload).
+func VerifyECDSACryptoSigner(signer crypto.Signer, payload, signature []byte, h crypto.Hash) error {
+ var pubkey *ecdsa.PublicKey
+ switch cpub := signer.Public(); cpub := cpub.(type) {
+ case ecdsa.PublicKey:
+ pubkey = &cpub
+ case *ecdsa.PublicKey:
+ pubkey = cpub
+ default:
+ return fmt.Errorf(`jwsbb.VerifyECDSACryptoSigner: expected *ecdsa.PublicKey, got %T`, cpub)
+ }
+
+ var r, s big.Int
+ if err := UnpackECDSASignature(signature, pubkey, &r, &s); err != nil {
+ return fmt.Errorf("jwsbb.ECDSAVerifier: failed to unpack ASN.1 encoded ECDSA signature: %w", err)
+ }
+
+ return ecdsaVerify(pubkey, payload, h, &r, &s)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/eddsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/eddsa.go
new file mode 100644
index 0000000000..960cf97dde
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/eddsa.go
@@ -0,0 +1,30 @@
+package jwsbb
+
+import (
+ "crypto/ed25519"
+
+ "github.com/lestrrat-go/dsig"
+)
+
+// SignEdDSA generates an EdDSA (Ed25519) signature for the given payload.
+// The raw parameter should be the pre-computed signing input (typically header.payload).
+// EdDSA is deterministic and doesn't require additional hashing of the input.
+//
+// This function is now a thin wrapper around dsig.SignEdDSA. For new projects, you should
+// consider using dsig instead of this function.
+func SignEdDSA(key ed25519.PrivateKey, payload []byte) ([]byte, error) {
+ // Use dsig.Sign with EdDSA algorithm constant
+ return dsig.Sign(key, dsig.EdDSA, payload, nil)
+}
+
+// VerifyEdDSA verifies an EdDSA (Ed25519) signature for the given payload.
+// This function verifies the signature using Ed25519 verification algorithm.
+// The payload parameter should be the pre-computed signing input (typically header.payload).
+// EdDSA is deterministic and provides strong security guarantees without requiring hash function selection.
+//
+// This function is now a thin wrapper around dsig.VerifyEdDSA. For new projects, you should
+// consider using dsig instead of this function.
+func VerifyEdDSA(key ed25519.PublicKey, payload, signature []byte) error {
+ // Use dsig.Verify with EdDSA algorithm constant
+ return dsig.Verify(key, dsig.EdDSA, payload, signature)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/es256k.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/es256k.go
new file mode 100644
index 0000000000..a8761ee0fc
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/es256k.go
@@ -0,0 +1,14 @@
+//go:build jwx_es256k
+
+package jwsbb
+
+import (
+ dsigsecp256k1 "github.com/lestrrat-go/dsig-secp256k1"
+)
+
+const es256k = "ES256K"
+
+func init() {
+ // Add ES256K mapping when this build tag is enabled
+ jwsToDsigAlgorithm[es256k] = dsigsecp256k1.ECDSAWithSecp256k1AndSHA256
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/format.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/format.go
new file mode 100644
index 0000000000..430bf625ac
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/format.go
@@ -0,0 +1,235 @@
+package jwsbb
+
+import (
+ "bytes"
+ "errors"
+ "io"
+
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+ "github.com/lestrrat-go/jwx/v3/internal/jwxio"
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+)
+
+// SignBuffer combines the base64-encoded header and payload into a single byte slice
+// for signing purposes. This creates the signing input according to JWS specification (RFC 7515).
+// The result should be passed to signature generation functions.
+//
+// Parameters:
+// - buf: Reusable buffer (can be nil for automatic allocation)
+// - hdr: Raw header bytes (will be base64-encoded)
+// - payload: Raw payload bytes (encoded based on encodePayload flag)
+// - encoder: Base64 encoder to use for encoding components
+// - encodePayload: If true, payload is base64-encoded; if false, payload is used as-is
+//
+// Returns the constructed signing input in the format: base64(header).base64(payload) or base64(header).payload
+func SignBuffer(buf, hdr, payload []byte, encoder base64.Encoder, encodePayload bool) []byte {
+ l := encoder.EncodedLen(len(hdr)+len(payload)) + 1
+ if cap(buf) < l {
+ buf = make([]byte, 0, l)
+ }
+ buf = buf[:0]
+ buf = encoder.AppendEncode(buf, hdr)
+ buf = append(buf, tokens.Period)
+ if encodePayload {
+ buf = encoder.AppendEncode(buf, payload)
+ } else {
+ buf = append(buf, payload...)
+ }
+
+ return buf
+}
+
+// AppendSignature appends a base64-encoded signature to a JWS signing input buffer.
+// This completes the compact JWS serialization by adding the final signature component.
+// The input buffer should contain the signing input (header.payload), and this function
+// adds the period separator and base64-encoded signature.
+//
+// Parameters:
+// - buf: Buffer containing the signing input (typically from SignBuffer)
+// - signature: Raw signature bytes (will be base64-encoded)
+// - encoder: Base64 encoder to use for encoding the signature
+//
+// Returns the complete compact JWS in the format: base64(header).base64(payload).base64(signature)
+func AppendSignature(buf, signature []byte, encoder base64.Encoder) []byte {
+ l := len(buf) + len(signature) + 1
+ if cap(buf) < l {
+ buf = make([]byte, 0, l)
+ }
+ buf = append(buf, tokens.Period)
+ buf = encoder.AppendEncode(buf, signature)
+
+ return buf
+}
+
+// JoinCompact creates a complete compact JWS serialization from individual components.
+// This is a one-step function that combines header, payload, and signature into the final JWS format.
+// It includes safety checks to prevent excessive memory allocation.
+//
+// Parameters:
+// - buf: Reusable buffer (can be nil for automatic allocation)
+// - hdr: Raw header bytes (will be base64-encoded)
+// - payload: Raw payload bytes (encoded based on encodePayload flag)
+// - signature: Raw signature bytes (will be base64-encoded)
+// - encoder: Base64 encoder to use for encoding all components
+// - encodePayload: If true, payload is base64-encoded; if false, payload is used as-is
+//
+// Returns the complete compact JWS or an error if the total size exceeds safety limits (1GB).
+func JoinCompact(buf, hdr, payload, signature []byte, encoder base64.Encoder, encodePayload bool) ([]byte, error) {
+ const MaxBufferSize = 1 << 30 // 1 GB
+ totalSize := len(hdr) + len(payload) + len(signature) + 2
+ if totalSize > MaxBufferSize {
+ return nil, errors.New("input sizes exceed maximum allowable buffer size")
+ }
+ if cap(buf) < totalSize {
+ buf = make([]byte, 0, totalSize)
+ }
+ buf = buf[:0]
+ buf = encoder.AppendEncode(buf, hdr)
+ buf = append(buf, tokens.Period)
+ if encodePayload {
+ buf = encoder.AppendEncode(buf, payload)
+ } else {
+ buf = append(buf, payload...)
+ }
+ buf = append(buf, tokens.Period)
+ buf = encoder.AppendEncode(buf, signature)
+
+ return buf, nil
+}
+
+var compactDelim = []byte{tokens.Period}
+
+var errInvalidNumberOfSegments = errors.New(`jwsbb: invalid number of segments`)
+
+// InvalidNumberOfSegmentsError returns the standard error for invalid JWS segment count.
+// A valid compact JWS must have exactly 3 segments separated by periods: header.payload.signature
+func InvalidNumberOfSegmentsError() error {
+ return errInvalidNumberOfSegments
+}
+
+// SplitCompact parses a compact JWS serialization into its three components.
+// This function validates that the input has exactly 3 segments separated by periods
+// and returns the base64-encoded components without decoding them.
+//
+// Parameters:
+// - src: Complete compact JWS string as bytes
+//
+// Returns:
+// - protected: Base64-encoded protected header
+// - payload: Base64-encoded payload (or raw payload if b64=false was used)
+// - signature: Base64-encoded signature
+// - err: Error if the format is invalid or segment count is wrong
+func SplitCompact(src []byte) (protected, payload, signature []byte, err error) {
+ var s []byte
+ var ok bool
+
+ protected, s, ok = bytes.Cut(src, compactDelim)
+ if !ok { // no period found
+ return nil, nil, nil, InvalidNumberOfSegmentsError()
+ }
+ payload, s, ok = bytes.Cut(s, compactDelim)
+ if !ok { // only one period found
+ return nil, nil, nil, InvalidNumberOfSegmentsError()
+ }
+ signature, _, ok = bytes.Cut(s, compactDelim)
+ if ok { // three periods found
+ return nil, nil, nil, InvalidNumberOfSegmentsError()
+ }
+ return protected, payload, signature, nil
+}
+
+// SplitCompactString is a convenience wrapper around SplitCompact for string inputs.
+// It converts the string to bytes and parses the compact JWS serialization.
+//
+// Parameters:
+// - src: Complete compact JWS as a string
+//
+// Returns the same components as SplitCompact: protected header, payload, signature, and error.
+func SplitCompactString(src string) (protected, payload, signature []byte, err error) {
+ return SplitCompact([]byte(src))
+}
+
+// SplitCompactReader parses a compact JWS serialization from an io.Reader.
+// This function handles both finite and streaming sources efficiently.
+// For finite sources, it reads all data at once. For streaming sources,
+// it uses a buffer-based approach to find segment boundaries.
+//
+// Parameters:
+// - rdr: Reader containing the compact JWS data
+//
+// Returns:
+// - protected: Base64-encoded protected header
+// - payload: Base64-encoded payload (or raw payload if b64=false was used)
+// - signature: Base64-encoded signature
+// - err: Error if reading fails or the format is invalid
+//
+// The function validates that exactly 3 segments are present, separated by periods.
+func SplitCompactReader(rdr io.Reader) (protected, payload, signature []byte, err error) {
+ data, err := jwxio.ReadAllFromFiniteSource(rdr)
+ if err == nil {
+ return SplitCompact(data)
+ }
+
+ if !errors.Is(err, jwxio.NonFiniteSourceError()) {
+ return nil, nil, nil, err
+ }
+
+ var periods int
+ var state int
+
+ buf := make([]byte, 4096)
+ var sofar []byte
+
+ for {
+ // read next bytes
+ n, err := rdr.Read(buf)
+ // return on unexpected read error
+ if err != nil && err != io.EOF {
+ return nil, nil, nil, io.ErrUnexpectedEOF
+ }
+
+ // append to current buffer
+ sofar = append(sofar, buf[:n]...)
+ // loop to capture multiple tokens.Period in current buffer
+ for loop := true; loop; {
+ var i = bytes.IndexByte(sofar, tokens.Period)
+ if i == -1 && err != io.EOF {
+ // no tokens.Period found -> exit and read next bytes (outer loop)
+ loop = false
+ continue
+ } else if i == -1 && err == io.EOF {
+ // no tokens.Period found -> process rest and exit
+ i = len(sofar)
+ loop = false
+ } else {
+ // tokens.Period found
+ periods++
+ }
+
+ // Reaching this point means we have found a tokens.Period or EOF and process the rest of the buffer
+ switch state {
+ case 0:
+ protected = sofar[:i]
+ state++
+ case 1:
+ payload = sofar[:i]
+ state++
+ case 2:
+ signature = sofar[:i]
+ }
+ // Shorten current buffer
+ if len(sofar) > i {
+ sofar = sofar[i+1:]
+ }
+ }
+ // Exit on EOF
+ if err == io.EOF {
+ break
+ }
+ }
+ if periods != 2 {
+ return nil, nil, nil, InvalidNumberOfSegmentsError()
+ }
+
+ return protected, payload, signature, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/header.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/header.go
new file mode 100644
index 0000000000..d50c38eeb1
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/header.go
@@ -0,0 +1,222 @@
+package jwsbb
+
+import (
+ "fmt"
+
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+ "github.com/valyala/fastjson"
+)
+
+type headerNotFoundError struct {
+ key string
+}
+
+func (e headerNotFoundError) Error() string {
+ return fmt.Sprintf(`jwsbb: header "%s" not found`, e.key)
+}
+
+func (e headerNotFoundError) Is(target error) bool {
+ switch target.(type) {
+ case headerNotFoundError, *headerNotFoundError:
+ // If the target is a headerNotFoundError or a pointer to it, we
+ // consider it a match
+ return true
+ default:
+ return false
+ }
+}
+
+// ErrHeaderdNotFound returns an error that can be passed to `errors.Is` to check if the error is
+// the result of the field not being found
+func ErrHeaderNotFound() error {
+ return headerNotFoundError{}
+}
+
+// ErrFieldNotFound is an alias for ErrHeaderNotFound, and is deprecated. It was a misnomer.
+// It will be removed in a future release.
+func ErrFieldNotFound() error {
+ return ErrHeaderNotFound()
+}
+
+// Header is an object that allows you to access the JWS header in a quick and
+// dirty way. It does not verify anything, it does not know anything about what
+// each header field means, and it does not care about the JWS specification.
+// But when you need to access the JWS header for that one field that you
+// need, this is the object you want to use.
+//
+// As of this writing, HeaderParser cannot be used from concurrent goroutines.
+// You will need to create a new instance for each goroutine that needs to parse a JWS header.
+// Also, in general values obtained from this object should only be used
+// while the Header object is still in scope.
+//
+// This type is experimental and may change or be removed in the future.
+type Header interface {
+ // I'm hiding this behind an interface so that users won't accidentally
+ // rely on the underlying json handler implementation, nor the concrete
+ // type name that jwsbb provides, as we may choose a different one in the future.
+ jwsbbHeader()
+}
+
+type header struct {
+ v *fastjson.Value
+ err error
+}
+
+func (h *header) jwsbbHeader() {}
+
+// HeaderParseCompact parses a JWS header from a compact serialization format.
+// You will need to call HeaderGet* functions to extract the values from the header.
+//
+// This function is experimental and may change or be removed in the future.
+func HeaderParseCompact(buf []byte) Header {
+ decoded, err := base64.Decode(buf)
+ if err != nil {
+ return &header{err: err}
+ }
+ return HeaderParse(decoded)
+}
+
+// HeaderParse parses a JWS header from a byte slice containing the decoded JSON.
+// You will need to call HeaderGet* functions to extract the values from the header.
+//
+// Unlike HeaderParseCompact, this function does not perform any base64 decoding.
+// This function is experimental and may change or be removed in the future.
+func HeaderParse(decoded []byte) Header {
+ var p fastjson.Parser
+ v, err := p.ParseBytes(decoded)
+ if err != nil {
+ return &header{err: err}
+ }
+ return &header{
+ v: v,
+ }
+}
+
+func headerGet(h Header, key string) (*fastjson.Value, error) {
+ //nolint:forcetypeassert
+ hh := h.(*header) // we _know_ this can't be another type
+ if hh.err != nil {
+ return nil, hh.err
+ }
+
+ v := hh.v.Get(key)
+ if v == nil {
+ return nil, headerNotFoundError{key: key}
+ }
+ return v, nil
+}
+
+// HeaderGetString returns the string value for the given key from the JWS header.
+// An error is returned if the JSON was not valid, if the key does not exist,
+// or if the value is not a string.
+//
+// This function is experimental and may change or be removed in the future.
+func HeaderGetString(h Header, key string) (string, error) {
+ v, err := headerGet(h, key)
+ if err != nil {
+ return "", err
+ }
+
+ sb, err := v.StringBytes()
+ if err != nil {
+ return "", err
+ }
+
+ return string(sb), nil
+}
+
+// HeaderGetBool returns the boolean value for the given key from the JWS header.
+// An error is returned if the JSON was not valid, if the key does not exist,
+// or if the value is not a boolean.
+//
+// This function is experimental and may change or be removed in the future.
+func HeaderGetBool(h Header, key string) (bool, error) {
+ v, err := headerGet(h, key)
+ if err != nil {
+ return false, err
+ }
+ return v.Bool()
+}
+
+// HeaderGetFloat64 returns the float64 value for the given key from the JWS header.
+// An error is returned if the JSON was not valid, if the key does not exist,
+// or if the value is not a float64.
+//
+// This function is experimental and may change or be removed in the future.
+func HeaderGetFloat64(h Header, key string) (float64, error) {
+ v, err := headerGet(h, key)
+ if err != nil {
+ return 0, err
+ }
+ return v.Float64()
+}
+
+// HeaderGetInt returns the int value for the given key from the JWS header.
+// An error is returned if the JSON was not valid, if the key does not exist,
+// or if the value is not an int.
+//
+// This function is experimental and may change or be removed in the future.
+func HeaderGetInt(h Header, key string) (int, error) {
+ v, err := headerGet(h, key)
+ if err != nil {
+ return 0, err
+ }
+ return v.Int()
+}
+
+// HeaderGetInt64 returns the int64 value for the given key from the JWS header.
+// An error is returned if the JSON was not valid, if the key does not exist,
+// or if the value is not an int64.
+//
+// This function is experimental and may change or be removed in the future.
+func HeaderGetInt64(h Header, key string) (int64, error) {
+ v, err := headerGet(h, key)
+ if err != nil {
+ return 0, err
+ }
+ return v.Int64()
+}
+
+// HeaderGetStringBytes returns the byte slice value for the given key from the JWS header.
+// An error is returned if the JSON was not valid, if the key does not exist,
+// or if the value is not a byte slice.
+//
+// Because of limitations of the underlying library, you cannot use the return value
+// of this function after the parser is garbage collected.
+//
+// This function is experimental and may change or be removed in the future.
+func HeaderGetStringBytes(h Header, key string) ([]byte, error) {
+ v, err := headerGet(h, key)
+ if err != nil {
+ return nil, err
+ }
+
+ return v.StringBytes()
+}
+
+// HeaderGetUint returns the uint value for the given key from the JWS header.
+// An error is returned if the JSON was not valid, if the key does not exist,
+// or if the value is not a uint.
+//
+// This function is experimental and may change or be removed in the future.
+func HeaderGetUint(h Header, key string) (uint, error) {
+ v, err := headerGet(h, key)
+ if err != nil {
+ return 0, err
+ }
+ return v.Uint()
+}
+
+// HeaderGetUint64 returns the uint64 value for the given key from the JWS header.
+// An error is returned if the JSON was not valid, if the key does not exist,
+// or if the value is not a uint64.
+//
+// This function is experimental and may change or be removed in the future.
+func HeaderGetUint64(h Header, key string) (uint64, error) {
+ v, err := headerGet(h, key)
+ if err != nil {
+ return 0, err
+ }
+
+ return v.Uint64()
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/hmac.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/hmac.go
new file mode 100644
index 0000000000..8e70eb667d
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/hmac.go
@@ -0,0 +1,52 @@
+package jwsbb
+
+import (
+ "fmt"
+ "hash"
+
+ "github.com/lestrrat-go/dsig"
+)
+
+// hmacHashToDsigAlgorithm maps HMAC hash function sizes to dsig algorithm constants
+func hmacHashToDsigAlgorithm(hfunc func() hash.Hash) (string, error) {
+ h := hfunc()
+ switch h.Size() {
+ case 32: // SHA256
+ return dsig.HMACWithSHA256, nil
+ case 48: // SHA384
+ return dsig.HMACWithSHA384, nil
+ case 64: // SHA512
+ return dsig.HMACWithSHA512, nil
+ default:
+ return "", fmt.Errorf("unsupported HMAC hash function: size=%d", h.Size())
+ }
+}
+
+// SignHMAC generates an HMAC signature for the given payload using the specified hash function and key.
+// The raw parameter should be the pre-computed signing input (typically header.payload).
+//
+// This function is now a thin wrapper around dsig.SignHMAC. For new projects, you should
+// consider using dsig instead of this function.
+func SignHMAC(key, payload []byte, hfunc func() hash.Hash) ([]byte, error) {
+ dsigAlg, err := hmacHashToDsigAlgorithm(hfunc)
+ if err != nil {
+ return nil, fmt.Errorf("jwsbb.SignHMAC: %w", err)
+ }
+
+ return dsig.Sign(key, dsigAlg, payload, nil)
+}
+
+// VerifyHMAC verifies an HMAC signature for the given payload.
+// This function verifies the signature using the specified key and hash function.
+// The payload parameter should be the pre-computed signing input (typically header.payload).
+//
+// This function is now a thin wrapper around dsig.VerifyHMAC. For new projects, you should
+// consider using dsig instead of this function.
+func VerifyHMAC(key, payload, signature []byte, hfunc func() hash.Hash) error {
+ dsigAlg, err := hmacHashToDsigAlgorithm(hfunc)
+ if err != nil {
+ return fmt.Errorf("jwsbb.VerifyHMAC: %w", err)
+ }
+
+ return dsig.Verify(key, dsigAlg, payload, signature)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/jwsbb.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/jwsbb.go
new file mode 100644
index 0000000000..6a67ee8f86
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/jwsbb.go
@@ -0,0 +1,94 @@
+// Package jwsbb provides the building blocks (hence the name "bb") for JWS operations.
+// It should be thought of as a low-level API, almost akin to internal packages
+// that should not be used directly by users of the jwx package. However, these exist
+// to provide a more efficient way to perform JWS operations without the overhead of
+// the higher-level jws package to power-users who know what they are doing.
+//
+// This package is currently considered EXPERIMENTAL, and the API may change
+// without notice. It is not recommended to use this package unless you are
+// fully aware of the implications of using it.
+//
+// All bb packages in jwx follow the same design principles:
+// 1. Does minimal checking of input parameters (for performance); callers need to ensure that the parameters are valid.
+// 2. All exported functions are strongly typed (i.e. they do not take `any` types unless they absolutely have to).
+// 3. Does not rely on other public jwx packages (they are standalone, except for internal packages).
+//
+// This implementation uses github.com/lestrrat-go/dsig as the underlying signature provider.
+package jwsbb
+
+import (
+ "github.com/lestrrat-go/dsig"
+)
+
+// JWS algorithm name constants
+const (
+ // HMAC algorithms
+ hs256 = "HS256"
+ hs384 = "HS384"
+ hs512 = "HS512"
+
+ // RSA PKCS#1 v1.5 algorithms
+ rs256 = "RS256"
+ rs384 = "RS384"
+ rs512 = "RS512"
+
+ // RSA PSS algorithms
+ ps256 = "PS256"
+ ps384 = "PS384"
+ ps512 = "PS512"
+
+ // ECDSA algorithms
+ es256 = "ES256"
+ es384 = "ES384"
+ es512 = "ES512"
+
+ // EdDSA algorithm
+ edDSA = "EdDSA"
+)
+
+// Signer is a generic interface that defines the method for signing payloads.
+// The type parameter K represents the key type (e.g., []byte for HMAC keys,
+// *rsa.PrivateKey for RSA keys, *ecdsa.PrivateKey for ECDSA keys).
+type Signer[K any] interface {
+ Sign(key K, payload []byte) ([]byte, error)
+}
+
+// Verifier is a generic interface that defines the method for verifying signatures.
+// The type parameter K represents the key type (e.g., []byte for HMAC keys,
+// *rsa.PublicKey for RSA keys, *ecdsa.PublicKey for ECDSA keys).
+type Verifier[K any] interface {
+ Verify(key K, buf []byte, signature []byte) error
+}
+
+// JWS to dsig algorithm mapping
+var jwsToDsigAlgorithm = map[string]string{
+ // HMAC algorithms
+ hs256: dsig.HMACWithSHA256,
+ hs384: dsig.HMACWithSHA384,
+ hs512: dsig.HMACWithSHA512,
+
+ // RSA PKCS#1 v1.5 algorithms
+ rs256: dsig.RSAPKCS1v15WithSHA256,
+ rs384: dsig.RSAPKCS1v15WithSHA384,
+ rs512: dsig.RSAPKCS1v15WithSHA512,
+
+ // RSA PSS algorithms
+ ps256: dsig.RSAPSSWithSHA256,
+ ps384: dsig.RSAPSSWithSHA384,
+ ps512: dsig.RSAPSSWithSHA512,
+
+ // ECDSA algorithms
+ es256: dsig.ECDSAWithP256AndSHA256,
+ es384: dsig.ECDSAWithP384AndSHA384,
+ es512: dsig.ECDSAWithP521AndSHA512,
+ // Note: ES256K requires external dependency and is handled separately
+
+ // EdDSA algorithm
+ edDSA: dsig.EdDSA,
+}
+
+// getDsigAlgorithm returns the dsig algorithm name for a JWS algorithm
+func getDsigAlgorithm(jwsAlg string) (string, bool) {
+ dsigAlg, ok := jwsToDsigAlgorithm[jwsAlg]
+ return dsigAlg, ok
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/rsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/rsa.go
new file mode 100644
index 0000000000..36997cef7c
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/rsa.go
@@ -0,0 +1,71 @@
+package jwsbb
+
+import (
+ "crypto"
+ "crypto/rsa"
+ "fmt"
+ "io"
+
+ "github.com/lestrrat-go/dsig"
+)
+
+// rsaHashToDsigAlgorithm maps RSA hash functions to dsig algorithm constants
+func rsaHashToDsigAlgorithm(h crypto.Hash, pss bool) (string, error) {
+ if pss {
+ switch h {
+ case crypto.SHA256:
+ return dsig.RSAPSSWithSHA256, nil
+ case crypto.SHA384:
+ return dsig.RSAPSSWithSHA384, nil
+ case crypto.SHA512:
+ return dsig.RSAPSSWithSHA512, nil
+ default:
+ return "", fmt.Errorf("unsupported hash algorithm for RSA-PSS: %v", h)
+ }
+ } else {
+ switch h {
+ case crypto.SHA256:
+ return dsig.RSAPKCS1v15WithSHA256, nil
+ case crypto.SHA384:
+ return dsig.RSAPKCS1v15WithSHA384, nil
+ case crypto.SHA512:
+ return dsig.RSAPKCS1v15WithSHA512, nil
+ default:
+ return "", fmt.Errorf("unsupported hash algorithm for RSA PKCS#1 v1.5: %v", h)
+ }
+ }
+}
+
+// SignRSA generates an RSA signature for the given payload using the specified private key and options.
+// The raw parameter should be the pre-computed signing input (typically header.payload).
+// If pss is true, RSA-PSS is used; otherwise, PKCS#1 v1.5 is used.
+//
+// The rr parameter is an optional io.Reader that can be used to provide randomness for signing.
+// If rr is nil, it defaults to rand.Reader.
+//
+// This function is now a thin wrapper around dsig.SignRSA. For new projects, you should
+// consider using dsig instead of this function.
+func SignRSA(key *rsa.PrivateKey, payload []byte, h crypto.Hash, pss bool, rr io.Reader) ([]byte, error) {
+ dsigAlg, err := rsaHashToDsigAlgorithm(h, pss)
+ if err != nil {
+ return nil, fmt.Errorf("jwsbb.SignRSA: %w", err)
+ }
+
+ return dsig.Sign(key, dsigAlg, payload, rr)
+}
+
+// VerifyRSA verifies an RSA signature for the given payload and header.
+// This function constructs the signing input by encoding the header and payload according to JWS specification,
+// then verifies the signature using the specified public key and hash algorithm.
+// If pss is true, RSA-PSS verification is used; otherwise, PKCS#1 v1.5 verification is used.
+//
+// This function is now a thin wrapper around dsig.VerifyRSA. For new projects, you should
+// consider using dsig instead of this function.
+func VerifyRSA(key *rsa.PublicKey, payload, signature []byte, h crypto.Hash, pss bool) error {
+ dsigAlg, err := rsaHashToDsigAlgorithm(h, pss)
+ if err != nil {
+ return fmt.Errorf("jwsbb.VerifyRSA: %w", err)
+ }
+
+ return dsig.Verify(key, dsigAlg, payload, signature)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/sign.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/sign.go
new file mode 100644
index 0000000000..6f36ab0554
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/sign.go
@@ -0,0 +1,110 @@
+package jwsbb
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "fmt"
+ "io"
+
+ "github.com/lestrrat-go/dsig"
+ "github.com/lestrrat-go/jwx/v3/internal/keyconv"
+)
+
+// Sign generates a JWS signature using the specified key and algorithm.
+//
+// This function loads the signer registered in the jwsbb package _ONLY_.
+// It does not support custom signers that the user might have registered.
+//
+// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader.
+// Not all algorithms require this parameter, but it is included for consistency.
+// 99% of the time, you can pass nil for rr, and it will work fine.
+func Sign(key any, alg string, payload []byte, rr io.Reader) ([]byte, error) {
+ dsigAlg, ok := getDsigAlgorithm(alg)
+ if !ok {
+ return nil, fmt.Errorf(`jwsbb.Sign: unsupported signature algorithm %q`, alg)
+ }
+
+ // Get dsig algorithm info to determine key conversion strategy
+ dsigInfo, ok := dsig.GetAlgorithmInfo(dsigAlg)
+ if !ok {
+ return nil, fmt.Errorf(`jwsbb.Sign: dsig algorithm %q not registered`, dsigAlg)
+ }
+
+ switch dsigInfo.Family {
+ case dsig.HMAC:
+ return dispatchHMACSign(key, dsigAlg, payload)
+ case dsig.RSA:
+ return dispatchRSASign(key, dsigAlg, payload, rr)
+ case dsig.ECDSA:
+ return dispatchECDSASign(key, dsigAlg, payload, rr)
+ case dsig.EdDSAFamily:
+ return dispatchEdDSASign(key, dsigAlg, payload, rr)
+ default:
+ return nil, fmt.Errorf(`jwsbb.Sign: unsupported dsig algorithm family %q`, dsigInfo.Family)
+ }
+}
+
+func dispatchHMACSign(key any, dsigAlg string, payload []byte) ([]byte, error) {
+ var hmackey []byte
+ if err := keyconv.ByteSliceKey(&hmackey, key); err != nil {
+ return nil, fmt.Errorf(`jwsbb.Sign: invalid key type %T. []byte is required: %w`, key, err)
+ }
+
+ return dsig.Sign(hmackey, dsigAlg, payload, nil)
+}
+
+func dispatchRSASign(key any, dsigAlg string, payload []byte, rr io.Reader) ([]byte, error) {
+ // Try crypto.Signer first (dsig can handle it directly)
+ if signer, ok := key.(crypto.Signer); ok {
+ // Verify it's an RSA key
+ if _, ok := signer.Public().(*rsa.PublicKey); ok {
+ return dsig.Sign(signer, dsigAlg, payload, rr)
+ }
+ }
+
+ // Fall back to concrete key types
+ var privkey *rsa.PrivateKey
+ if err := keyconv.RSAPrivateKey(&privkey, key); err != nil {
+ return nil, fmt.Errorf(`jwsbb.Sign: invalid key type %T. *rsa.PrivateKey is required: %w`, key, err)
+ }
+
+ return dsig.Sign(privkey, dsigAlg, payload, rr)
+}
+
+func dispatchECDSASign(key any, dsigAlg string, payload []byte, rr io.Reader) ([]byte, error) {
+ // Try crypto.Signer first (dsig can handle it directly)
+ if signer, ok := key.(crypto.Signer); ok {
+ // Verify it's an ECDSA key
+ if _, ok := signer.Public().(*ecdsa.PublicKey); ok {
+ return dsig.Sign(signer, dsigAlg, payload, rr)
+ }
+ }
+
+ // Fall back to concrete key types
+ var privkey *ecdsa.PrivateKey
+ if err := keyconv.ECDSAPrivateKey(&privkey, key); err != nil {
+ return nil, fmt.Errorf(`jwsbb.Sign: invalid key type %T. *ecdsa.PrivateKey is required: %w`, key, err)
+ }
+
+ return dsig.Sign(privkey, dsigAlg, payload, rr)
+}
+
+func dispatchEdDSASign(key any, dsigAlg string, payload []byte, rr io.Reader) ([]byte, error) {
+ // Try crypto.Signer first (dsig can handle it directly)
+ if signer, ok := key.(crypto.Signer); ok {
+ // Verify it's an EdDSA key
+ if _, ok := signer.Public().(ed25519.PublicKey); ok {
+ return dsig.Sign(signer, dsigAlg, payload, rr)
+ }
+ }
+
+ // Fall back to concrete key types
+ var privkey ed25519.PrivateKey
+ if err := keyconv.Ed25519PrivateKey(&privkey, key); err != nil {
+ return nil, fmt.Errorf(`jwsbb.Sign: invalid key type %T. ed25519.PrivateKey is required: %w`, key, err)
+ }
+
+ return dsig.Sign(privkey, dsigAlg, payload, rr)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/verify.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/verify.go
new file mode 100644
index 0000000000..bac3ff487e
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/verify.go
@@ -0,0 +1,105 @@
+package jwsbb
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "fmt"
+
+ "github.com/lestrrat-go/dsig"
+ "github.com/lestrrat-go/jwx/v3/internal/keyconv"
+)
+
+// Verify verifies a JWS signature using the specified key and algorithm.
+//
+// This function loads the verifier registered in the jwsbb package _ONLY_.
+// It does not support custom verifiers that the user might have registered.
+func Verify(key any, alg string, payload, signature []byte) error {
+ dsigAlg, ok := getDsigAlgorithm(alg)
+ if !ok {
+ return fmt.Errorf(`jwsbb.Verify: unsupported signature algorithm %q`, alg)
+ }
+
+ // Get dsig algorithm info to determine key conversion strategy
+ dsigInfo, ok := dsig.GetAlgorithmInfo(dsigAlg)
+ if !ok {
+ return fmt.Errorf(`jwsbb.Verify: dsig algorithm %q not registered`, dsigAlg)
+ }
+
+ switch dsigInfo.Family {
+ case dsig.HMAC:
+ return dispatchHMACVerify(key, dsigAlg, payload, signature)
+ case dsig.RSA:
+ return dispatchRSAVerify(key, dsigAlg, payload, signature)
+ case dsig.ECDSA:
+ return dispatchECDSAVerify(key, dsigAlg, payload, signature)
+ case dsig.EdDSAFamily:
+ return dispatchEdDSAVerify(key, dsigAlg, payload, signature)
+ default:
+ return fmt.Errorf(`jwsbb.Verify: unsupported dsig algorithm family %q`, dsigInfo.Family)
+ }
+}
+
+func dispatchHMACVerify(key any, dsigAlg string, payload, signature []byte) error {
+ var hmackey []byte
+ if err := keyconv.ByteSliceKey(&hmackey, key); err != nil {
+ return fmt.Errorf(`jwsbb.Verify: invalid key type %T. []byte is required: %w`, key, err)
+ }
+
+ return dsig.Verify(hmackey, dsigAlg, payload, signature)
+}
+
+func dispatchRSAVerify(key any, dsigAlg string, payload, signature []byte) error {
+ // Try crypto.Signer first (dsig can handle it directly)
+ if signer, ok := key.(crypto.Signer); ok {
+ // Verify it's an RSA key
+ if _, ok := signer.Public().(*rsa.PublicKey); ok {
+ return dsig.Verify(signer, dsigAlg, payload, signature)
+ }
+ }
+
+ // Fall back to concrete key types
+ var pubkey *rsa.PublicKey
+ if err := keyconv.RSAPublicKey(&pubkey, key); err != nil {
+ return fmt.Errorf(`jwsbb.Verify: invalid key type %T. *rsa.PublicKey is required: %w`, key, err)
+ }
+
+ return dsig.Verify(pubkey, dsigAlg, payload, signature)
+}
+
+func dispatchECDSAVerify(key any, dsigAlg string, payload, signature []byte) error {
+ // Try crypto.Signer first (dsig can handle it directly)
+ if signer, ok := key.(crypto.Signer); ok {
+ // Verify it's an ECDSA key
+ if _, ok := signer.Public().(*ecdsa.PublicKey); ok {
+ return dsig.Verify(signer, dsigAlg, payload, signature)
+ }
+ }
+
+ // Fall back to concrete key types
+ var pubkey *ecdsa.PublicKey
+ if err := keyconv.ECDSAPublicKey(&pubkey, key); err != nil {
+ return fmt.Errorf(`jwsbb.Verify: invalid key type %T. *ecdsa.PublicKey is required: %w`, key, err)
+ }
+
+ return dsig.Verify(pubkey, dsigAlg, payload, signature)
+}
+
+func dispatchEdDSAVerify(key any, dsigAlg string, payload, signature []byte) error {
+ // Try crypto.Signer first (dsig can handle it directly)
+ if signer, ok := key.(crypto.Signer); ok {
+ // Verify it's an EdDSA key
+ if _, ok := signer.Public().(ed25519.PublicKey); ok {
+ return dsig.Verify(signer, dsigAlg, payload, signature)
+ }
+ }
+
+ // Fall back to concrete key types
+ var pubkey ed25519.PublicKey
+ if err := keyconv.Ed25519PublicKey(&pubkey, key); err != nil {
+ return fmt.Errorf(`jwsbb.Verify: invalid key type %T. ed25519.PublicKey is required: %w`, key, err)
+ }
+
+ return dsig.Verify(pubkey, dsigAlg, payload, signature)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/key_provider.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/key_provider.go
new file mode 100644
index 0000000000..84529a1a87
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/key_provider.go
@@ -0,0 +1,291 @@
+package jws
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "sync"
+
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jwk"
+)
+
+// KeyProvider is responsible for providing key(s) to sign or verify a payload.
+// Multiple `jws.KeyProvider`s can be passed to `jws.Verify()` or `jws.Sign()`
+//
+// `jws.Sign()` can only accept static key providers via `jws.WithKey()`,
+// while `jws.Verify()` can accept `jws.WithKey()`, `jws.WithKeySet()`,
+// `jws.WithVerifyAuto()`, and `jws.WithKeyProvider()`.
+//
+// Understanding how this works is crucial to learn how this package works.
+//
+// `jws.Sign()` is straightforward: signatures are created for each
+// provided key.
+//
+// `jws.Verify()` is a bit more involved, because there are cases you
+// will want to compute/deduce/guess the keys that you would like to
+// use for verification.
+//
+// The first thing that `jws.Verify()` does is to collect the
+// KeyProviders from the option list that the user provided (presented in pseudocode):
+//
+// keyProviders := filterKeyProviders(options)
+//
+// Then, remember that a JWS message may contain multiple signatures in the
+// message. For each signature, we call on the KeyProviders to give us
+// the key(s) to use on this signature:
+//
+// for sig in msg.Signatures {
+// for kp in keyProviders {
+// kp.FetchKeys(ctx, sink, sig, msg)
+// ...
+// }
+// }
+//
+// The `sink` argument passed to the KeyProvider is a temporary storage
+// for the keys (either a jwk.Key or a "raw" key). The `KeyProvider`
+// is responsible for sending keys into the `sink`.
+//
+// When called, the `KeyProvider` created by `jws.WithKey()` sends the same key,
+// `jws.WithKeySet()` sends keys that matches a particular `kid` and `alg`,
+// `jws.WithVerifyAuto()` fetches a JWK from the `jku` URL,
+// and finally `jws.WithKeyProvider()` allows you to execute arbitrary
+// logic to provide keys. If you are providing a custom `KeyProvider`,
+// you should execute the necessary checks or retrieval of keys, and
+// then send the key(s) to the sink:
+//
+// sink.Key(alg, key)
+//
+// These keys are then retrieved and tried for each signature, until
+// a match is found:
+//
+// keys := sink.Keys()
+// for key in keys {
+// if givenSignature == makeSignature(key, payload, ...)) {
+// return OK
+// }
+// }
+type KeyProvider interface {
+ FetchKeys(context.Context, KeySink, *Signature, *Message) error
+}
+
+// KeySink is a data storage where `jws.KeyProvider` objects should
+// send their keys to.
+type KeySink interface {
+ Key(jwa.SignatureAlgorithm, any)
+}
+
+type algKeyPair struct {
+ alg jwa.KeyAlgorithm
+ key any
+}
+
+type algKeySink struct {
+ mu sync.Mutex
+ list []algKeyPair
+}
+
+func (s *algKeySink) Key(alg jwa.SignatureAlgorithm, key any) {
+ s.mu.Lock()
+ s.list = append(s.list, algKeyPair{alg, key})
+ s.mu.Unlock()
+}
+
+type staticKeyProvider struct {
+ alg jwa.SignatureAlgorithm
+ key any
+}
+
+func (kp *staticKeyProvider) FetchKeys(_ context.Context, sink KeySink, _ *Signature, _ *Message) error {
+ sink.Key(kp.alg, kp.key)
+ return nil
+}
+
+type keySetProvider struct {
+ set jwk.Set
+ requireKid bool // true if `kid` must be specified
+ useDefault bool // true if the first key should be used iff there's exactly one key in set
+ inferAlgorithm bool // true if the algorithm should be inferred from key type
+ multipleKeysPerKeyID bool // true if we should attempt to match multiple keys per key ID. if false we assume that only one key exists for a given key ID
+}
+
+func (kp *keySetProvider) selectKey(sink KeySink, key jwk.Key, sig *Signature, _ *Message) error {
+ if usage, ok := key.KeyUsage(); ok {
+ // it's okay if use: "". we'll assume it's "sig"
+ if usage != "" && usage != jwk.ForSignature.String() {
+ return nil
+ }
+ }
+
+ if v, ok := key.Algorithm(); ok {
+ salg, ok := jwa.LookupSignatureAlgorithm(v.String())
+ if !ok {
+ return fmt.Errorf(`invalid signature algorithm %q`, v)
+ }
+
+ sink.Key(salg, key)
+ return nil
+ }
+
+ if kp.inferAlgorithm {
+ algs, err := AlgorithmsForKey(key)
+ if err != nil {
+ return fmt.Errorf(`failed to get a list of signature methods for key type %s: %w`, key.KeyType(), err)
+ }
+
+ // bail out if the JWT has a `alg` field, and it doesn't match
+ if tokAlg, ok := sig.ProtectedHeaders().Algorithm(); ok {
+ for _, alg := range algs {
+ if tokAlg == alg {
+ sink.Key(alg, key)
+ return nil
+ }
+ }
+ return fmt.Errorf(`algorithm in the message does not match any of the inferred algorithms`)
+ }
+
+ // Yes, you get to try them all!!!!!!!
+ for _, alg := range algs {
+ sink.Key(alg, key)
+ }
+ return nil
+ }
+ return nil
+}
+
+func (kp *keySetProvider) FetchKeys(_ context.Context, sink KeySink, sig *Signature, msg *Message) error {
+ if kp.requireKid {
+ wantedKid, ok := sig.ProtectedHeaders().KeyID()
+ if !ok {
+ // If the kid is NOT specified... kp.useDefault needs to be true, and the
+ // JWKs must have exactly one key in it
+ if !kp.useDefault {
+ return fmt.Errorf(`failed to find matching key: no key ID ("kid") specified in token`)
+ } else if kp.useDefault && kp.set.Len() > 1 {
+ return fmt.Errorf(`failed to find matching key: no key ID ("kid") specified in token but multiple keys available in key set`)
+ }
+
+ // if we got here, then useDefault == true AND there is exactly
+ // one key in the set.
+ key, ok := kp.set.Key(0)
+ if !ok {
+ return fmt.Errorf(`failed to get key at index 0 (empty JWKS?)`)
+ }
+ return kp.selectKey(sink, key, sig, msg)
+ }
+
+ // Otherwise we better be able to look up the key.
+ // <= v2.0.3 backwards compatible case: only match a single key
+ // whose key ID matches `wantedKid`
+ if !kp.multipleKeysPerKeyID {
+ key, ok := kp.set.LookupKeyID(wantedKid)
+ if !ok {
+ return fmt.Errorf(`failed to find key with key ID %q in key set`, wantedKid)
+ }
+ return kp.selectKey(sink, key, sig, msg)
+ }
+
+ // if multipleKeysPerKeyID is true, we attempt all keys whose key ID matches
+ // the wantedKey
+ ok = false
+ for i := range kp.set.Len() {
+ key, _ := kp.set.Key(i)
+ if kid, ok := key.KeyID(); !ok || kid != wantedKid {
+ continue
+ }
+
+ if err := kp.selectKey(sink, key, sig, msg); err != nil {
+ continue
+ }
+ ok = true
+ // continue processing so that we try all keys with the same key ID
+ }
+ if !ok {
+ return fmt.Errorf(`failed to find key with key ID %q in key set`, wantedKid)
+ }
+ return nil
+ }
+
+ // Otherwise just try all keys
+ for i := range kp.set.Len() {
+ key, ok := kp.set.Key(i)
+ if !ok {
+ return fmt.Errorf(`failed to get key at index %d`, i)
+ }
+ if err := kp.selectKey(sink, key, sig, msg); err != nil {
+ continue
+ }
+ }
+ return nil
+}
+
+type jkuProvider struct {
+ fetcher jwk.Fetcher
+ options []jwk.FetchOption
+}
+
+func (kp jkuProvider) FetchKeys(ctx context.Context, sink KeySink, sig *Signature, _ *Message) error {
+ if kp.fetcher == nil {
+ kp.fetcher = jwk.FetchFunc(jwk.Fetch)
+ }
+
+ kid, ok := sig.ProtectedHeaders().KeyID()
+ if !ok {
+ return fmt.Errorf(`use of "jku" requires that the payload contain a "kid" field in the protected header`)
+ }
+
+ // errors here can't be reliably passed to the consumers.
+ // it's unfortunate, but if you need this control, you are
+ // going to have to write your own fetcher
+ u, ok := sig.ProtectedHeaders().JWKSetURL()
+ if !ok || u == "" {
+ return fmt.Errorf(`use of "jku" field specified, but the field is empty`)
+ }
+ uo, err := url.Parse(u)
+ if err != nil {
+ return fmt.Errorf(`failed to parse "jku": %w`, err)
+ }
+ if uo.Scheme != "https" {
+ return fmt.Errorf(`url in "jku" must be HTTPS`)
+ }
+
+ set, err := kp.fetcher.Fetch(ctx, u, kp.options...)
+ if err != nil {
+ return fmt.Errorf(`failed to fetch %q: %w`, u, err)
+ }
+
+ key, ok := set.LookupKeyID(kid)
+ if !ok {
+ // It is not an error if the key with the kid doesn't exist
+ return nil
+ }
+
+ algs, err := AlgorithmsForKey(key)
+ if err != nil {
+ return fmt.Errorf(`failed to get a list of signature methods for key type %s: %w`, key.KeyType(), err)
+ }
+
+ hdrAlg, ok := sig.ProtectedHeaders().Algorithm()
+ if ok {
+ for _, alg := range algs {
+ // if we have an "alg" field in the JWS, we can only proceed if
+ // the inferred algorithm matches
+ if hdrAlg != alg {
+ continue
+ }
+
+ sink.Key(alg, key)
+ break
+ }
+ }
+ return nil
+}
+
+// KeyProviderFunc is a type of KeyProvider that is implemented by
+// a single function. You can use this to create ad-hoc `KeyProvider`
+// instances.
+type KeyProviderFunc func(context.Context, KeySink, *Signature, *Message) error
+
+func (kp KeyProviderFunc) FetchKeys(ctx context.Context, sink KeySink, sig *Signature, msg *Message) error {
+ return kp(ctx, sink, sig, msg)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy.go
new file mode 100644
index 0000000000..a6687d68cb
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy.go
@@ -0,0 +1,88 @@
+package jws
+
+import (
+ "fmt"
+
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jws/legacy"
+)
+
+func enableLegacySigners() {
+ for _, alg := range []jwa.SignatureAlgorithm{jwa.HS256(), jwa.HS384(), jwa.HS512()} {
+ if err := RegisterSigner(alg, func(alg jwa.SignatureAlgorithm) SignerFactory {
+ return SignerFactoryFn(func() (Signer, error) {
+ return legacy.NewHMACSigner(alg), nil
+ })
+ }(alg)); err != nil {
+ panic(fmt.Sprintf("RegisterSigner failed: %v", err))
+ }
+ if err := RegisterVerifier(alg, func(alg jwa.SignatureAlgorithm) VerifierFactory {
+ return VerifierFactoryFn(func() (Verifier, error) {
+ return legacy.NewHMACVerifier(alg), nil
+ })
+ }(alg)); err != nil {
+ panic(fmt.Sprintf("RegisterVerifier failed: %v", err))
+ }
+ }
+
+ for _, alg := range []jwa.SignatureAlgorithm{jwa.RS256(), jwa.RS384(), jwa.RS512(), jwa.PS256(), jwa.PS384(), jwa.PS512()} {
+ if err := RegisterSigner(alg, func(alg jwa.SignatureAlgorithm) SignerFactory {
+ return SignerFactoryFn(func() (Signer, error) {
+ return legacy.NewRSASigner(alg), nil
+ })
+ }(alg)); err != nil {
+ panic(fmt.Sprintf("RegisterSigner failed: %v", err))
+ }
+ if err := RegisterVerifier(alg, func(alg jwa.SignatureAlgorithm) VerifierFactory {
+ return VerifierFactoryFn(func() (Verifier, error) {
+ return legacy.NewRSAVerifier(alg), nil
+ })
+ }(alg)); err != nil {
+ panic(fmt.Sprintf("RegisterVerifier failed: %v", err))
+ }
+ }
+ for _, alg := range []jwa.SignatureAlgorithm{jwa.ES256(), jwa.ES384(), jwa.ES512(), jwa.ES256K()} {
+ if err := RegisterSigner(alg, func(alg jwa.SignatureAlgorithm) SignerFactory {
+ return SignerFactoryFn(func() (Signer, error) {
+ return legacy.NewECDSASigner(alg), nil
+ })
+ }(alg)); err != nil {
+ panic(fmt.Sprintf("RegisterSigner failed: %v", err))
+ }
+ if err := RegisterVerifier(alg, func(alg jwa.SignatureAlgorithm) VerifierFactory {
+ return VerifierFactoryFn(func() (Verifier, error) {
+ return legacy.NewECDSAVerifier(alg), nil
+ })
+ }(alg)); err != nil {
+ panic(fmt.Sprintf("RegisterVerifier failed: %v", err))
+ }
+ }
+
+ if err := RegisterSigner(jwa.EdDSA(), SignerFactoryFn(func() (Signer, error) {
+ return legacy.NewEdDSASigner(), nil
+ })); err != nil {
+ panic(fmt.Sprintf("RegisterSigner failed: %v", err))
+ }
+ if err := RegisterVerifier(jwa.EdDSA(), VerifierFactoryFn(func() (Verifier, error) {
+ return legacy.NewEdDSAVerifier(), nil
+ })); err != nil {
+ panic(fmt.Sprintf("RegisterVerifier failed: %v", err))
+ }
+}
+
+func legacySignerFor(alg jwa.SignatureAlgorithm) (Signer, error) {
+ muSigner.Lock()
+ s, ok := signers[alg]
+ if !ok {
+ v, err := NewSigner(alg)
+ if err != nil {
+ muSigner.Unlock()
+ return nil, fmt.Errorf(`failed to create payload signer: %w`, err)
+ }
+ signers[alg] = v
+ s = v
+ }
+ muSigner.Unlock()
+
+ return s, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/BUILD.bazel
new file mode 100644
index 0000000000..8e77cece46
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/BUILD.bazel
@@ -0,0 +1,21 @@
+load("@rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "legacy",
+ srcs = [
+ "ecdsa.go",
+ "eddsa.go",
+ "hmac.go",
+ "legacy.go",
+ "rsa.go",
+ ],
+ importpath = "github.com/lestrrat-go/jwx/v3/jws/legacy",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//internal/ecutil",
+ "//internal/keyconv",
+ "//internal/pool",
+ "//jwa",
+ "//jws/internal/keytype",
+ ],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/ecdsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/ecdsa.go
new file mode 100644
index 0000000000..0b714a44b8
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/ecdsa.go
@@ -0,0 +1,204 @@
+package legacy
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "encoding/asn1"
+ "fmt"
+ "math/big"
+
+ "github.com/lestrrat-go/jwx/v3/internal/ecutil"
+ "github.com/lestrrat-go/jwx/v3/internal/keyconv"
+ "github.com/lestrrat-go/jwx/v3/internal/pool"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jws/internal/keytype"
+)
+
+var ecdsaSigners = make(map[jwa.SignatureAlgorithm]*ecdsaSigner)
+var ecdsaVerifiers = make(map[jwa.SignatureAlgorithm]*ecdsaVerifier)
+
+func init() {
+ algs := map[jwa.SignatureAlgorithm]crypto.Hash{
+ jwa.ES256(): crypto.SHA256,
+ jwa.ES384(): crypto.SHA384,
+ jwa.ES512(): crypto.SHA512,
+ jwa.ES256K(): crypto.SHA256,
+ }
+ for alg, hash := range algs {
+ ecdsaSigners[alg] = &ecdsaSigner{
+ alg: alg,
+ hash: hash,
+ }
+ ecdsaVerifiers[alg] = &ecdsaVerifier{
+ alg: alg,
+ hash: hash,
+ }
+ }
+}
+
+func NewECDSASigner(alg jwa.SignatureAlgorithm) Signer {
+ return ecdsaSigners[alg]
+}
+
+// ecdsaSigners are immutable.
+type ecdsaSigner struct {
+ alg jwa.SignatureAlgorithm
+ hash crypto.Hash
+}
+
+func (es ecdsaSigner) Algorithm() jwa.SignatureAlgorithm {
+ return es.alg
+}
+
+func (es *ecdsaSigner) Sign(payload []byte, key any) ([]byte, error) {
+ if key == nil {
+ return nil, fmt.Errorf(`missing private key while signing payload`)
+ }
+
+ h := es.hash.New()
+ if _, err := h.Write(payload); err != nil {
+ return nil, fmt.Errorf(`failed to write payload using ecdsa: %w`, err)
+ }
+
+ signer, ok := key.(crypto.Signer)
+ if ok {
+ if !keytype.IsValidECDSAKey(key) {
+ return nil, fmt.Errorf(`cannot use key of type %T to generate ECDSA based signatures`, key)
+ }
+ switch key.(type) {
+ case ecdsa.PrivateKey, *ecdsa.PrivateKey:
+ // if it's a ecdsa.PrivateKey, it's more efficient to
+ // go through the non-crypto.Signer route. Set ok to false
+ ok = false
+ }
+ }
+
+ var r, s *big.Int
+ var curveBits int
+ if ok {
+ signed, err := signer.Sign(rand.Reader, h.Sum(nil), es.hash)
+ if err != nil {
+ return nil, err
+ }
+
+ var p struct {
+ R *big.Int
+ S *big.Int
+ }
+ if _, err := asn1.Unmarshal(signed, &p); err != nil {
+ return nil, fmt.Errorf(`failed to unmarshal ASN1 encoded signature: %w`, err)
+ }
+
+ // Okay, this is silly, but hear me out. When we use the
+ // crypto.Signer interface, the PrivateKey is hidden.
+ // But we need some information about the key (its bit size).
+ //
+ // So while silly, we're going to have to make another call
+ // here and fetch the Public key.
+ // This probably means that this should be cached some where.
+ cpub := signer.Public()
+ pubkey, ok := cpub.(*ecdsa.PublicKey)
+ if !ok {
+ return nil, fmt.Errorf(`expected *ecdsa.PublicKey, got %T`, pubkey)
+ }
+ curveBits = pubkey.Curve.Params().BitSize
+
+ r = p.R
+ s = p.S
+ } else {
+ var privkey ecdsa.PrivateKey
+ if err := keyconv.ECDSAPrivateKey(&privkey, key); err != nil {
+ return nil, fmt.Errorf(`failed to retrieve ecdsa.PrivateKey out of %T: %w`, key, err)
+ }
+ curveBits = privkey.Curve.Params().BitSize
+ rtmp, stmp, err := ecdsa.Sign(rand.Reader, &privkey, h.Sum(nil))
+ if err != nil {
+ return nil, fmt.Errorf(`failed to sign payload using ecdsa: %w`, err)
+ }
+ r = rtmp
+ s = stmp
+ }
+
+ keyBytes := curveBits / 8
+ // Curve bits do not need to be a multiple of 8.
+ if curveBits%8 > 0 {
+ keyBytes++
+ }
+
+ rBytes := r.Bytes()
+ rBytesPadded := make([]byte, keyBytes)
+ copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+ sBytes := s.Bytes()
+ sBytesPadded := make([]byte, keyBytes)
+ copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+ out := append(rBytesPadded, sBytesPadded...)
+
+ return out, nil
+}
+
+// ecdsaVerifiers are immutable.
+type ecdsaVerifier struct {
+ alg jwa.SignatureAlgorithm
+ hash crypto.Hash
+}
+
+func NewECDSAVerifier(alg jwa.SignatureAlgorithm) Verifier {
+ return ecdsaVerifiers[alg]
+}
+
+func (v ecdsaVerifier) Algorithm() jwa.SignatureAlgorithm {
+ return v.alg
+}
+
+func (v *ecdsaVerifier) Verify(payload []byte, signature []byte, key any) error {
+ if key == nil {
+ return fmt.Errorf(`missing public key while verifying payload`)
+ }
+
+ var pubkey ecdsa.PublicKey
+ if cs, ok := key.(crypto.Signer); ok {
+ cpub := cs.Public()
+ switch cpub := cpub.(type) {
+ case ecdsa.PublicKey:
+ pubkey = cpub
+ case *ecdsa.PublicKey:
+ pubkey = *cpub
+ default:
+ return fmt.Errorf(`failed to retrieve ecdsa.PublicKey out of crypto.Signer %T`, key)
+ }
+ } else {
+ if err := keyconv.ECDSAPublicKey(&pubkey, key); err != nil {
+ return fmt.Errorf(`failed to retrieve ecdsa.PublicKey out of %T: %w`, key, err)
+ }
+ }
+
+ if !pubkey.Curve.IsOnCurve(pubkey.X, pubkey.Y) {
+ return fmt.Errorf(`public key used does not contain a point (X,Y) on the curve`)
+ }
+
+ r := pool.BigInt().Get()
+ s := pool.BigInt().Get()
+ defer pool.BigInt().Put(r)
+ defer pool.BigInt().Put(s)
+
+ keySize := ecutil.CalculateKeySize(pubkey.Curve)
+ if len(signature) != keySize*2 {
+ return fmt.Errorf(`invalid signature length for curve %q`, pubkey.Curve.Params().Name)
+ }
+
+ r.SetBytes(signature[:keySize])
+ s.SetBytes(signature[keySize:])
+
+ h := v.hash.New()
+ if _, err := h.Write(payload); err != nil {
+ return fmt.Errorf(`failed to write payload using ecdsa: %w`, err)
+ }
+
+ if !ecdsa.Verify(&pubkey, h.Sum(nil), r, s) {
+ return fmt.Errorf(`failed to verify signature using ecdsa`)
+ }
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/eddsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/eddsa.go
new file mode 100644
index 0000000000..289e48e3b3
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/eddsa.go
@@ -0,0 +1,79 @@
+package legacy
+
+import (
+ "crypto"
+ "crypto/ed25519"
+ "crypto/rand"
+ "fmt"
+
+ "github.com/lestrrat-go/jwx/v3/internal/keyconv"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jws/internal/keytype"
+)
+
+type eddsaSigner struct{}
+
+func NewEdDSASigner() Signer {
+ return &eddsaSigner{}
+}
+
+func (s eddsaSigner) Algorithm() jwa.SignatureAlgorithm {
+ return jwa.EdDSA()
+}
+
+func (s eddsaSigner) Sign(payload []byte, key any) ([]byte, error) {
+ if key == nil {
+ return nil, fmt.Errorf(`missing private key while signing payload`)
+ }
+
+ // The ed25519.PrivateKey object implements crypto.Signer, so we should
+ // simply accept a crypto.Signer here.
+ signer, ok := key.(crypto.Signer)
+ if ok {
+ if !keytype.IsValidEDDSAKey(key) {
+ return nil, fmt.Errorf(`cannot use key of type %T to generate EdDSA based signatures`, key)
+ }
+ } else {
+ // This fallback exists for cases when jwk.Key was passed, or
+ // users gave us a pointer instead of non-pointer, etc.
+ var privkey ed25519.PrivateKey
+ if err := keyconv.Ed25519PrivateKey(&privkey, key); err != nil {
+ return nil, fmt.Errorf(`failed to retrieve ed25519.PrivateKey out of %T: %w`, key, err)
+ }
+ signer = privkey
+ }
+
+ return signer.Sign(rand.Reader, payload, crypto.Hash(0))
+}
+
+type eddsaVerifier struct{}
+
+func NewEdDSAVerifier() Verifier {
+ return &eddsaVerifier{}
+}
+
+func (v eddsaVerifier) Verify(payload, signature []byte, key any) (err error) {
+ if key == nil {
+ return fmt.Errorf(`missing public key while verifying payload`)
+ }
+
+ var pubkey ed25519.PublicKey
+ signer, ok := key.(crypto.Signer)
+ if ok {
+ v := signer.Public()
+ pubkey, ok = v.(ed25519.PublicKey)
+ if !ok {
+ return fmt.Errorf(`expected crypto.Signer.Public() to return ed25519.PublicKey, but got %T`, v)
+ }
+ } else {
+ if err := keyconv.Ed25519PublicKey(&pubkey, key); err != nil {
+ return fmt.Errorf(`failed to retrieve ed25519.PublicKey out of %T: %w`, key, err)
+ }
+ }
+
+ if !ed25519.Verify(pubkey, payload, signature) {
+ return fmt.Errorf(`failed to match EdDSA signature`)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/hmac.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/hmac.go
new file mode 100644
index 0000000000..7a3c9d1896
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/hmac.go
@@ -0,0 +1,90 @@
+package legacy
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "crypto/sha512"
+ "fmt"
+ "hash"
+
+ "github.com/lestrrat-go/jwx/v3/internal/keyconv"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+)
+
+func init() {
+ algs := map[jwa.SignatureAlgorithm]func() hash.Hash{
+ jwa.HS256(): sha256.New,
+ jwa.HS384(): sha512.New384,
+ jwa.HS512(): sha512.New,
+ }
+
+ for alg, h := range algs {
+ hmacSignFuncs[alg] = makeHMACSignFunc(h)
+ }
+}
+
+// HMACSigner uses crypto/hmac to sign the payloads.
+// This is for legacy support only.
+type HMACSigner struct {
+ alg jwa.SignatureAlgorithm
+ sign hmacSignFunc
+}
+
+type HMACVerifier struct {
+ signer Signer
+}
+
+type hmacSignFunc func(payload []byte, key []byte) ([]byte, error)
+
+var hmacSignFuncs = make(map[jwa.SignatureAlgorithm]hmacSignFunc)
+
+func NewHMACSigner(alg jwa.SignatureAlgorithm) Signer {
+ return &HMACSigner{
+ alg: alg,
+ sign: hmacSignFuncs[alg], // we know this will succeed
+ }
+}
+
+func makeHMACSignFunc(hfunc func() hash.Hash) hmacSignFunc {
+ return func(payload []byte, key []byte) ([]byte, error) {
+ h := hmac.New(hfunc, key)
+ if _, err := h.Write(payload); err != nil {
+ return nil, fmt.Errorf(`failed to write payload using hmac: %w`, err)
+ }
+ return h.Sum(nil), nil
+ }
+}
+
+func (s HMACSigner) Algorithm() jwa.SignatureAlgorithm {
+ return s.alg
+}
+
+func (s HMACSigner) Sign(payload []byte, key any) ([]byte, error) {
+ var hmackey []byte
+ if err := keyconv.ByteSliceKey(&hmackey, key); err != nil {
+ return nil, fmt.Errorf(`invalid key type %T. []byte is required: %w`, key, err)
+ }
+
+ if len(hmackey) == 0 {
+ return nil, fmt.Errorf(`missing key while signing payload`)
+ }
+
+ return s.sign(payload, hmackey)
+}
+
+func NewHMACVerifier(alg jwa.SignatureAlgorithm) Verifier {
+ s := NewHMACSigner(alg)
+ return &HMACVerifier{signer: s}
+}
+
+func (v HMACVerifier) Verify(payload, signature []byte, key any) (err error) {
+ expected, err := v.signer.Sign(payload, key)
+ if err != nil {
+ return fmt.Errorf(`failed to generated signature: %w`, err)
+ }
+
+ if !hmac.Equal(signature, expected) {
+ return fmt.Errorf(`failed to match hmac signature`)
+ }
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/legacy.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/legacy.go
new file mode 100644
index 0000000000..84a2527428
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/legacy.go
@@ -0,0 +1,36 @@
+// Package legacy provides support for legacy implementation of JWS signing and verification.
+// Types, functions, and variables in this package are exported only for legacy support,
+// and should not be relied upon for new code.
+//
+// This package will be available until v3 is sunset, but it will be removed in v4
+package legacy
+
+import (
+ "github.com/lestrrat-go/jwx/v3/jwa"
+)
+
+// Signer generates the signature for a given payload.
+// This is for legacy support only.
+type Signer interface {
+ // Sign creates a signature for the given payload.
+ // The second argument is the key used for signing the payload, and is usually
+ // the private key type associated with the signature method. For example,
+ // for `jwa.RSXXX` and `jwa.PSXXX` types, you need to pass the
+ // `*"crypto/rsa".PrivateKey` type.
+ // Check the documentation for each signer for details
+ Sign([]byte, any) ([]byte, error)
+
+ Algorithm() jwa.SignatureAlgorithm
+}
+
+// This is for legacy support only.
+type Verifier interface {
+ // Verify checks whether the payload and signature are valid for
+ // the given key.
+ // `key` is the key used for verifying the payload, and is usually
+ // the public key associated with the signature method. For example,
+ // for `jwa.RSXXX` and `jwa.PSXXX` types, you need to pass the
+ // `*"crypto/rsa".PublicKey` type.
+ // Check the documentation for each verifier for details
+ Verify(payload []byte, signature []byte, key any) error
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/rsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/rsa.go
new file mode 100644
index 0000000000..aef110a5cf
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/rsa.go
@@ -0,0 +1,145 @@
+package legacy
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "fmt"
+
+ "github.com/lestrrat-go/jwx/v3/internal/keyconv"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jws/internal/keytype"
+)
+
+var rsaSigners = make(map[jwa.SignatureAlgorithm]*rsaSigner)
+var rsaVerifiers = make(map[jwa.SignatureAlgorithm]*rsaVerifier)
+
+func init() {
+ data := map[jwa.SignatureAlgorithm]struct {
+ Hash crypto.Hash
+ PSS bool
+ }{
+ jwa.RS256(): {
+ Hash: crypto.SHA256,
+ },
+ jwa.RS384(): {
+ Hash: crypto.SHA384,
+ },
+ jwa.RS512(): {
+ Hash: crypto.SHA512,
+ },
+ jwa.PS256(): {
+ Hash: crypto.SHA256,
+ PSS: true,
+ },
+ jwa.PS384(): {
+ Hash: crypto.SHA384,
+ PSS: true,
+ },
+ jwa.PS512(): {
+ Hash: crypto.SHA512,
+ PSS: true,
+ },
+ }
+
+ for alg, item := range data {
+ rsaSigners[alg] = &rsaSigner{
+ alg: alg,
+ hash: item.Hash,
+ pss: item.PSS,
+ }
+ rsaVerifiers[alg] = &rsaVerifier{
+ alg: alg,
+ hash: item.Hash,
+ pss: item.PSS,
+ }
+ }
+}
+
+type rsaSigner struct {
+ alg jwa.SignatureAlgorithm
+ hash crypto.Hash
+ pss bool
+}
+
+func NewRSASigner(alg jwa.SignatureAlgorithm) Signer {
+ return rsaSigners[alg]
+}
+
+func (rs *rsaSigner) Algorithm() jwa.SignatureAlgorithm {
+ return rs.alg
+}
+
+func (rs *rsaSigner) Sign(payload []byte, key any) ([]byte, error) {
+ if key == nil {
+ return nil, fmt.Errorf(`missing private key while signing payload`)
+ }
+
+ signer, ok := key.(crypto.Signer)
+ if ok {
+ if !keytype.IsValidRSAKey(key) {
+ return nil, fmt.Errorf(`cannot use key of type %T to generate RSA based signatures`, key)
+ }
+ } else {
+ var privkey rsa.PrivateKey
+ if err := keyconv.RSAPrivateKey(&privkey, key); err != nil {
+ return nil, fmt.Errorf(`failed to retrieve rsa.PrivateKey out of %T: %w`, key, err)
+ }
+ signer = &privkey
+ }
+
+ h := rs.hash.New()
+ if _, err := h.Write(payload); err != nil {
+ return nil, fmt.Errorf(`failed to write payload to hash: %w`, err)
+ }
+ if rs.pss {
+ return signer.Sign(rand.Reader, h.Sum(nil), &rsa.PSSOptions{
+ Hash: rs.hash,
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ })
+ }
+ return signer.Sign(rand.Reader, h.Sum(nil), rs.hash)
+}
+
+type rsaVerifier struct {
+ alg jwa.SignatureAlgorithm
+ hash crypto.Hash
+ pss bool
+}
+
+func NewRSAVerifier(alg jwa.SignatureAlgorithm) Verifier {
+ return rsaVerifiers[alg]
+}
+
+func (rv *rsaVerifier) Verify(payload, signature []byte, key any) error {
+ if key == nil {
+ return fmt.Errorf(`missing public key while verifying payload`)
+ }
+
+ var pubkey rsa.PublicKey
+ if cs, ok := key.(crypto.Signer); ok {
+ cpub := cs.Public()
+ switch cpub := cpub.(type) {
+ case rsa.PublicKey:
+ pubkey = cpub
+ case *rsa.PublicKey:
+ pubkey = *cpub
+ default:
+ return fmt.Errorf(`failed to retrieve rsa.PublicKey out of crypto.Signer %T`, key)
+ }
+ } else {
+ if err := keyconv.RSAPublicKey(&pubkey, key); err != nil {
+ return fmt.Errorf(`failed to retrieve rsa.PublicKey out of %T: %w`, key, err)
+ }
+ }
+
+ h := rv.hash.New()
+ if _, err := h.Write(payload); err != nil {
+ return fmt.Errorf(`failed to write payload to hash: %w`, err)
+ }
+
+ if rv.pss {
+ return rsa.VerifyPSS(&pubkey, rv.hash, h.Sum(nil), signature, nil)
+ }
+ return rsa.VerifyPKCS1v15(&pubkey, rv.hash, h.Sum(nil), signature)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/message.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/message.go
new file mode 100644
index 0000000000..e113d1438c
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/message.go
@@ -0,0 +1,550 @@
+package jws
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+ "github.com/lestrrat-go/jwx/v3/internal/pool"
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+)
+
+func NewSignature() *Signature {
+ return &Signature{}
+}
+
+func (s *Signature) DecodeCtx() DecodeCtx {
+ return s.dc
+}
+
+func (s *Signature) SetDecodeCtx(dc DecodeCtx) {
+ s.dc = dc
+}
+
+func (s Signature) PublicHeaders() Headers {
+ return s.headers
+}
+
+func (s *Signature) SetPublicHeaders(v Headers) *Signature {
+ s.headers = v
+ return s
+}
+
+func (s Signature) ProtectedHeaders() Headers {
+ return s.protected
+}
+
+func (s *Signature) SetProtectedHeaders(v Headers) *Signature {
+ s.protected = v
+ return s
+}
+
+func (s Signature) Signature() []byte {
+ return s.signature
+}
+
+func (s *Signature) SetSignature(v []byte) *Signature {
+ s.signature = v
+ return s
+}
+
+type signatureUnmarshalProbe struct {
+ Header Headers `json:"header,omitempty"`
+ Protected *string `json:"protected,omitempty"`
+ Signature *string `json:"signature,omitempty"`
+}
+
+func (s *Signature) UnmarshalJSON(data []byte) error {
+ var sup signatureUnmarshalProbe
+ sup.Header = NewHeaders()
+ if err := json.Unmarshal(data, &sup); err != nil {
+ return fmt.Errorf(`failed to unmarshal signature into temporary struct: %w`, err)
+ }
+
+ s.headers = sup.Header
+ if buf := sup.Protected; buf != nil {
+ src := []byte(*buf)
+ if !bytes.HasPrefix(src, []byte{tokens.OpenCurlyBracket}) {
+ decoded, err := base64.Decode(src)
+ if err != nil {
+ return fmt.Errorf(`failed to base64 decode protected headers: %w`, err)
+ }
+ src = decoded
+ }
+
+ prt := NewHeaders()
+ //nolint:forcetypeassert
+ prt.(*stdHeaders).SetDecodeCtx(s.DecodeCtx())
+ if err := json.Unmarshal(src, prt); err != nil {
+ return fmt.Errorf(`failed to unmarshal protected headers: %w`, err)
+ }
+ //nolint:forcetypeassert
+ prt.(*stdHeaders).SetDecodeCtx(nil)
+ s.protected = prt
+ }
+
+ if sup.Signature != nil {
+ decoded, err := base64.DecodeString(*sup.Signature)
+ if err != nil {
+ return fmt.Errorf(`failed to base decode signature: %w`, err)
+ }
+ s.signature = decoded
+ }
+ return nil
+}
+
+// Sign populates the signature field, with a signature generated by
+// given the signer object and payload.
+//
+// The first return value is the raw signature in binary format.
+// The second return value s the full three-segment signature
+// (e.g. "eyXXXX.XXXXX.XXXX")
+//
+// This method is deprecated, and will be remove in a future release.
+// Signature objects in the future will only be used as containers,
+// and signing will be done using the `jws.Sign` function, or alternatively
+// you could use jwsbb package to craft the signature manually.
+func (s *Signature) Sign(payload []byte, signer Signer, key any) ([]byte, []byte, error) {
+ return s.sign2(payload, signer, key)
+}
+
+func (s *Signature) sign2(payload []byte, signer interface{ Algorithm() jwa.SignatureAlgorithm }, key any) ([]byte, []byte, error) {
+ // Create a signatureBuilder to use the shared signing logic
+ sb := signatureBuilderPool.Get()
+ defer signatureBuilderPool.Put(sb)
+
+ sb.alg = signer.Algorithm()
+ sb.key = key
+ sb.protected = s.protected
+ sb.public = s.headers
+
+ // Set up the appropriate signer interface
+ switch typedSigner := signer.(type) {
+ case Signer2:
+ sb.signer2 = typedSigner
+ case Signer:
+ sb.signer = typedSigner
+ default:
+ return nil, nil, fmt.Errorf(`invalid signer type: %T`, signer)
+ }
+
+ // Create a minimal sign context
+ sc := signContextPool.Get()
+ defer signContextPool.Put(sc)
+
+ sc.detached = s.detached
+
+ encoder := s.encoder
+ if encoder == nil {
+ encoder = base64.DefaultEncoder()
+ }
+ sc.encoder = encoder
+
+ // Build the signature using signatureBuilder
+ sig, err := sb.Build(sc, payload)
+ if err != nil {
+ return nil, nil, fmt.Errorf(`failed to build signature: %w`, err)
+ }
+
+ // Copy the signature result back to this signature instance
+ s.signature = sig.signature
+ s.protected = sig.protected
+ s.headers = sig.headers
+
+ // Build the complete JWS token for the return value
+ buf := pool.BytesBuffer().Get()
+ defer pool.BytesBuffer().Put(buf)
+
+ // Marshal the merged headers for the final output
+ hdrs, err := mergeHeaders(s.headers, s.protected)
+ if err != nil {
+ return nil, nil, fmt.Errorf(`failed to merge headers: %w`, err)
+ }
+
+ hdrbuf, err := json.Marshal(hdrs)
+ if err != nil {
+ return nil, nil, fmt.Errorf(`failed to marshal headers: %w`, err)
+ }
+
+ buf.WriteString(encoder.EncodeToString(hdrbuf))
+ buf.WriteByte(tokens.Period)
+
+ var plen int
+ b64 := getB64Value(hdrs)
+ if b64 {
+ encoded := encoder.EncodeToString(payload)
+ plen = len(encoded)
+ buf.WriteString(encoded)
+ } else {
+ if !s.detached {
+ if bytes.Contains(payload, []byte{tokens.Period}) {
+ return nil, nil, fmt.Errorf(`payload must not contain a "."`)
+ }
+ }
+ plen = len(payload)
+ buf.Write(payload)
+ }
+
+ // Handle detached payload
+ if s.detached {
+ buf.Truncate(buf.Len() - plen)
+ }
+
+ buf.WriteByte(tokens.Period)
+ buf.WriteString(encoder.EncodeToString(s.signature))
+ ret := make([]byte, buf.Len())
+ copy(ret, buf.Bytes())
+
+ return s.signature, ret, nil
+}
+
+func NewMessage() *Message {
+ return &Message{}
+}
+
+// Clears the internal raw buffer that was accumulated during
+// the verify phase
+func (m *Message) clearRaw() {
+ for _, sig := range m.signatures {
+ if protected := sig.protected; protected != nil {
+ if cr, ok := protected.(*stdHeaders); ok {
+ cr.raw = nil
+ }
+ }
+ }
+}
+
+func (m *Message) SetDecodeCtx(dc DecodeCtx) {
+ m.dc = dc
+}
+
+func (m *Message) DecodeCtx() DecodeCtx {
+ return m.dc
+}
+
+// Payload returns the decoded payload
+func (m Message) Payload() []byte {
+ return m.payload
+}
+
+func (m *Message) SetPayload(v []byte) *Message {
+ m.payload = v
+ return m
+}
+
+func (m Message) Signatures() []*Signature {
+ return m.signatures
+}
+
+func (m *Message) AppendSignature(v *Signature) *Message {
+ m.signatures = append(m.signatures, v)
+ return m
+}
+
+func (m *Message) ClearSignatures() *Message {
+ m.signatures = nil
+ return m
+}
+
+// LookupSignature looks up a particular signature entry using
+// the `kid` value
+func (m Message) LookupSignature(kid string) []*Signature {
+ var sigs []*Signature
+ for _, sig := range m.signatures {
+ if hdr := sig.PublicHeaders(); hdr != nil {
+ hdrKeyID, ok := hdr.KeyID()
+ if ok && hdrKeyID == kid {
+ sigs = append(sigs, sig)
+ continue
+ }
+ }
+
+ if hdr := sig.ProtectedHeaders(); hdr != nil {
+ hdrKeyID, ok := hdr.KeyID()
+ if ok && hdrKeyID == kid {
+ sigs = append(sigs, sig)
+ continue
+ }
+ }
+ }
+ return sigs
+}
+
+// This struct is used to first probe for the structure of the
+// incoming JSON object. We then decide how to parse it
+// from the fields that are populated.
+type messageUnmarshalProbe struct {
+ Payload *string `json:"payload"`
+ Signatures []json.RawMessage `json:"signatures,omitempty"`
+ Header Headers `json:"header,omitempty"`
+ Protected *string `json:"protected,omitempty"`
+ Signature *string `json:"signature,omitempty"`
+}
+
+func (m *Message) UnmarshalJSON(buf []byte) error {
+ m.payload = nil
+ m.signatures = nil
+ m.b64 = true
+
+ var mup messageUnmarshalProbe
+ mup.Header = NewHeaders()
+ if err := json.Unmarshal(buf, &mup); err != nil {
+ return fmt.Errorf(`failed to unmarshal into temporary structure: %w`, err)
+ }
+
+ b64 := true
+ if mup.Signature == nil { // flattened signature is NOT present
+ if len(mup.Signatures) == 0 {
+ return fmt.Errorf(`required field "signatures" not present`)
+ }
+
+ m.signatures = make([]*Signature, 0, len(mup.Signatures))
+ for i, rawsig := range mup.Signatures {
+ var sig Signature
+ sig.SetDecodeCtx(m.DecodeCtx())
+ if err := json.Unmarshal(rawsig, &sig); err != nil {
+ return fmt.Errorf(`failed to unmarshal signature #%d: %w`, i+1, err)
+ }
+ sig.SetDecodeCtx(nil)
+
+ if sig.protected == nil {
+ // Instead of barfing on a nil protected header, use an empty header
+ sig.protected = NewHeaders()
+ }
+
+ if i == 0 {
+ if !getB64Value(sig.protected) {
+ b64 = false
+ }
+ } else {
+ if b64 != getB64Value(sig.protected) {
+ return fmt.Errorf(`b64 value must be the same for all signatures`)
+ }
+ }
+
+ m.signatures = append(m.signatures, &sig)
+ }
+ } else { // .signature is present, it's a flattened structure
+ if len(mup.Signatures) != 0 {
+ return fmt.Errorf(`invalid format ("signatures" and "signature" keys cannot both be present)`)
+ }
+
+ var sig Signature
+ sig.headers = mup.Header
+ if src := mup.Protected; src != nil {
+ decoded, err := base64.DecodeString(*src)
+ if err != nil {
+ return fmt.Errorf(`failed to base64 decode flattened protected headers: %w`, err)
+ }
+ prt := NewHeaders()
+ //nolint:forcetypeassert
+ prt.(*stdHeaders).SetDecodeCtx(m.DecodeCtx())
+ if err := json.Unmarshal(decoded, prt); err != nil {
+ return fmt.Errorf(`failed to unmarshal flattened protected headers: %w`, err)
+ }
+ //nolint:forcetypeassert
+ prt.(*stdHeaders).SetDecodeCtx(nil)
+ sig.protected = prt
+ }
+
+ if sig.protected == nil {
+ // Instead of barfing on a nil protected header, use an empty header
+ sig.protected = NewHeaders()
+ }
+
+ decoded, err := base64.DecodeString(*mup.Signature)
+ if err != nil {
+ return fmt.Errorf(`failed to base64 decode flattened signature: %w`, err)
+ }
+ sig.signature = decoded
+
+ m.signatures = []*Signature{&sig}
+ b64 = getB64Value(sig.protected)
+ }
+
+ if mup.Payload != nil {
+ if !b64 { // NOT base64 encoded
+ m.payload = []byte(*mup.Payload)
+ } else {
+ decoded, err := base64.DecodeString(*mup.Payload)
+ if err != nil {
+ return fmt.Errorf(`failed to base64 decode payload: %w`, err)
+ }
+ m.payload = decoded
+ }
+ }
+ m.b64 = b64
+ return nil
+}
+
+func (m Message) MarshalJSON() ([]byte, error) {
+ if len(m.signatures) == 1 {
+ return m.marshalFlattened()
+ }
+ return m.marshalFull()
+}
+
+func (m Message) marshalFlattened() ([]byte, error) {
+ buf := pool.BytesBuffer().Get()
+ defer pool.BytesBuffer().Put(buf)
+
+ sig := m.signatures[0]
+
+ buf.WriteRune(tokens.OpenCurlyBracket)
+ var wrote bool
+
+ if hdr := sig.headers; hdr != nil {
+ hdrjs, err := json.Marshal(hdr)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to marshal "header" (flattened format): %w`, err)
+ }
+ buf.WriteString(`"header":`)
+ buf.Write(hdrjs)
+ wrote = true
+ }
+
+ if wrote {
+ buf.WriteRune(tokens.Comma)
+ }
+ buf.WriteString(`"payload":"`)
+ buf.WriteString(base64.EncodeToString(m.payload))
+ buf.WriteRune('"')
+
+ if protected := sig.protected; protected != nil {
+ protectedbuf, err := json.Marshal(protected)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to marshal "protected" (flattened format): %w`, err)
+ }
+ buf.WriteString(`,"protected":"`)
+ buf.WriteString(base64.EncodeToString(protectedbuf))
+ buf.WriteRune('"')
+ }
+
+ buf.WriteString(`,"signature":"`)
+ buf.WriteString(base64.EncodeToString(sig.signature))
+ buf.WriteRune('"')
+ buf.WriteRune(tokens.CloseCurlyBracket)
+
+ ret := make([]byte, buf.Len())
+ copy(ret, buf.Bytes())
+ return ret, nil
+}
+
+func (m Message) marshalFull() ([]byte, error) {
+ buf := pool.BytesBuffer().Get()
+ defer pool.BytesBuffer().Put(buf)
+
+ buf.WriteString(`{"payload":"`)
+ buf.WriteString(base64.EncodeToString(m.payload))
+ buf.WriteString(`","signatures":[`)
+ for i, sig := range m.signatures {
+ if i > 0 {
+ buf.WriteRune(tokens.Comma)
+ }
+
+ buf.WriteRune(tokens.OpenCurlyBracket)
+ var wrote bool
+ if hdr := sig.headers; hdr != nil {
+ hdrbuf, err := json.Marshal(hdr)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to marshal "header" for signature #%d: %w`, i+1, err)
+ }
+ buf.WriteString(`"header":`)
+ buf.Write(hdrbuf)
+ wrote = true
+ }
+
+ if protected := sig.protected; protected != nil {
+ protectedbuf, err := json.Marshal(protected)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to marshal "protected" for signature #%d: %w`, i+1, err)
+ }
+ if wrote {
+ buf.WriteRune(tokens.Comma)
+ }
+ buf.WriteString(`"protected":"`)
+ buf.WriteString(base64.EncodeToString(protectedbuf))
+ buf.WriteRune('"')
+ wrote = true
+ }
+
+ if len(sig.signature) > 0 {
+ // If InsecureNoSignature is enabled, signature may not exist
+ if wrote {
+ buf.WriteRune(tokens.Comma)
+ }
+ buf.WriteString(`"signature":"`)
+ buf.WriteString(base64.EncodeToString(sig.signature))
+ buf.WriteString(`"`)
+ }
+ buf.WriteString(`}`)
+ }
+ buf.WriteString(`]}`)
+
+ ret := make([]byte, buf.Len())
+ copy(ret, buf.Bytes())
+ return ret, nil
+}
+
+// Compact generates a JWS message in compact serialization format from
+// `*jws.Message` object. The object contain exactly one signature, or
+// an error is returned.
+//
+// If using a detached payload, the payload must already be stored in
+// the `*jws.Message` object, and the `jws.WithDetached()` option
+// must be passed to the function.
+func Compact(msg *Message, options ...CompactOption) ([]byte, error) {
+ if l := len(msg.signatures); l != 1 {
+ return nil, fmt.Errorf(`jws.Compact: cannot serialize message with %d signatures (must be one)`, l)
+ }
+
+ var detached bool
+ var encoder Base64Encoder = base64.DefaultEncoder()
+ for _, option := range options {
+ switch option.Ident() {
+ case identDetached{}:
+ if err := option.Value(&detached); err != nil {
+ return nil, fmt.Errorf(`jws.Compact: failed to retrieve detached option value: %w`, err)
+ }
+ case identBase64Encoder{}:
+ if err := option.Value(&encoder); err != nil {
+ return nil, fmt.Errorf(`jws.Compact: failed to retrieve base64 encoder option value: %w`, err)
+ }
+ }
+ }
+
+ s := msg.signatures[0]
+ // XXX check if this is correct
+ hdrs := s.ProtectedHeaders()
+
+ hdrbuf, err := json.Marshal(hdrs)
+ if err != nil {
+ return nil, fmt.Errorf(`jws.Compress: failed to marshal headers: %w`, err)
+ }
+
+ buf := pool.BytesBuffer().Get()
+ defer pool.BytesBuffer().Put(buf)
+
+ buf.WriteString(encoder.EncodeToString(hdrbuf))
+ buf.WriteByte(tokens.Period)
+
+ if !detached {
+ if getB64Value(hdrs) {
+ encoded := encoder.EncodeToString(msg.payload)
+ buf.WriteString(encoded)
+ } else {
+ if bytes.Contains(msg.payload, []byte{tokens.Period}) {
+ return nil, fmt.Errorf(`jws.Compress: payload must not contain a "."`)
+ }
+ buf.Write(msg.payload)
+ }
+ }
+
+ buf.WriteByte(tokens.Period)
+ buf.WriteString(encoder.EncodeToString(s.signature))
+ ret := make([]byte, buf.Len())
+ copy(ret, buf.Bytes())
+ return ret, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/options.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/options.go
new file mode 100644
index 0000000000..729e561936
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/options.go
@@ -0,0 +1,259 @@
+package jws
+
+import (
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jwk"
+ "github.com/lestrrat-go/option/v2"
+)
+
+type identInsecureNoSignature struct{}
+
+// WithJSON specifies that the result of `jws.Sign()` is serialized in
+// JSON format.
+//
+// If you pass multiple keys to `jws.Sign()`, it will fail unless
+// you also pass this option.
+func WithJSON(options ...WithJSONSuboption) SignVerifyParseOption {
+ var pretty bool
+ for _, option := range options {
+ switch option.Ident() {
+ case identPretty{}:
+ if err := option.Value(&pretty); err != nil {
+ panic(`jws.WithJSON() option must be of type bool`)
+ }
+ }
+ }
+
+ format := fmtJSON
+ if pretty {
+ format = fmtJSONPretty
+ }
+ return &signVerifyParseOption{option.New(identSerialization{}, format)}
+}
+
+type withKey struct {
+ alg jwa.KeyAlgorithm
+ key any
+ protected Headers
+ public Headers
+}
+
+// This exists as an escape hatch to modify the header values after the fact
+func (w *withKey) Protected(v Headers) Headers {
+ if w.protected == nil && v != nil {
+ w.protected = v
+ }
+ return w.protected
+}
+
+// WithKey is used to pass a static algorithm/key pair to either `jws.Sign()` or `jws.Verify()`.
+//
+// The `alg` parameter is the identifier for the signature algorithm that should be used.
+// It is of type `jwa.KeyAlgorithm` but in reality you can only pass `jwa.SignatureAlgorithm`
+// types. It is this way so that the value in `(jwk.Key).Algorithm()` can be directly
+// passed to the option. If you specify other algorithm types such as `jwa.KeyEncryptionAlgorithm`,
+// then you will get an error when `jws.Sign()` or `jws.Verify()` is executed.
+//
+// The `alg` parameter cannot be "none" (jwa.NoSignature) for security reasons.
+// You will have to use a separate, more explicit option to allow the use of "none"
+// algorithm (WithInsecureNoSignature).
+//
+// The algorithm specified in the `alg` parameter MUST be able to support
+// the type of key you provided, otherwise an error is returned.
+//
+// Any of the following is accepted for the `key` parameter:
+// * A "raw" key (e.g. rsa.PrivateKey, ecdsa.PrivateKey, etc)
+// * A crypto.Signer
+// * A jwk.Key
+//
+// Note that due to technical reasons, this library is NOT able to differentiate
+// between a valid/invalid key for given algorithm if the key implements crypto.Signer
+// and the key is from an external library. For example, while we can tell that it is
+// invalid to use `jwk.WithKey(jwa.RSA256, ecdsaPrivateKey)` because the key is
+// presumably from `crypto/ecdsa` or this library, if you use a KMS wrapper
+// that implements crypto.Signer that is outside of the go standard library or this
+// library, we will not be able to properly catch the misuse of such keys --
+// the output will happily generate an ECDSA signature even in the presence of
+// `jwa.RSA256`
+//
+// A `crypto.Signer` is used when the private part of a key is
+// kept in an inaccessible location, such as hardware.
+// `crypto.Signer` is currently supported for RSA, ECDSA, and EdDSA
+// family of algorithms. You may consider using `github.com/jwx-go/crypto-signer`
+// if you would like to use keys stored in GCP/AWS KMS services.
+//
+// If the key is a jwk.Key and the key contains a key ID (`kid` field),
+// then it is added to the protected header generated by the signature.
+//
+// `jws.WithKey()` can further accept suboptions to change signing behavior
+// when used with `jws.Sign()`. `jws.WithProtected()` and `jws.WithPublic()`
+// can be passed to specify JWS headers that should be used whe signing.
+//
+// If the protected headers contain "b64" field, then the boolean value for the field
+// is respected when serializing. That is, if you specify a header with
+// `{"b64": false}`, then the payload is not base64 encoded.
+//
+// These suboptions are ignored when the `jws.WithKey()` option is used with `jws.Verify()`.
+func WithKey(alg jwa.KeyAlgorithm, key any, options ...WithKeySuboption) SignVerifyOption {
+ // Implementation note: this option is shared between Sign() and
+ // Verify(). As such we don't create a KeyProvider here because
+ // if used in Sign() we would be doing something else.
+ var protected, public Headers
+ for _, option := range options {
+ switch option.Ident() {
+ case identProtectedHeaders{}:
+ if err := option.Value(&protected); err != nil {
+ panic(`jws.WithKey() option must be of type Headers`)
+ }
+ case identPublicHeaders{}:
+ if err := option.Value(&public); err != nil {
+ panic(`jws.WithKey() option must be of type Headers`)
+ }
+ }
+ }
+
+ return &signVerifyOption{
+ option.New(identKey{}, &withKey{
+ alg: alg,
+ key: key,
+ protected: protected,
+ public: public,
+ }),
+ }
+}
+
+// WithKeySet specifies a JWKS (jwk.Set) to use for verification.
+//
+// Because a JWKS can contain multiple keys and this library cannot tell
+// which one of the keys should be used for verification, we by default
+// require that both `alg` and `kid` fields in the JWS _and_ the
+// key match before a key is considered to be used.
+//
+// There are ways to override this behavior, but they must be explicitly
+// specified by the caller.
+//
+// To work with keys/JWS messages not having a `kid` field, you may specify
+// the suboption `WithKeySetRequired` via `jws.WithKey(key, jws.WithRequireKid(false))`.
+// This will allow the library to proceed without having to match the `kid` field.
+//
+// However, it will still check if the `alg` fields in the JWS message and the key(s)
+// match. If you must work with JWS messages that do not have an `alg` field,
+// you will need to use `jws.WithKeySet(key, jws.WithInferAlgorithm(true))`.
+//
+// See the documentation for `WithInferAlgorithm()` for more details.
+func WithKeySet(set jwk.Set, options ...WithKeySetSuboption) VerifyOption {
+ requireKid := true
+ var useDefault, inferAlgorithm, multipleKeysPerKeyID bool
+ for _, option := range options {
+ switch option.Ident() {
+ case identRequireKid{}:
+ if err := option.Value(&requireKid); err != nil {
+ panic(`jws.WithKeySet() option must be of type bool`)
+ }
+ case identUseDefault{}:
+ if err := option.Value(&useDefault); err != nil {
+ panic(`jws.WithKeySet() option must be of type bool`)
+ }
+ case identMultipleKeysPerKeyID{}:
+ if err := option.Value(&multipleKeysPerKeyID); err != nil {
+ panic(`jws.WithKeySet() option must be of type bool`)
+ }
+ case identInferAlgorithmFromKey{}:
+ if err := option.Value(&inferAlgorithm); err != nil {
+ panic(`jws.WithKeySet() option must be of type bool`)
+ }
+ }
+ }
+
+ return WithKeyProvider(&keySetProvider{
+ set: set,
+ requireKid: requireKid,
+ useDefault: useDefault,
+ multipleKeysPerKeyID: multipleKeysPerKeyID,
+ inferAlgorithm: inferAlgorithm,
+ })
+}
+
+// WithVerifyAuto enables automatic verification of the signature using the JWKS specified in
+// the `jku` header. Note that by default this option will _reject_ any jku
+// provided by the JWS message. Read on for details.
+//
+// The JWKS is retrieved by the `jwk.Fetcher` specified in the first argument.
+// If the fetcher object is nil, the default fetcher, which is the `jwk.Fetch()`
+// function (wrapped in the `jwk.FetchFunc` type) is used.
+//
+// The remaining arguments are passed to the `(jwk.Fetcher).Fetch` method
+// when the JWKS is retrieved.
+//
+// jws.WithVerifyAuto(nil) // uses jwk.Fetch
+// jws.WithVerifyAuto(jwk.NewCachedFetcher(...)) // uses cached fetcher
+// jws.WithVerifyAuto(myFetcher) // use your custom fetcher
+//
+// By default a whitelist that disallows all URLs is added to the options
+// passed to the fetcher. You must explicitly specify a whitelist that allows
+// the URLs you trust. This default behavior is provided because by design
+// of the JWS specification it is the/ caller's responsibility to verify if
+// the URL specified in the `jku` header can be trusted -- thus by default
+// we trust nothing.
+//
+// Users are free to specify an open whitelist if they so choose, but this must
+// be explicitly done:
+//
+// jws.WithVerifyAuto(nil, jwk.WithFetchWhitelist(jwk.InsecureWhitelist()))
+//
+// You can also use `jwk.CachedFetcher` to use cached JWKS objects, but do note
+// that this object is not really designed to accommodate a large set of
+// arbitrary URLs. Use `jwk.CachedFetcher` as the first argument if you only
+// have a small set of URLs that you trust. For anything more complex, you should
+// implement your own `jwk.Fetcher` object.
+func WithVerifyAuto(f jwk.Fetcher, options ...jwk.FetchOption) VerifyOption {
+ // the option MUST start with a "disallow no whitelist" to force
+ // users provide a whitelist
+ options = append(append([]jwk.FetchOption(nil), jwk.WithFetchWhitelist(allowNoneWhitelist)), options...)
+
+ return WithKeyProvider(jkuProvider{
+ fetcher: f,
+ options: options,
+ })
+}
+
+type withInsecureNoSignature struct {
+ protected Headers
+}
+
+// This exists as an escape hatch to modify the header values after the fact
+func (w *withInsecureNoSignature) Protected(v Headers) Headers {
+ if w.protected == nil && v != nil {
+ w.protected = v
+ }
+ return w.protected
+}
+
+// WithInsecureNoSignature creates an option that allows the user to use the
+// "none" signature algorithm.
+//
+// Please note that this is insecure, and should never be used in production
+// (this is exactly why specifying "none"/jwa.NoSignature to `jws.WithKey()`
+// results in an error when `jws.Sign()` is called -- we do not allow using
+// "none" by accident)
+//
+// TODO: create specific suboption set for this option
+func WithInsecureNoSignature(options ...WithKeySuboption) SignOption {
+ var protected Headers
+ for _, option := range options {
+ switch option.Ident() {
+ case identProtectedHeaders{}:
+ if err := option.Value(&protected); err != nil {
+ panic(`jws.WithInsecureNoSignature() option must be of type Headers`)
+ }
+ }
+ }
+
+ return &signOption{
+ option.New(identInsecureNoSignature{},
+ &withInsecureNoSignature{
+ protected: protected,
+ },
+ ),
+ }
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/options.yaml b/vendor/github.com/lestrrat-go/jwx/v3/jws/options.yaml
new file mode 100644
index 0000000000..303ab3a32e
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/options.yaml
@@ -0,0 +1,234 @@
+package_name: jws
+output: jws/options_gen.go
+interfaces:
+ - name: CompactOption
+ comment: |
+ CompactOption describes options that can be passed to `jws.Compact`
+ - name: VerifyOption
+ comment: |
+ VerifyOption describes options that can be passed to `jws.Verify`
+ methods:
+ - verifyOption
+ - parseOption
+ - name: SignOption
+ comment: |
+ SignOption describes options that can be passed to `jws.Sign`
+ - name: SignVerifyOption
+ methods:
+ - signOption
+ - verifyOption
+ - parseOption
+ comment: |
+ SignVerifyOption describes options that can be passed to either `jws.Verify` or `jws.Sign`
+ - name: SignVerifyCompactOption
+ methods:
+ - signOption
+ - verifyOption
+ - compactOption
+ - parseOption
+ comment: |
+ SignVerifyCompactOption describes options that can be passed to either `jws.Verify`,
+ `jws.Sign`, or `jws.Compact`
+ - name: WithJSONSuboption
+ concrete_type: withJSONSuboption
+ comment: |
+ JSONSuboption describes suboptions that can be passed to the `jws.WithJSON()` option.
+ - name: WithKeySuboption
+ comment: |
+ WithKeySuboption describes option types that can be passed to the `jws.WithKey()`
+ option.
+ - name: WithKeySetSuboption
+ comment: |
+ WithKeySetSuboption is a suboption passed to the `jws.WithKeySet()` option
+ - name: ParseOption
+ methods:
+ - readFileOption
+ comment: |
+ ReadFileOption is a type of `Option` that can be passed to `jwe.Parse`
+ - name: ReadFileOption
+ comment: |
+ ReadFileOption is a type of `Option` that can be passed to `jws.ReadFile`
+ - name: SignVerifyParseOption
+ methods:
+ - signOption
+ - verifyOption
+ - parseOption
+ - readFileOption
+ - name: GlobalOption
+ comment: |
+ GlobalOption can be passed to `jws.Settings()` to set global options for the JWS package.
+options:
+ - ident: Key
+ skip_option: true
+ - ident: Serialization
+ skip_option: true
+ - ident: Serialization
+ option_name: WithCompact
+ interface: SignVerifyParseOption
+ constant_value: fmtCompact
+ comment: |
+ WithCompact specifies that the result of `jws.Sign()` is serialized in
+ compact format.
+
+ By default `jws.Sign()` will opt to use compact format, so you usually
+ do not need to specify this option other than to be explicit about it
+ - ident: Detached
+ interface: CompactOption
+ argument_type: bool
+ comment: |
+ WithDetached specifies that the `jws.Message` should be serialized in
+ JWS compact serialization with detached payload. The resulting octet
+ sequence will not contain the payload section.
+
+ - ident: DetachedPayload
+ interface: SignVerifyOption
+ argument_type: '[]byte'
+ comment: |
+ WithDetachedPayload can be used to both sign or verify a JWS message with a
+ detached payload.
+ Note that this option does NOT populate the `b64` header, which is sometimes
+ required by other JWS implementations.
+
+
+ When this option is used for `jws.Sign()`, the first parameter (normally the payload)
+ must be set to `nil`.
+
+ If you have to verify using this option, you should know exactly how and why this works.
+ - ident: Base64Encoder
+ interface: SignVerifyCompactOption
+ argument_type: Base64Encoder
+ comment: |
+ WithBase64Encoder specifies the base64 encoder to be used while signing or
+ verifying the JWS message. By default, the raw URL base64 encoding (no padding)
+ is used.
+ - ident: Message
+ interface: VerifyOption
+ argument_type: '*Message'
+ comment: |
+ WithMessage can be passed to Verify() to obtain the jws.Message upon
+ a successful verification.
+ - ident: KeyUsed
+ interface: VerifyOption
+ argument_type: 'any'
+ comment: |
+ WithKeyUsed allows you to specify the `jws.Verify()` function to
+ return the key used for verification. This may be useful when
+ you specify multiple key sources or if you pass a `jwk.Set`
+ and you want to know which key was successful at verifying the
+ signature.
+
+ `v` must be a pointer to an empty `any`. Do not use
+ `jwk.Key` here unless you are 100% sure that all keys that you
+ have provided are instances of `jwk.Key` (remember that the
+ jwx API allows users to specify a raw key such as *rsa.PublicKey)
+ - ident: ValidateKey
+ interface: SignVerifyOption
+ argument_type: bool
+ comment: |
+ WithValidateKey specifies whether the key used for signing or verification
+ should be validated before using. Note that this means calling
+ `key.Validate()` on the key, which in turn means that your key
+ must be a `jwk.Key` instance, or a key that can be converted to
+ a `jwk.Key` by calling `jwk.Import()`. This means that your
+ custom hardware-backed keys will probably not work.
+
+ You can directly call `key.Validate()` yourself if you need to
+ mix keys that cannot be converted to `jwk.Key`.
+
+ Please also note that use of this option will also result in
+ one extra conversion of raw keys to a `jwk.Key` instance. If you
+ care about shaving off as much as possible, consider using a
+ pre-validated key instead of using this option to validate
+ the key on-demand each time.
+
+ By default, the key is not validated.
+ - ident: InferAlgorithmFromKey
+ interface: WithKeySetSuboption
+ argument_type: bool
+ comment: |
+ WithInferAlgorithmFromKey specifies whether the JWS signing algorithm name
+ should be inferred by looking at the provided key, in case the JWS
+ message or the key does not have a proper `alg` header.
+
+ When this option is set to true, a list of algorithm(s) that is compatible
+ with the key type will be enumerated, and _ALL_ of them will be tried
+ against the key/message pair. If any of them succeeds, the verification
+ will be considered successful.
+
+ Compared to providing explicit `alg` from the key this is slower, and
+ verification may fail to verify if somehow our heuristics are wrong
+ or outdated.
+
+ Also, automatic detection of signature verification methods are always
+ more vulnerable for potential attack vectors.
+
+ It is highly recommended that you fix your key to contain a proper `alg`
+ header field instead of resorting to using this option, but sometimes
+ it just needs to happen.
+ - ident: UseDefault
+ interface: WithKeySetSuboption
+ argument_type: bool
+ comment: |
+ WithUseDefault specifies that if and only if a jwk.Key contains
+ exactly one jwk.Key, that key should be used.
+ - ident: RequireKid
+ interface: WithKeySetSuboption
+ argument_type: bool
+ comment: |
+ WithRequiredKid specifies whether the keys in the jwk.Set should
+ only be matched if the target JWS message's Key ID and the Key ID
+ in the given key matches.
+ - ident: MultipleKeysPerKeyID
+ interface: WithKeySetSuboption
+ argument_type: bool
+ comment: |
+ WithMultipleKeysPerKeyID specifies if we should expect multiple keys
+ to match against a key ID. By default it is assumed that key IDs are
+ unique, i.e. for a given key ID, the key set only contains a single
+ key that has the matching ID. When this option is set to true,
+ multiple keys that match the same key ID in the set can be tried.
+ - ident: Pretty
+ interface: WithJSONSuboption
+ argument_type: bool
+ comment: |
+ WithPretty specifies whether the JSON output should be formatted and
+ indented
+ - ident: KeyProvider
+ interface: VerifyOption
+ argument_type: KeyProvider
+ - ident: Context
+ interface: VerifyOption
+ argument_type: context.Context
+ - ident: ProtectedHeaders
+ interface: WithKeySuboption
+ argument_type: Headers
+ comment: |
+ WithProtected is used with `jws.WithKey()` option when used with `jws.Sign()`
+ to specify a protected header to be attached to the JWS signature.
+
+ It has no effect if used when `jws.WithKey()` is passed to `jws.Verify()`
+ - ident: PublicHeaders
+ interface: WithKeySuboption
+ argument_type: Headers
+ comment: |
+ WithPublic is used with `jws.WithKey()` option when used with `jws.Sign()`
+ to specify a public header to be attached to the JWS signature.
+
+ It has no effect if used when `jws.WithKey()` is passed to `jws.Verify()`
+
+ `jws.Sign()` will result in an error if `jws.WithPublic()` is used
+ and the serialization format is compact serialization.
+ - ident: FS
+ interface: ReadFileOption
+ argument_type: fs.FS
+ comment: |
+ WithFS specifies the source `fs.FS` object to read the file from.
+ - ident: LegacySigners
+ interface: GlobalOption
+ constant_value: true
+ comment: |
+ WithLegacySigners specifies whether the JWS package should use legacy
+ signers for signing JWS messages.
+
+ Usually there's no need to use this option, as the new signers and
+ verifiers are loaded by default.
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/options_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/options_gen.go
new file mode 100644
index 0000000000..b97cf7e8dd
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/options_gen.go
@@ -0,0 +1,449 @@
+// Code generated by tools/cmd/genoptions/main.go. DO NOT EDIT.
+
+package jws
+
+import (
+ "context"
+ "io/fs"
+
+ "github.com/lestrrat-go/option/v2"
+)
+
+type Option = option.Interface
+
+// CompactOption describes options that can be passed to `jws.Compact`
+type CompactOption interface {
+ Option
+ compactOption()
+}
+
+type compactOption struct {
+ Option
+}
+
+func (*compactOption) compactOption() {}
+
+// GlobalOption can be passed to `jws.Settings()` to set global options for the JWS package.
+type GlobalOption interface {
+ Option
+ globalOption()
+}
+
+type globalOption struct {
+ Option
+}
+
+func (*globalOption) globalOption() {}
+
+// ReadFileOption is a type of `Option` that can be passed to `jwe.Parse`
+type ParseOption interface {
+ Option
+ readFileOption()
+}
+
+type parseOption struct {
+ Option
+}
+
+func (*parseOption) readFileOption() {}
+
+// ReadFileOption is a type of `Option` that can be passed to `jws.ReadFile`
+type ReadFileOption interface {
+ Option
+ readFileOption()
+}
+
+type readFileOption struct {
+ Option
+}
+
+func (*readFileOption) readFileOption() {}
+
+// SignOption describes options that can be passed to `jws.Sign`
+type SignOption interface {
+ Option
+ signOption()
+}
+
+type signOption struct {
+ Option
+}
+
+func (*signOption) signOption() {}
+
+// SignVerifyCompactOption describes options that can be passed to either `jws.Verify`,
+// `jws.Sign`, or `jws.Compact`
+type SignVerifyCompactOption interface {
+ Option
+ signOption()
+ verifyOption()
+ compactOption()
+ parseOption()
+}
+
+type signVerifyCompactOption struct {
+ Option
+}
+
+func (*signVerifyCompactOption) signOption() {}
+
+func (*signVerifyCompactOption) verifyOption() {}
+
+func (*signVerifyCompactOption) compactOption() {}
+
+func (*signVerifyCompactOption) parseOption() {}
+
+// SignVerifyOption describes options that can be passed to either `jws.Verify` or `jws.Sign`
+type SignVerifyOption interface {
+ Option
+ signOption()
+ verifyOption()
+ parseOption()
+}
+
+type signVerifyOption struct {
+ Option
+}
+
+func (*signVerifyOption) signOption() {}
+
+func (*signVerifyOption) verifyOption() {}
+
+func (*signVerifyOption) parseOption() {}
+
+type SignVerifyParseOption interface {
+ Option
+ signOption()
+ verifyOption()
+ parseOption()
+ readFileOption()
+}
+
+type signVerifyParseOption struct {
+ Option
+}
+
+func (*signVerifyParseOption) signOption() {}
+
+func (*signVerifyParseOption) verifyOption() {}
+
+func (*signVerifyParseOption) parseOption() {}
+
+func (*signVerifyParseOption) readFileOption() {}
+
+// VerifyOption describes options that can be passed to `jws.Verify`
+type VerifyOption interface {
+ Option
+ verifyOption()
+ parseOption()
+}
+
+type verifyOption struct {
+ Option
+}
+
+func (*verifyOption) verifyOption() {}
+
+func (*verifyOption) parseOption() {}
+
+// JSONSuboption describes suboptions that can be passed to the `jws.WithJSON()` option.
+type WithJSONSuboption interface {
+ Option
+ withJSONSuboption()
+}
+
+type withJSONSuboption struct {
+ Option
+}
+
+func (*withJSONSuboption) withJSONSuboption() {}
+
+// WithKeySetSuboption is a suboption passed to the `jws.WithKeySet()` option
+type WithKeySetSuboption interface {
+ Option
+ withKeySetSuboption()
+}
+
+type withKeySetSuboption struct {
+ Option
+}
+
+func (*withKeySetSuboption) withKeySetSuboption() {}
+
+// WithKeySuboption describes option types that can be passed to the `jws.WithKey()`
+// option.
+type WithKeySuboption interface {
+ Option
+ withKeySuboption()
+}
+
+type withKeySuboption struct {
+ Option
+}
+
+func (*withKeySuboption) withKeySuboption() {}
+
+type identBase64Encoder struct{}
+type identContext struct{}
+type identDetached struct{}
+type identDetachedPayload struct{}
+type identFS struct{}
+type identInferAlgorithmFromKey struct{}
+type identKey struct{}
+type identKeyProvider struct{}
+type identKeyUsed struct{}
+type identLegacySigners struct{}
+type identMessage struct{}
+type identMultipleKeysPerKeyID struct{}
+type identPretty struct{}
+type identProtectedHeaders struct{}
+type identPublicHeaders struct{}
+type identRequireKid struct{}
+type identSerialization struct{}
+type identUseDefault struct{}
+type identValidateKey struct{}
+
+func (identBase64Encoder) String() string {
+ return "WithBase64Encoder"
+}
+
+func (identContext) String() string {
+ return "WithContext"
+}
+
+func (identDetached) String() string {
+ return "WithDetached"
+}
+
+func (identDetachedPayload) String() string {
+ return "WithDetachedPayload"
+}
+
+func (identFS) String() string {
+ return "WithFS"
+}
+
+func (identInferAlgorithmFromKey) String() string {
+ return "WithInferAlgorithmFromKey"
+}
+
+func (identKey) String() string {
+ return "WithKey"
+}
+
+func (identKeyProvider) String() string {
+ return "WithKeyProvider"
+}
+
+func (identKeyUsed) String() string {
+ return "WithKeyUsed"
+}
+
+func (identLegacySigners) String() string {
+ return "WithLegacySigners"
+}
+
+func (identMessage) String() string {
+ return "WithMessage"
+}
+
+func (identMultipleKeysPerKeyID) String() string {
+ return "WithMultipleKeysPerKeyID"
+}
+
+func (identPretty) String() string {
+ return "WithPretty"
+}
+
+func (identProtectedHeaders) String() string {
+ return "WithProtectedHeaders"
+}
+
+func (identPublicHeaders) String() string {
+ return "WithPublicHeaders"
+}
+
+func (identRequireKid) String() string {
+ return "WithRequireKid"
+}
+
+func (identSerialization) String() string {
+ return "WithSerialization"
+}
+
+func (identUseDefault) String() string {
+ return "WithUseDefault"
+}
+
+func (identValidateKey) String() string {
+ return "WithValidateKey"
+}
+
+// WithBase64Encoder specifies the base64 encoder to be used while signing or
+// verifying the JWS message. By default, the raw URL base64 encoding (no padding)
+// is used.
+func WithBase64Encoder(v Base64Encoder) SignVerifyCompactOption {
+ return &signVerifyCompactOption{option.New(identBase64Encoder{}, v)}
+}
+
+func WithContext(v context.Context) VerifyOption {
+ return &verifyOption{option.New(identContext{}, v)}
+}
+
+// WithDetached specifies that the `jws.Message` should be serialized in
+// JWS compact serialization with detached payload. The resulting octet
+// sequence will not contain the payload section.
+func WithDetached(v bool) CompactOption {
+ return &compactOption{option.New(identDetached{}, v)}
+}
+
+// WithDetachedPayload can be used to both sign or verify a JWS message with a
+// detached payload.
+// Note that this option does NOT populate the `b64` header, which is sometimes
+// required by other JWS implementations.
+//
+// When this option is used for `jws.Sign()`, the first parameter (normally the payload)
+// must be set to `nil`.
+//
+// If you have to verify using this option, you should know exactly how and why this works.
+func WithDetachedPayload(v []byte) SignVerifyOption {
+ return &signVerifyOption{option.New(identDetachedPayload{}, v)}
+}
+
+// WithFS specifies the source `fs.FS` object to read the file from.
+func WithFS(v fs.FS) ReadFileOption {
+ return &readFileOption{option.New(identFS{}, v)}
+}
+
+// WithInferAlgorithmFromKey specifies whether the JWS signing algorithm name
+// should be inferred by looking at the provided key, in case the JWS
+// message or the key does not have a proper `alg` header.
+//
+// When this option is set to true, a list of algorithm(s) that is compatible
+// with the key type will be enumerated, and _ALL_ of them will be tried
+// against the key/message pair. If any of them succeeds, the verification
+// will be considered successful.
+//
+// Compared to providing explicit `alg` from the key this is slower, and
+// verification may fail to verify if somehow our heuristics are wrong
+// or outdated.
+//
+// Also, automatic detection of signature verification methods are always
+// more vulnerable for potential attack vectors.
+//
+// It is highly recommended that you fix your key to contain a proper `alg`
+// header field instead of resorting to using this option, but sometimes
+// it just needs to happen.
+func WithInferAlgorithmFromKey(v bool) WithKeySetSuboption {
+ return &withKeySetSuboption{option.New(identInferAlgorithmFromKey{}, v)}
+}
+
+func WithKeyProvider(v KeyProvider) VerifyOption {
+ return &verifyOption{option.New(identKeyProvider{}, v)}
+}
+
+// WithKeyUsed allows you to specify the `jws.Verify()` function to
+// return the key used for verification. This may be useful when
+// you specify multiple key sources or if you pass a `jwk.Set`
+// and you want to know which key was successful at verifying the
+// signature.
+//
+// `v` must be a pointer to an empty `any`. Do not use
+// `jwk.Key` here unless you are 100% sure that all keys that you
+// have provided are instances of `jwk.Key` (remember that the
+// jwx API allows users to specify a raw key such as *rsa.PublicKey)
+func WithKeyUsed(v any) VerifyOption {
+ return &verifyOption{option.New(identKeyUsed{}, v)}
+}
+
+// WithLegacySigners specifies whether the JWS package should use legacy
+// signers for signing JWS messages.
+//
+// Usually there's no need to use this option, as the new signers and
+// verifiers are loaded by default.
+func WithLegacySigners() GlobalOption {
+ return &globalOption{option.New(identLegacySigners{}, true)}
+}
+
+// WithMessage can be passed to Verify() to obtain the jws.Message upon
+// a successful verification.
+func WithMessage(v *Message) VerifyOption {
+ return &verifyOption{option.New(identMessage{}, v)}
+}
+
+// WithMultipleKeysPerKeyID specifies if we should expect multiple keys
+// to match against a key ID. By default it is assumed that key IDs are
+// unique, i.e. for a given key ID, the key set only contains a single
+// key that has the matching ID. When this option is set to true,
+// multiple keys that match the same key ID in the set can be tried.
+func WithMultipleKeysPerKeyID(v bool) WithKeySetSuboption {
+ return &withKeySetSuboption{option.New(identMultipleKeysPerKeyID{}, v)}
+}
+
+// WithPretty specifies whether the JSON output should be formatted and
+// indented
+func WithPretty(v bool) WithJSONSuboption {
+ return &withJSONSuboption{option.New(identPretty{}, v)}
+}
+
+// WithProtected is used with `jws.WithKey()` option when used with `jws.Sign()`
+// to specify a protected header to be attached to the JWS signature.
+//
+// It has no effect if used when `jws.WithKey()` is passed to `jws.Verify()`
+func WithProtectedHeaders(v Headers) WithKeySuboption {
+ return &withKeySuboption{option.New(identProtectedHeaders{}, v)}
+}
+
+// WithPublic is used with `jws.WithKey()` option when used with `jws.Sign()`
+// to specify a public header to be attached to the JWS signature.
+//
+// It has no effect if used when `jws.WithKey()` is passed to `jws.Verify()`
+//
+// `jws.Sign()` will result in an error if `jws.WithPublic()` is used
+// and the serialization format is compact serialization.
+func WithPublicHeaders(v Headers) WithKeySuboption {
+ return &withKeySuboption{option.New(identPublicHeaders{}, v)}
+}
+
+// WithRequiredKid specifies whether the keys in the jwk.Set should
+// only be matched if the target JWS message's Key ID and the Key ID
+// in the given key matches.
+func WithRequireKid(v bool) WithKeySetSuboption {
+ return &withKeySetSuboption{option.New(identRequireKid{}, v)}
+}
+
+// WithCompact specifies that the result of `jws.Sign()` is serialized in
+// compact format.
+//
+// By default `jws.Sign()` will opt to use compact format, so you usually
+// do not need to specify this option other than to be explicit about it
+func WithCompact() SignVerifyParseOption {
+ return &signVerifyParseOption{option.New(identSerialization{}, fmtCompact)}
+}
+
+// WithUseDefault specifies that if and only if a jwk.Key contains
+// exactly one jwk.Key, that key should be used.
+func WithUseDefault(v bool) WithKeySetSuboption {
+ return &withKeySetSuboption{option.New(identUseDefault{}, v)}
+}
+
+// WithValidateKey specifies whether the key used for signing or verification
+// should be validated before using. Note that this means calling
+// `key.Validate()` on the key, which in turn means that your key
+// must be a `jwk.Key` instance, or a key that can be converted to
+// a `jwk.Key` by calling `jwk.Import()`. This means that your
+// custom hardware-backed keys will probably not work.
+//
+// You can directly call `key.Validate()` yourself if you need to
+// mix keys that cannot be converted to `jwk.Key`.
+//
+// Please also note that use of this option will also result in
+// one extra conversion of raw keys to a `jwk.Key` instance. If you
+// care about shaving off as much as possible, consider using a
+// pre-validated key instead of using this option to validate
+// the key on-demand each time.
+//
+// By default, the key is not validated.
+func WithValidateKey(v bool) SignVerifyOption {
+ return &signVerifyOption{option.New(identValidateKey{}, v)}
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/sign_context.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/sign_context.go
new file mode 100644
index 0000000000..49abe0abca
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/sign_context.go
@@ -0,0 +1,141 @@
+package jws
+
+import (
+ "fmt"
+
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+ "github.com/lestrrat-go/jwx/v3/internal/pool"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+)
+
+type signContext struct {
+ format int
+ detached bool
+ validateKey bool
+ payload []byte
+ encoder Base64Encoder
+ none *signatureBuilder // special signature builder
+ sigbuilders []*signatureBuilder
+}
+
+var signContextPool = pool.New[*signContext](allocSignContext, freeSignContext)
+
+func allocSignContext() *signContext {
+ return &signContext{
+ format: fmtCompact,
+ sigbuilders: make([]*signatureBuilder, 0, 1),
+ encoder: base64.DefaultEncoder(),
+ }
+}
+
+func freeSignContext(ctx *signContext) *signContext {
+ ctx.format = fmtCompact
+ for _, sb := range ctx.sigbuilders {
+ signatureBuilderPool.Put(sb)
+ }
+ ctx.sigbuilders = ctx.sigbuilders[:0]
+ ctx.detached = false
+ ctx.validateKey = false
+ ctx.encoder = base64.DefaultEncoder()
+ ctx.none = nil
+ ctx.payload = nil
+
+ return ctx
+}
+
+func (sc *signContext) ProcessOptions(options []SignOption) error {
+ for _, option := range options {
+ switch option.Ident() {
+ case identSerialization{}:
+ if err := option.Value(&sc.format); err != nil {
+ return signerr(`failed to retrieve serialization option value: %w`, err)
+ }
+ case identInsecureNoSignature{}:
+ var data withInsecureNoSignature
+ if err := option.Value(&data); err != nil {
+ return signerr(`failed to retrieve insecure-no-signature option value: %w`, err)
+ }
+ sb := signatureBuilderPool.Get()
+ sb.alg = jwa.NoSignature()
+ sb.protected = data.protected
+ sb.signer = noneSigner{}
+ sc.none = sb
+ sc.sigbuilders = append(sc.sigbuilders, sb)
+ case identKey{}:
+ var data *withKey
+ if err := option.Value(&data); err != nil {
+ return signerr(`jws.Sign: invalid value for WithKey option: %w`, err)
+ }
+
+ alg, ok := data.alg.(jwa.SignatureAlgorithm)
+ if !ok {
+ return signerr(`expected algorithm to be of type jwa.SignatureAlgorithm but got (%[1]q, %[1]T)`, data.alg)
+ }
+
+ // No, we don't accept "none" here.
+ if alg == jwa.NoSignature() {
+ return signerr(`"none" (jwa.NoSignature) cannot be used with jws.WithKey`)
+ }
+
+ sb := signatureBuilderPool.Get()
+ sb.alg = alg
+ sb.protected = data.protected
+ sb.key = data.key
+ sb.public = data.public
+
+ s2, err := SignerFor(alg)
+ if err == nil {
+ sb.signer2 = s2
+ } else {
+ s1, err := legacySignerFor(alg)
+ if err != nil {
+ sb.signer2 = defaultSigner{alg: alg}
+ } else {
+ sb.signer = s1
+ }
+ }
+
+ sc.sigbuilders = append(sc.sigbuilders, sb)
+ case identDetachedPayload{}:
+ if sc.payload != nil {
+ return signerr(`payload must be nil when jws.WithDetachedPayload() is specified`)
+ }
+ if err := option.Value(&sc.payload); err != nil {
+ return signerr(`failed to retrieve detached payload option value: %w`, err)
+ }
+ sc.detached = true
+ case identValidateKey{}:
+ if err := option.Value(&sc.validateKey); err != nil {
+ return signerr(`failed to retrieve validate-key option value: %w`, err)
+ }
+ case identBase64Encoder{}:
+ if err := option.Value(&sc.encoder); err != nil {
+ return signerr(`failed to retrieve base64-encoder option value: %w`, err)
+ }
+ }
+ }
+ return nil
+}
+
+func (sc *signContext) PopulateMessage(m *Message) error {
+ m.payload = sc.payload
+ m.signatures = make([]*Signature, 0, len(sc.sigbuilders))
+
+ for i, sb := range sc.sigbuilders {
+ // Create signature for each builders
+ if sc.validateKey {
+ if err := validateKeyBeforeUse(sb.key); err != nil {
+ return fmt.Errorf(`failed to validate key for signature %d: %w`, i, err)
+ }
+ }
+
+ sig, err := sb.Build(sc, m.payload)
+ if err != nil {
+ return fmt.Errorf(`failed to build signature %d: %w`, i, err)
+ }
+
+ m.signatures = append(m.signatures, sig)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/signature_builder.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/signature_builder.go
new file mode 100644
index 0000000000..fc09a69367
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/signature_builder.go
@@ -0,0 +1,118 @@
+package jws
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+ "github.com/lestrrat-go/jwx/v3/internal/pool"
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jwk"
+ "github.com/lestrrat-go/jwx/v3/jws/jwsbb"
+)
+
+var signatureBuilderPool = pool.New[*signatureBuilder](allocSignatureBuilder, freeSignatureBuilder)
+
+// signatureBuilder is a transient object that is used to build
+// a single JWS signature.
+//
+// In a multi-signature JWS message, each message is paired with
+// the following:
+// - a signer (the object that takes a buffer and key and generates a signature)
+// - a key (the key that is used to sign the payload)
+// - protected headers (the headers that are protected by the signature)
+// - public headers (the headers that are not protected by the signature)
+//
+// This object stores all of this information in one place.
+//
+// This object does NOT take care of any synchronization, because it is
+// meant to be used in a single-threaded context.
+type signatureBuilder struct {
+ alg jwa.SignatureAlgorithm
+ signer Signer
+ signer2 Signer2
+ key any
+ protected Headers
+ public Headers
+}
+
+func allocSignatureBuilder() *signatureBuilder {
+ return &signatureBuilder{}
+}
+
+func freeSignatureBuilder(sb *signatureBuilder) *signatureBuilder {
+ sb.alg = jwa.EmptySignatureAlgorithm()
+ sb.signer = nil
+ sb.signer2 = nil
+ sb.key = nil
+ sb.protected = nil
+ sb.public = nil
+ return sb
+}
+
+func (sb *signatureBuilder) Build(sc *signContext, payload []byte) (*Signature, error) {
+ protected := sb.protected
+ if protected == nil {
+ protected = NewHeaders()
+ }
+
+ if err := protected.Set(AlgorithmKey, sb.alg); err != nil {
+ return nil, signerr(`failed to set "alg" header: %w`, err)
+ }
+
+ if key, ok := sb.key.(jwk.Key); ok {
+ if kid, ok := key.KeyID(); ok && kid != "" {
+ if err := protected.Set(KeyIDKey, kid); err != nil {
+ return nil, signerr(`failed to set "kid" header: %w`, err)
+ }
+ }
+ }
+
+ hdrs, err := mergeHeaders(sb.public, protected)
+ if err != nil {
+ return nil, signerr(`failed to merge headers: %w`, err)
+ }
+
+ // raw, json format headers
+ hdrbuf, err := json.Marshal(hdrs)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to marshal headers: %w`, err)
+ }
+
+ // check if we need to base64 encode the payload
+ b64 := getB64Value(hdrs)
+ if !b64 && !sc.detached {
+ if bytes.IndexByte(payload, tokens.Period) != -1 {
+ return nil, fmt.Errorf(`payload must not contain a "."`)
+ }
+ }
+
+ combined := jwsbb.SignBuffer(nil, hdrbuf, payload, sc.encoder, b64)
+
+ var sig Signature
+ sig.protected = protected
+ sig.headers = sb.public
+
+ if sb.signer2 != nil {
+ signature, err := sb.signer2.Sign(sb.key, combined)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to sign payload: %w`, err)
+ }
+ sig.signature = signature
+ return &sig, nil
+ }
+
+ if sb.signer == nil {
+ panic("can't get here")
+ }
+
+ signature, err := sb.signer.Sign(combined, sb.key)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to sign payload: %w`, err)
+ }
+
+ sig.signature = signature
+
+ return &sig, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/signer.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/signer.go
new file mode 100644
index 0000000000..340666931f
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/signer.go
@@ -0,0 +1,158 @@
+package jws
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/lestrrat-go/jwx/v3/jwa"
+)
+
+// Signer2 is an interface that represents a per-signature algorithm signing
+// operation.
+type Signer2 interface {
+ Algorithm() jwa.SignatureAlgorithm
+
+ // Sign takes a key and a payload, and returns the signature for the payload.
+ // The key type is restricted by the signature algorithm that this
+ // signer is associated with.
+ //
+ // (Note to users of legacy Signer interface: the method signature
+ // is different from the legacy Signer interface)
+ Sign(key any, payload []byte) ([]byte, error)
+}
+
+var muSigner2DB sync.RWMutex
+var signer2DB = make(map[jwa.SignatureAlgorithm]Signer2)
+
+type SignerFactory interface {
+ Create() (Signer, error)
+}
+type SignerFactoryFn func() (Signer, error)
+
+func (fn SignerFactoryFn) Create() (Signer, error) {
+ return fn()
+}
+
+// SignerFor returns a Signer2 for the given signature algorithm.
+//
+// Currently, this function will never fail. It will always return a
+// valid Signer2 object. The heuristic is as follows:
+// 1. If a Signer2 is registered for the given algorithm, it will return that.
+// 2. If a legacy Signer(Factory) is registered for the given algorithm, it will
+// return a Signer2 that wraps the legacy Signer.
+// 3. If no Signer2 or legacy Signer(Factory) is registered, it will return a
+// default signer that uses jwsbb.Sign.
+//
+// jwsbb.Sign knows how to handle a static set of algorithms, so if the
+// algorithm is not supported, it will return an error when you call
+// `Sign` on the default signer.
+func SignerFor(alg jwa.SignatureAlgorithm) (Signer2, error) {
+ muSigner2DB.RLock()
+ defer muSigner2DB.RUnlock()
+
+ signer, ok := signer2DB[alg]
+ if ok {
+ return signer, nil
+ }
+
+ s1, err := legacySignerFor(alg)
+ if err == nil {
+ return signerAdapter{signer: s1}, nil
+ }
+
+ return defaultSigner{alg: alg}, nil
+}
+
+var muSignerDB sync.RWMutex
+var signerDB = make(map[jwa.SignatureAlgorithm]SignerFactory)
+
+// RegisterSigner is used to register a signer for the given
+// algorithm.
+//
+// Please note that this function is intended to be passed a
+// signer object as its second argument, but due to historical
+// reasons the function signature is defined as taking `any` type.
+//
+// You should create a signer object that implements the `Signer2`
+// interface to register a signer, unless you have legacy code that
+// plugged into the `SignerFactory` interface.
+//
+// Unlike the `UnregisterSigner` function, this function automatically
+// calls `jwa.RegisterSignatureAlgorithm` to register the algorithm
+// in this module's algorithm database.
+func RegisterSigner(alg jwa.SignatureAlgorithm, f any) error {
+ jwa.RegisterSignatureAlgorithm(alg)
+ switch s := f.(type) {
+ case Signer2:
+ muSigner2DB.Lock()
+ signer2DB[alg] = s
+ muSigner2DB.Unlock()
+
+ // delete the other signer, if there was one
+ muSignerDB.Lock()
+ delete(signerDB, alg)
+ muSignerDB.Unlock()
+ case SignerFactory:
+ muSignerDB.Lock()
+ signerDB[alg] = s
+ muSignerDB.Unlock()
+
+ // Remove previous signer, if there was one
+ removeSigner(alg)
+
+ muSigner2DB.Lock()
+ delete(signer2DB, alg)
+ muSigner2DB.Unlock()
+ default:
+ return fmt.Errorf(`jws.RegisterSigner: unsupported type %T for algorithm %q`, f, alg)
+ }
+ return nil
+}
+
+// UnregisterSigner removes the signer factory associated with
+// the given algorithm, as well as the signer instance created
+// by the factory.
+//
+// Note that when you call this function, the algorithm itself is
+// not automatically unregistered from this module's algorithm database.
+// This is because the algorithm may still be required for verification or
+// some other operation (however unlikely, it is still possible).
+// Therefore, in order to completely remove the algorithm, you must
+// call `jwa.UnregisterSignatureAlgorithm` yourself.
+func UnregisterSigner(alg jwa.SignatureAlgorithm) {
+ muSigner2DB.Lock()
+ delete(signer2DB, alg)
+ muSigner2DB.Unlock()
+
+ muSignerDB.Lock()
+ delete(signerDB, alg)
+ muSignerDB.Unlock()
+ // Remove previous signer
+ removeSigner(alg)
+}
+
+// NewSigner creates a signer that signs payloads using the given signature algorithm.
+// This function is deprecated. You should use `SignerFor()` instead.
+//
+// This function only exists for backwards compatibility, but will not work
+// unless you enable the legacy support mode by calling jws.Settings(jws.WithLegacySigners(true)).
+func NewSigner(alg jwa.SignatureAlgorithm) (Signer, error) {
+ muSignerDB.RLock()
+ f, ok := signerDB[alg]
+ muSignerDB.RUnlock()
+
+ if ok {
+ return f.Create()
+ }
+ return nil, fmt.Errorf(`jws.NewSigner: unsupported signature algorithm "%s"`, alg)
+}
+
+type noneSigner struct{}
+
+func (noneSigner) Algorithm() jwa.SignatureAlgorithm {
+ return jwa.NoSignature()
+}
+
+func (noneSigner) Sign([]byte, any) ([]byte, error) {
+ return nil, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/verifier.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/verifier.go
new file mode 100644
index 0000000000..70b91c2938
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/verifier.go
@@ -0,0 +1,154 @@
+package jws
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jws/jwsbb"
+)
+
+type defaultVerifier struct {
+ alg jwa.SignatureAlgorithm
+}
+
+func (v defaultVerifier) Algorithm() jwa.SignatureAlgorithm {
+ return v.alg
+}
+
+func (v defaultVerifier) Verify(key any, payload, signature []byte) error {
+ if err := jwsbb.Verify(key, v.alg.String(), payload, signature); err != nil {
+ return verifyError{verificationError{err}}
+ }
+ return nil
+}
+
+type Verifier2 interface {
+ Verify(key any, payload, signature []byte) error
+}
+
+var muVerifier2DB sync.RWMutex
+var verifier2DB = make(map[jwa.SignatureAlgorithm]Verifier2)
+
+type verifierAdapter struct {
+ v Verifier
+}
+
+func (v verifierAdapter) Verify(key any, payload, signature []byte) error {
+ if err := v.v.Verify(payload, signature, key); err != nil {
+ return verifyError{verificationError{err}}
+ }
+ return nil
+}
+
+// VerifierFor returns a Verifier2 for the given signature algorithm.
+//
+// Currently, this function will never fail. It will always return a
+// valid Verifier2 object. The heuristic is as follows:
+// 1. If a Verifier2 is registered for the given algorithm, it will return that.
+// 2. If a legacy Verifier(Factory) is registered for the given algorithm, it will
+// return a Verifier2 that wraps the legacy Verifier.
+// 3. If no Verifier2 or legacy Verifier(Factory) is registered, it will return a
+// default verifier that uses jwsbb.Verify.
+//
+// jwsbb.Verify knows how to handle a static set of algorithms, so if the
+// algorithm is not supported, it will return an error when you call
+// `Verify` on the default verifier.
+func VerifierFor(alg jwa.SignatureAlgorithm) (Verifier2, error) {
+ muVerifier2DB.RLock()
+ defer muVerifier2DB.RUnlock()
+
+ v2, ok := verifier2DB[alg]
+ if ok {
+ return v2, nil
+ }
+
+ v1, err := NewVerifier(alg)
+ if err == nil {
+ return verifierAdapter{v: v1}, nil
+ }
+
+ return defaultVerifier{alg: alg}, nil
+}
+
+type VerifierFactory interface {
+ Create() (Verifier, error)
+}
+type VerifierFactoryFn func() (Verifier, error)
+
+func (fn VerifierFactoryFn) Create() (Verifier, error) {
+ return fn()
+}
+
+var muVerifierDB sync.RWMutex
+var verifierDB = make(map[jwa.SignatureAlgorithm]VerifierFactory)
+
+// RegisterVerifier is used to register a verifier for the given
+// algorithm.
+//
+// Please note that this function is intended to be passed a
+// verifier object as its second argument, but due to historical
+// reasons the function signature is defined as taking `any` type.
+//
+// You should create a signer object that implements the `Verifier2`
+// interface to register a signer, unless you have legacy code that
+// plugged into the `SignerFactory` interface.
+//
+// Unlike the `UnregisterVerifier` function, this function automatically
+// calls `jwa.RegisterSignatureAlgorithm` to register the algorithm
+// in this module's algorithm database.
+func RegisterVerifier(alg jwa.SignatureAlgorithm, f any) error {
+ jwa.RegisterSignatureAlgorithm(alg)
+ switch v := f.(type) {
+ case Verifier2:
+ muVerifier2DB.Lock()
+ verifier2DB[alg] = v
+ muVerifier2DB.Unlock()
+
+ muVerifierDB.Lock()
+ delete(verifierDB, alg)
+ muVerifierDB.Unlock()
+ case VerifierFactory:
+ muVerifierDB.Lock()
+ verifierDB[alg] = v
+ muVerifierDB.Unlock()
+
+ muVerifier2DB.Lock()
+ delete(verifier2DB, alg)
+ muVerifier2DB.Unlock()
+ default:
+ return fmt.Errorf(`jws.RegisterVerifier: unsupported type %T for algorithm %q`, f, alg)
+ }
+ return nil
+}
+
+// UnregisterVerifier removes the signer factory associated with
+// the given algorithm.
+//
+// Note that when you call this function, the algorithm itself is
+// not automatically unregistered from this module's algorithm database.
+// This is because the algorithm may still be required for signing or
+// some other operation (however unlikely, it is still possible).
+// Therefore, in order to completely remove the algorithm, you must
+// call `jwa.UnregisterSignatureAlgorithm` yourself.
+func UnregisterVerifier(alg jwa.SignatureAlgorithm) {
+ muVerifier2DB.Lock()
+ delete(verifier2DB, alg)
+ muVerifier2DB.Unlock()
+
+ muVerifierDB.Lock()
+ delete(verifierDB, alg)
+ muVerifierDB.Unlock()
+}
+
+// NewVerifier creates a verifier that signs payloads using the given signature algorithm.
+func NewVerifier(alg jwa.SignatureAlgorithm) (Verifier, error) {
+ muVerifierDB.RLock()
+ f, ok := verifierDB[alg]
+ muVerifierDB.RUnlock()
+
+ if ok {
+ return f.Create()
+ }
+ return nil, fmt.Errorf(`jws.NewVerifier: unsupported signature algorithm "%s"`, alg)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/verify_context.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/verify_context.go
new file mode 100644
index 0000000000..b4807d569c
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/verify_context.go
@@ -0,0 +1,211 @@
+package jws
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/lestrrat-go/blackmagic"
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+ "github.com/lestrrat-go/jwx/v3/internal/pool"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jws/jwsbb"
+)
+
+// verifyContext holds the state during JWS verification
+type verifyContext struct {
+ parseOptions []ParseOption
+ dst *Message
+ detachedPayload []byte
+ keyProviders []KeyProvider
+ keyUsed any
+ validateKey bool
+ encoder Base64Encoder
+ //nolint:containedctx
+ ctx context.Context
+}
+
+var verifyContextPool = pool.New[*verifyContext](allocVerifyContext, freeVerifyContext)
+
+func allocVerifyContext() *verifyContext {
+ return &verifyContext{
+ encoder: base64.DefaultEncoder(),
+ ctx: context.Background(),
+ }
+}
+
+func freeVerifyContext(vc *verifyContext) *verifyContext {
+ vc.parseOptions = vc.parseOptions[:0]
+ vc.dst = nil
+ vc.detachedPayload = nil
+ vc.keyProviders = vc.keyProviders[:0]
+ vc.keyUsed = nil
+ vc.validateKey = false
+ vc.encoder = base64.DefaultEncoder()
+ vc.ctx = context.Background()
+ return vc
+}
+
+func (vc *verifyContext) ProcessOptions(options []VerifyOption) error {
+ //nolint:forcetypeassert
+ for _, option := range options {
+ switch option.Ident() {
+ case identMessage{}:
+ if err := option.Value(&vc.dst); err != nil {
+ return verifyerr(`invalid value for option WithMessage: %w`, err)
+ }
+ case identDetachedPayload{}:
+ if err := option.Value(&vc.detachedPayload); err != nil {
+ return verifyerr(`invalid value for option WithDetachedPayload: %w`, err)
+ }
+ case identKey{}:
+ var pair *withKey
+ if err := option.Value(&pair); err != nil {
+ return verifyerr(`invalid value for option WithKey: %w`, err)
+ }
+ vc.keyProviders = append(vc.keyProviders, &staticKeyProvider{
+ alg: pair.alg.(jwa.SignatureAlgorithm),
+ key: pair.key,
+ })
+ case identKeyProvider{}:
+ var kp KeyProvider
+ if err := option.Value(&kp); err != nil {
+ return verifyerr(`failed to retrieve key-provider option value: %w`, err)
+ }
+ vc.keyProviders = append(vc.keyProviders, kp)
+ case identKeyUsed{}:
+ if err := option.Value(&vc.keyUsed); err != nil {
+ return verifyerr(`failed to retrieve key-used option value: %w`, err)
+ }
+ case identContext{}:
+ if err := option.Value(&vc.ctx); err != nil {
+ return verifyerr(`failed to retrieve context option value: %w`, err)
+ }
+ case identValidateKey{}:
+ if err := option.Value(&vc.validateKey); err != nil {
+ return verifyerr(`failed to retrieve validate-key option value: %w`, err)
+ }
+ case identSerialization{}:
+ vc.parseOptions = append(vc.parseOptions, option.(ParseOption))
+ case identBase64Encoder{}:
+ if err := option.Value(&vc.encoder); err != nil {
+ return verifyerr(`failed to retrieve base64-encoder option value: %w`, err)
+ }
+ default:
+ return verifyerr(`invalid jws.VerifyOption %q passed`, `With`+strings.TrimPrefix(fmt.Sprintf(`%T`, option.Ident()), `jws.ident`))
+ }
+ }
+
+ if len(vc.keyProviders) < 1 {
+ return verifyerr(`no key providers have been provided (see jws.WithKey(), jws.WithKeySet(), jws.WithVerifyAuto(), and jws.WithKeyProvider()`)
+ }
+
+ return nil
+}
+
+func (vc *verifyContext) VerifyMessage(buf []byte) ([]byte, error) {
+ msg, err := Parse(buf, vc.parseOptions...)
+ if err != nil {
+ return nil, verifyerr(`failed to parse jws: %w`, err)
+ }
+ defer msg.clearRaw()
+
+ if vc.detachedPayload != nil {
+ if len(msg.payload) != 0 {
+ return nil, verifyerr(`can't specify detached payload for JWS with payload`)
+ }
+
+ msg.payload = vc.detachedPayload
+ }
+
+ verifyBuf := pool.ByteSlice().Get()
+
+ // Because deferred functions bind to the current value of the variable,
+ // we can't just use `defer pool.ByteSlice().Put(verifyBuf)` here.
+ // Instead, we use a closure to reference the _variable_.
+ // it would be better if we could call it directly, but there are
+ // too many place we may return from this function
+ defer func() {
+ pool.ByteSlice().Put(verifyBuf)
+ }()
+
+ errs := pool.ErrorSlice().Get()
+ defer func() {
+ pool.ErrorSlice().Put(errs)
+ }()
+ for idx, sig := range msg.signatures {
+ var rawHeaders []byte
+ if rbp, ok := sig.protected.(interface{ rawBuffer() []byte }); ok {
+ if raw := rbp.rawBuffer(); raw != nil {
+ rawHeaders = raw
+ }
+ }
+
+ if rawHeaders == nil {
+ protected, err := json.Marshal(sig.protected)
+ if err != nil {
+ return nil, verifyerr(`failed to marshal "protected" for signature #%d: %w`, idx+1, err)
+ }
+ rawHeaders = protected
+ }
+
+ verifyBuf = verifyBuf[:0]
+ verifyBuf = jwsbb.SignBuffer(verifyBuf, rawHeaders, msg.payload, vc.encoder, msg.b64)
+ for i, kp := range vc.keyProviders {
+ var sink algKeySink
+ if err := kp.FetchKeys(vc.ctx, &sink, sig, msg); err != nil {
+ return nil, verifyerr(`key provider %d failed: %w`, i, err)
+ }
+
+ for _, pair := range sink.list {
+ // alg is converted here because pair.alg is of type jwa.KeyAlgorithm.
+ // this may seem ugly, but we're trying to avoid declaring separate
+ // structs for `alg jwa.KeyEncryptionAlgorithm` and `alg jwa.SignatureAlgorithm`
+ //nolint:forcetypeassert
+ alg := pair.alg.(jwa.SignatureAlgorithm)
+ key := pair.key
+
+ if err := vc.tryKey(verifyBuf, alg, key, msg, sig); err != nil {
+ errs = append(errs, verifyerr(`failed to verify signature #%d with key %T: %w`, idx+1, key, err))
+ continue
+ }
+
+ return msg.payload, nil
+ }
+ }
+ errs = append(errs, verifyerr(`signature #%d could not be verified with any of the keys`, idx+1))
+ }
+ return nil, verifyerr(`could not verify message using any of the signatures or keys: %w`, errors.Join(errs...))
+}
+
+func (vc *verifyContext) tryKey(verifyBuf []byte, alg jwa.SignatureAlgorithm, key any, msg *Message, sig *Signature) error {
+ if vc.validateKey {
+ if err := validateKeyBeforeUse(key); err != nil {
+ return fmt.Errorf(`failed to validate key before signing: %w`, err)
+ }
+ }
+
+ verifier, err := VerifierFor(alg)
+ if err != nil {
+ return fmt.Errorf(`failed to get verifier for algorithm %q: %w`, alg, err)
+ }
+
+ if err := verifier.Verify(key, verifyBuf, sig.signature); err != nil {
+ return verificationError{err}
+ }
+
+ // Verification succeeded
+ if vc.keyUsed != nil {
+ if err := blackmagic.AssignIfCompatible(vc.keyUsed, key); err != nil {
+ return fmt.Errorf(`failed to assign used key (%T) to %T: %w`, key, vc.keyUsed, err)
+ }
+ }
+
+ if vc.dst != nil {
+ *(vc.dst) = *msg
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwt/BUILD.bazel
new file mode 100644
index 0000000000..86197d348a
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/BUILD.bazel
@@ -0,0 +1,72 @@
+load("@rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "jwt",
+ srcs = [
+ "builder_gen.go",
+ "errors.go",
+ "filter.go",
+ "fastpath.go",
+ "http.go",
+ "interface.go",
+ "io.go",
+ "jwt.go",
+ "options.go",
+ "options_gen.go",
+ "serialize.go",
+ "token_gen.go",
+ "token_options.go",
+ "token_options_gen.go",
+ "validate.go",
+ ],
+ importpath = "github.com/lestrrat-go/jwx/v3/jwt",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//:jwx",
+ "//internal/base64",
+ "//transform",
+ "//internal/json",
+ "//internal/tokens",
+ "//internal/pool",
+ "//jwa",
+ "//jwe",
+ "//jwk",
+ "//jws",
+ "//jws/jwsbb",
+ "//jwt/internal/types",
+ "//jwt/internal/errors",
+ "@com_github_lestrrat_go_blackmagic//:blackmagic",
+ "@com_github_lestrrat_go_option_v2//:option",
+ ],
+)
+
+go_test(
+ name = "jwt_test",
+ srcs = [
+ "jwt_test.go",
+ "options_gen_test.go",
+ "token_options_test.go",
+ "token_test.go",
+ "validate_test.go",
+ "verify_test.go",
+ ],
+ embed = [":jwt"],
+ deps = [
+ "//internal/json",
+ "//internal/jwxtest",
+ "//jwa",
+ "//jwe",
+ "//jwk",
+ "//jwk/ecdsa",
+ "//jws",
+ "//jwt/internal/types",
+ "@com_github_lestrrat_go_httprc_v3//:httprc",
+ "@com_github_stretchr_testify//require",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":jwt",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/README.md b/vendor/github.com/lestrrat-go/jwx/v3/jwt/README.md
new file mode 100644
index 0000000000..a6b5664c94
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/README.md
@@ -0,0 +1,224 @@
+# JWT [](https://pkg.go.dev/github.com/lestrrat-go/jwx/v3/jwt)
+
+Package jwt implements JSON Web Tokens as described in [RFC7519](https://tools.ietf.org/html/rfc7519).
+
+* Convenience methods for oft-used keys ("aud", "sub", "iss", etc)
+* Convenience functions to extract/parse from http.Request, http.Header, url.Values
+* Ability to Get/Set arbitrary keys
+* Conversion to and from JSON
+* Generate signed tokens
+* Verify signed tokens
+* Extra support for OpenID tokens via [github.com/lestrrat-go/jwx/v3/jwt/openid](./jwt/openid)
+
+How-to style documentation can be found in the [docs directory](../docs).
+
+More examples are located in the examples directory ([jwt_example_test.go](../examples/jwt_example_test.go))
+
+# SYNOPSIS
+
+## Verify a signed JWT
+
+```go
+ token, err := jwt.Parse(payload, jwt.WithKey(alg, key))
+ if err != nil {
+ fmt.Printf("failed to parse payload: %s\n", err)
+ }
+```
+
+## Token Usage
+
+```go
+func ExampleJWT() {
+ const aLongLongTimeAgo = 233431200
+
+ t := jwt.New()
+ t.Set(jwt.SubjectKey, `https://github.com/lestrrat-go/jwx/v3/jwt`)
+ t.Set(jwt.AudienceKey, `Golang Users`)
+ t.Set(jwt.IssuedAtKey, time.Unix(aLongLongTimeAgo, 0))
+ t.Set(`privateClaimKey`, `Hello, World!`)
+
+ buf, err := json.MarshalIndent(t, "", " ")
+ if err != nil {
+ fmt.Printf("failed to generate JSON: %s\n", err)
+ return
+ }
+
+ fmt.Printf("%s\n", buf)
+ fmt.Printf("aud -> '%s'\n", t.Audience())
+ fmt.Printf("iat -> '%s'\n", t.IssuedAt().Format(time.RFC3339))
+ if v, ok := t.Get(`privateClaimKey`); ok {
+ fmt.Printf("privateClaimKey -> '%s'\n", v)
+ }
+ fmt.Printf("sub -> '%s'\n", t.Subject())
+
+ key, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ log.Printf("failed to generate private key: %s", err)
+ return
+ }
+
+ {
+ // Signing a token (using raw rsa.PrivateKey)
+ signed, err := jwt.Sign(t, jwt.WithKey(jwa.RS256, key))
+ if err != nil {
+ log.Printf("failed to sign token: %s", err)
+ return
+ }
+ _ = signed
+ }
+
+ {
+ // Signing a token (using JWK)
+ jwkKey, err := jwk.New(key)
+ if err != nil {
+ log.Printf("failed to create JWK key: %s", err)
+ return
+ }
+
+ signed, err := jwt.Sign(t, jwt.WithKey(jwa.RS256, jwkKey))
+ if err != nil {
+ log.Printf("failed to sign token: %s", err)
+ return
+ }
+ _ = signed
+ }
+}
+```
+
+## OpenID Claims
+
+`jwt` package can work with token types other than the default one.
+For OpenID claims, use the token created by `openid.New()`, or
+use the `jwt.WithToken(openid.New())`. If you need to use other specialized
+claims, use `jwt.WithToken()` to specify the exact token type
+
+```go
+func Example_openid() {
+ const aLongLongTimeAgo = 233431200
+
+ t := openid.New()
+ t.Set(jwt.SubjectKey, `https://github.com/lestrrat-go/jwx/v3/jwt`)
+ t.Set(jwt.AudienceKey, `Golang Users`)
+ t.Set(jwt.IssuedAtKey, time.Unix(aLongLongTimeAgo, 0))
+ t.Set(`privateClaimKey`, `Hello, World!`)
+
+ addr := openid.NewAddress()
+ addr.Set(openid.AddressPostalCodeKey, `105-0011`)
+ addr.Set(openid.AddressCountryKey, `日本`)
+ addr.Set(openid.AddressRegionKey, `東京都`)
+ addr.Set(openid.AddressLocalityKey, `港区`)
+ addr.Set(openid.AddressStreetAddressKey, `芝公園 4-2-8`)
+ t.Set(openid.AddressKey, addr)
+
+ buf, err := json.MarshalIndent(t, "", " ")
+ if err != nil {
+ fmt.Printf("failed to generate JSON: %s\n", err)
+ return
+ }
+ fmt.Printf("%s\n", buf)
+
+ t2, err := jwt.Parse(buf, jwt.WithToken(openid.New()))
+ if err != nil {
+ fmt.Printf("failed to parse JSON: %s\n", err)
+ return
+ }
+ if _, ok := t2.(openid.Token); !ok {
+ fmt.Printf("using jwt.WithToken(openid.New()) creates an openid.Token instance")
+ return
+ }
+}
+```
+
+# FAQ
+
+## Why is `jwt.Token` an interface?
+
+In this package, `jwt.Token` is an interface. This is not an arbitrary choice: there are actual reason for the type being an interface.
+
+We understand that if you are migrating from another library this may be a deal breaker, but we hope you can at least appreciate the fact that this was not done arbitrarily, and that there were real technical trade offs that were evaluated.
+
+### No uninitialized tokens
+
+First and foremost, by making it an interface, you cannot use an uninitialized token:
+
+```go
+var token1 jwt.Token // this is nil, you can't just start using this
+if err := json.Unmarshal(data, &token1); err != nil { // so you can't do this
+ ...
+}
+
+// But you _can_ do this, and we _want_ you to do this so the object is properly initialized
+token2 = jwt.New()
+if err := json.Unmarshal(data, &token2); err != nil { // actually, in practice you should use jwt.Parse()
+ ....
+}
+```
+
+### But why does it need to be initialized?
+
+There are several reasons, but one of the reasons is that I'm using a sync.Mutex to avoid races. We want this to be properly initialized.
+
+The other reason is that we support custom claims out of the box. The `map[string]interface{}` container is initialized during new. This is important when checking for equality using reflect-y methods (akin to `reflect.DeepEqual`), because if you allowed zero values, you could end up with "empty" tokens, that actually differ. Consider the following:
+
+```go
+// assume jwt.Token was s struct, not an interface
+token1 := jwt.Token{ privateClaims: make(map[string]interface{}) }
+token2 := jwt.Token{ privateClaims: nil }
+```
+
+These are semantically equivalent, but users would need to be aware of this difference when comparing values. By forcing the user to use a constructor, we can force a uniform empty state.
+
+### Standard way to store values
+
+Unlike some other libraries, this library allows you to store standard claims and non-standard claims in the same token.
+
+You _want_ to store standard claims in a properly typed field, which we do for fields like "iss", "nbf", etc.
+But for non-standard claims, there is just no way of doing this, so we _have_ to use a container like `map[string]interface{}`
+
+This means that if you allow direct access to these fields via a struct, you will have two different ways to access the claims, which is confusing:
+
+```go
+tok.Issuer = ...
+tok.PrivateClaims["foo"] = ...
+```
+
+So we want to hide where this data is stored, and use a standard method like `Set()` and `Get()` to store all the values.
+At this point you are effectively going to hide the implementation detail from the user, so you end up with a struct like below, which is fundamentally not so different from providing just an interface{}:
+
+```go
+type Token struct {
+ // unexported fields
+}
+
+func (tok *Token) Set(...) { ... }
+```
+
+### Use of pointers to store values
+
+We wanted to differentiate the state between a claim being uninitialized, and a claim being initialized to empty.
+
+So we use pointers to store values:
+
+```go
+type stdToken struct {
+ ....
+ issuer *string // if nil, uninitialized. if &(""), initialized to empty
+}
+```
+
+This is fine for us, but we doubt that this would be something users would want to do.
+This is a subtle difference, but cluttering up the API with slight variations of the same type (i.e. pointers vs non-pointers) seemed like a bad idea to us.
+
+```go
+token.Issuer = &issuer // want to avoid this
+
+token.Set(jwt.IssuerKey, "foobar") // so this is what we picked
+```
+
+This way users no longer need to care how the data is internally stored.
+
+### Allow more than one type of token through the same interface
+
+`dgrijalva/jwt-go` does this in a different way, but we felt that it would be more intuitive for all tokens to follow a single interface so there is fewer type conversions required.
+
+See the `openid` token for an example.
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/builder_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/builder_gen.go
new file mode 100644
index 0000000000..48d0375a28
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/builder_gen.go
@@ -0,0 +1,88 @@
+// Code generated by tools/cmd/genjwt/main.go. DO NOT EDIT.
+
+package jwt
+
+import (
+ "fmt"
+ "sync"
+ "time"
+)
+
+// Builder is a convenience wrapper around the New() constructor
+// and the Set() methods to assign values to Token claims.
+// Users can successively call Claim() on the Builder, and have it
+// construct the Token when Build() is called. This alleviates the
+// need for the user to check for the return value of every single
+// Set() method call.
+// Note that each call to Claim() overwrites the value set from the
+// previous call.
+type Builder struct {
+ mu sync.Mutex
+ claims map[string]any
+}
+
+func NewBuilder() *Builder {
+ return &Builder{}
+}
+
+func (b *Builder) init() {
+ if b.claims == nil {
+ b.claims = make(map[string]any)
+ }
+}
+
+func (b *Builder) Claim(name string, value any) *Builder {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.init()
+ b.claims[name] = value
+ return b
+}
+
+func (b *Builder) Audience(v []string) *Builder {
+ return b.Claim(AudienceKey, v)
+}
+
+func (b *Builder) Expiration(v time.Time) *Builder {
+ return b.Claim(ExpirationKey, v)
+}
+
+func (b *Builder) IssuedAt(v time.Time) *Builder {
+ return b.Claim(IssuedAtKey, v)
+}
+
+func (b *Builder) Issuer(v string) *Builder {
+ return b.Claim(IssuerKey, v)
+}
+
+func (b *Builder) JwtID(v string) *Builder {
+ return b.Claim(JwtIDKey, v)
+}
+
+func (b *Builder) NotBefore(v time.Time) *Builder {
+ return b.Claim(NotBeforeKey, v)
+}
+
+func (b *Builder) Subject(v string) *Builder {
+ return b.Claim(SubjectKey, v)
+}
+
+// Build creates a new token based on the claims that the builder has received
+// so far. If a claim cannot be set, then the method returns a nil Token with
+// a en error as a second return value
+//
+// Once `Build()` is called, all claims are cleared from the Builder, and the
+// Builder can be reused to build another token
+func (b *Builder) Build() (Token, error) {
+ b.mu.Lock()
+ claims := b.claims
+ b.claims = nil
+ b.mu.Unlock()
+ tok := New()
+ for k, v := range claims {
+ if err := tok.Set(k, v); err != nil {
+ return nil, fmt.Errorf(`failed to set claim %q: %w`, k, err)
+ }
+ }
+ return tok, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/errors.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/errors.go
new file mode 100644
index 0000000000..c5b529c1ae
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/errors.go
@@ -0,0 +1,95 @@
+package jwt
+
+import (
+ jwterrs "github.com/lestrrat-go/jwx/v3/jwt/internal/errors"
+)
+
+// ClaimNotFoundError returns the opaque error value that is returned when
+// `jwt.Get` fails to find the requested claim.
+//
+// This value should only be used for comparison using `errors.Is()`.
+func ClaimNotFoundError() error {
+ return jwterrs.ErrClaimNotFound
+}
+
+// ClaimAssignmentFailedError returns the opaque error value that is returned
+// when `jwt.Get` fails to assign the value to the destination. For example,
+// this can happen when the value is a string, but you passed a &int as the
+// destination.
+//
+// This value should only be used for comparison using `errors.Is()`.
+func ClaimAssignmentFailedError() error {
+ return jwterrs.ErrClaimAssignmentFailed
+}
+
+// UnknownPayloadTypeError returns the opaque error value that is returned when
+// `jwt.Parse` fails due to not being able to deduce the format of
+// the incoming buffer.
+//
+// This value should only be used for comparison using `errors.Is()`.
+func UnknownPayloadTypeError() error {
+ return jwterrs.ErrUnknownPayloadType
+}
+
+// ParseError returns the opaque error that is returned from jwt.Parse when
+// the input is not a valid JWT.
+//
+// This value should only be used for comparison using `errors.Is()`.
+func ParseError() error {
+ return jwterrs.ErrParse
+}
+
+// ValidateError returns the immutable error used for validation errors
+//
+// This value should only be used for comparison using `errors.Is()`.
+func ValidateError() error {
+ return jwterrs.ErrValidateDefault
+}
+
+// InvalidIssuerError returns the immutable error used when `iss` claim
+// is not satisfied
+//
+// This value should only be used for comparison using `errors.Is()`.
+func InvalidIssuerError() error {
+ return jwterrs.ErrInvalidIssuerDefault
+}
+
+// TokenExpiredError returns the immutable error used when `exp` claim
+// is not satisfied.
+//
+// This value should only be used for comparison using `errors.Is()`.
+func TokenExpiredError() error {
+ return jwterrs.ErrTokenExpiredDefault
+}
+
+// InvalidIssuedAtError returns the immutable error used when `iat` claim
+// is not satisfied
+//
+// This value should only be used for comparison using `errors.Is()`.
+func InvalidIssuedAtError() error {
+ return jwterrs.ErrInvalidIssuedAtDefault
+}
+
+// TokenNotYetValidError returns the immutable error used when `nbf` claim
+// is not satisfied
+//
+// This value should only be used for comparison using `errors.Is()`.
+func TokenNotYetValidError() error {
+ return jwterrs.ErrTokenNotYetValidDefault
+}
+
+// InvalidAudienceError returns the immutable error used when `aud` claim
+// is not satisfied
+//
+// This value should only be used for comparison using `errors.Is()`.
+func InvalidAudienceError() error {
+ return jwterrs.ErrInvalidAudienceDefault
+}
+
+// MissingRequiredClaimError returns the immutable error used when the claim
+// specified by `jwt.IsRequired()` is not present.
+//
+// This value should only be used for comparison using `errors.Is()`.
+func MissingRequiredClaimError() error {
+ return jwterrs.ErrMissingRequiredClaimDefault
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/fastpath.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/fastpath.go
new file mode 100644
index 0000000000..43f7c966da
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/fastpath.go
@@ -0,0 +1,71 @@
+package jwt
+
+import (
+ "fmt"
+
+ "github.com/lestrrat-go/jwx/v3/internal/base64"
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+ "github.com/lestrrat-go/jwx/v3/internal/pool"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jwk"
+ "github.com/lestrrat-go/jwx/v3/jws"
+ "github.com/lestrrat-go/jwx/v3/jws/jwsbb"
+)
+
+// signFast reinvents the wheel a bit to avoid the overhead of
+// going through the entire jws.Sign() machinery.
+func signFast(t Token, alg jwa.SignatureAlgorithm, key any) ([]byte, error) {
+ algstr := alg.String()
+
+ var kid string
+ if jwkKey, ok := key.(jwk.Key); ok {
+ if v, ok := jwkKey.KeyID(); ok && v != "" {
+ kid = v
+ }
+ }
+
+ // Setup headers
+ // {"alg":"","typ":"JWT"}
+ // 1234567890123456789012
+ want := len(algstr) + 22
+ // also, if kid != "", we need to add "kid":"$kid"
+ if kid != "" {
+ // "kid":""
+ // 12345689
+ want += len(kid) + 9
+ }
+ hdr := pool.ByteSlice().GetCapacity(want)
+ hdr = append(hdr, '{', '"', 'a', 'l', 'g', '"', ':', '"')
+ hdr = append(hdr, algstr...)
+ hdr = append(hdr, '"')
+ if kid != "" {
+ hdr = append(hdr, ',', '"', 'k', 'i', 'd', '"', ':', '"')
+ hdr = append(hdr, kid...)
+ hdr = append(hdr, '"')
+ }
+ hdr = append(hdr, ',', '"', 't', 'y', 'p', '"', ':', '"', 'J', 'W', 'T', '"', '}')
+ defer pool.ByteSlice().Put(hdr)
+
+ // setup the buffer to sign with
+ payload, err := json.Marshal(t)
+ if err != nil {
+ return nil, fmt.Errorf(`jwt.signFast: failed to marshal token payload: %w`, err)
+ }
+
+ combined := jwsbb.SignBuffer(nil, hdr, payload, base64.DefaultEncoder(), true)
+ signer, err := jws.SignerFor(alg)
+ if err != nil {
+ return nil, fmt.Errorf(`jwt.signFast: failed to get signer for %s: %w`, alg, err)
+ }
+
+ signature, err := signer.Sign(key, combined)
+ if err != nil {
+ return nil, fmt.Errorf(`jwt.signFast: failed to sign payload with %s: %w`, alg, err)
+ }
+
+ serialized, err := jwsbb.JoinCompact(nil, hdr, payload, signature, base64.DefaultEncoder(), true)
+ if err != nil {
+ return nil, fmt.Errorf("jwt.signFast: failed to join compact: %w", err)
+ }
+ return serialized, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/filter.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/filter.go
new file mode 100644
index 0000000000..31471dbdfb
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/filter.go
@@ -0,0 +1,34 @@
+package jwt
+
+import (
+ "github.com/lestrrat-go/jwx/v3/transform"
+)
+
+// TokenFilter is an interface that allows users to filter JWT claims.
+// It provides two methods: Filter and Reject; Filter returns a new token with only
+// the claims that match the filter criteria, while Reject returns a new token with
+// only the claims that DO NOT match the filter.
+//
+// EXPERIMENTAL: This API is experimental and its interface and behavior is
+// subject to change in future releases. This API is not subject to semver
+// compatibility guarantees.
+type TokenFilter interface {
+ Filter(token Token) (Token, error)
+ Reject(token Token) (Token, error)
+}
+
+// StandardClaimsFilter returns a TokenFilter that filters out standard JWT claims.
+//
+// You can use this filter to create tokens that either only has standard claims
+// or only custom claims. If you need to configure the filter more precisely, consider
+// using the ClaimNameFilter directly.
+func StandardClaimsFilter() TokenFilter {
+ return stdClaimsFilter
+}
+
+var stdClaimsFilter = NewClaimNameFilter(stdClaimNames...)
+
+// NewClaimNameFilter creates a new ClaimNameFilter with the specified claim names.
+func NewClaimNameFilter(names ...string) TokenFilter {
+ return transform.NewNameBasedFilter[Token](names...)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/http.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/http.go
new file mode 100644
index 0000000000..691c5a0df4
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/http.go
@@ -0,0 +1,295 @@
+package jwt
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+
+ "github.com/lestrrat-go/jwx/v3/internal/pool"
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+)
+
+// ParseCookie parses a JWT stored in a http.Cookie with the given name.
+// If the specified cookie is not found, http.ErrNoCookie is returned.
+func ParseCookie(req *http.Request, name string, options ...ParseOption) (Token, error) {
+ var dst **http.Cookie
+ for _, option := range options {
+ switch option.Ident() {
+ case identCookie{}:
+ if err := option.Value(&dst); err != nil {
+ return nil, fmt.Errorf(`jws.ParseCookie: value to option WithCookie must be **http.Cookie: %w`, err)
+ }
+ }
+ }
+
+ cookie, err := req.Cookie(name)
+ if err != nil {
+ return nil, err
+ }
+ tok, err := ParseString(cookie.Value, options...)
+ if err != nil {
+ return nil, fmt.Errorf(`jws.ParseCookie: failed to parse token stored in cookie: %w`, err)
+ }
+
+ if dst != nil {
+ *dst = cookie
+ }
+ return tok, nil
+}
+
+// ParseHeader parses a JWT stored in a http.Header.
+//
+// For the header "Authorization", it will strip the prefix "Bearer " and will
+// treat the remaining value as a JWT.
+func ParseHeader(hdr http.Header, name string, options ...ParseOption) (Token, error) {
+ key := http.CanonicalHeaderKey(name)
+ v := strings.TrimSpace(hdr.Get(key))
+ if v == "" {
+ return nil, fmt.Errorf(`empty header (%s)`, key)
+ }
+
+ if key == "Authorization" {
+ // Authorization header is an exception. We strip the "Bearer " from
+ // the prefix
+ v = strings.TrimSpace(strings.TrimPrefix(v, "Bearer"))
+ }
+
+ tok, err := ParseString(v, options...)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to parse token stored in header (%s): %w`, key, err)
+ }
+ return tok, nil
+}
+
+// ParseForm parses a JWT stored in a url.Value.
+func ParseForm(values url.Values, name string, options ...ParseOption) (Token, error) {
+ v := strings.TrimSpace(values.Get(name))
+ if v == "" {
+ return nil, fmt.Errorf(`empty value (%s)`, name)
+ }
+
+ return ParseString(v, options...)
+}
+
+// ParseRequest searches a http.Request object for a JWT token.
+//
+// Specifying WithHeaderKey() will tell it to search under a specific
+// header key. Specifying WithFormKey() will tell it to search under
+// a specific form field.
+//
+// If none of jwt.WithHeaderKey()/jwt.WithCookieKey()/jwt.WithFormKey() is
+// used, "Authorization" header will be searched. If any of these options
+// are specified, you must explicitly re-enable searching for "Authorization" header
+// if you also want to search for it.
+//
+// # searches for "Authorization"
+// jwt.ParseRequest(req)
+//
+// # searches for "x-my-token" ONLY.
+// jwt.ParseRequest(req, jwt.WithHeaderKey("x-my-token"))
+//
+// # searches for "Authorization" AND "x-my-token"
+// jwt.ParseRequest(req, jwt.WithHeaderKey("Authorization"), jwt.WithHeaderKey("x-my-token"))
+//
+// Cookies are searched using (http.Request).Cookie(). If you have multiple
+// cookies with the same name, and you want to search for a specific one that
+// (http.Request).Cookie() would not return, you will need to implement your
+// own logic to extract the cookie and use jwt.ParseString().
+func ParseRequest(req *http.Request, options ...ParseOption) (Token, error) {
+ var hdrkeys []string
+ var formkeys []string
+ var cookiekeys []string
+ var parseOptions []ParseOption
+ for _, option := range options {
+ switch option.Ident() {
+ case identHeaderKey{}:
+ var v string
+ if err := option.Value(&v); err != nil {
+ return nil, fmt.Errorf(`jws.ParseRequest: value to option WithHeaderKey must be string: %w`, err)
+ }
+ hdrkeys = append(hdrkeys, v)
+ case identFormKey{}:
+ var v string
+ if err := option.Value(&v); err != nil {
+ return nil, fmt.Errorf(`jws.ParseRequest: value to option WithFormKey must be string: %w`, err)
+ }
+ formkeys = append(formkeys, v)
+ case identCookieKey{}:
+ var v string
+ if err := option.Value(&v); err != nil {
+ return nil, fmt.Errorf(`jws.ParseRequest: value to option WithCookieKey must be string: %w`, err)
+ }
+ cookiekeys = append(cookiekeys, v)
+ default:
+ parseOptions = append(parseOptions, option)
+ }
+ }
+
+ if len(hdrkeys) == 0 && len(formkeys) == 0 && len(cookiekeys) == 0 {
+ hdrkeys = append(hdrkeys, "Authorization")
+ }
+
+ mhdrs := pool.KeyToErrorMap().Get()
+ defer pool.KeyToErrorMap().Put(mhdrs)
+ mfrms := pool.KeyToErrorMap().Get()
+ defer pool.KeyToErrorMap().Put(mfrms)
+ mcookies := pool.KeyToErrorMap().Get()
+ defer pool.KeyToErrorMap().Put(mcookies)
+
+ for _, hdrkey := range hdrkeys {
+ // Check presence via a direct map lookup
+ if _, ok := req.Header[http.CanonicalHeaderKey(hdrkey)]; !ok {
+ // if non-existent, not error
+ continue
+ }
+
+ tok, err := ParseHeader(req.Header, hdrkey, parseOptions...)
+ if err != nil {
+ mhdrs[hdrkey] = err
+ continue
+ }
+ return tok, nil
+ }
+
+ for _, name := range cookiekeys {
+ tok, err := ParseCookie(req, name, parseOptions...)
+ if err != nil {
+ if err == http.ErrNoCookie {
+ // not fatal
+ mcookies[name] = err
+ }
+ continue
+ }
+ return tok, nil
+ }
+
+ if cl := req.ContentLength; cl > 0 {
+ if err := req.ParseForm(); err != nil {
+ return nil, fmt.Errorf(`failed to parse form: %w`, err)
+ }
+ }
+
+ for _, formkey := range formkeys {
+ // Check presence via a direct map lookup
+ if _, ok := req.Form[formkey]; !ok {
+ // if non-existent, not error
+ continue
+ }
+
+ tok, err := ParseForm(req.Form, formkey, parseOptions...)
+ if err != nil {
+ mfrms[formkey] = err
+ continue
+ }
+ return tok, nil
+ }
+
+ // Everything below is a prelude to error reporting.
+ var triedHdrs strings.Builder
+ for i, hdrkey := range hdrkeys {
+ if i > 0 {
+ triedHdrs.WriteString(", ")
+ }
+ triedHdrs.WriteString(strconv.Quote(hdrkey))
+ }
+
+ var triedForms strings.Builder
+ for i, formkey := range formkeys {
+ if i > 0 {
+ triedForms.WriteString(", ")
+ }
+ triedForms.WriteString(strconv.Quote(formkey))
+ }
+
+ var triedCookies strings.Builder
+ for i, cookiekey := range cookiekeys {
+ if i > 0 {
+ triedCookies.WriteString(", ")
+ }
+ triedCookies.WriteString(strconv.Quote(cookiekey))
+ }
+
+ var b strings.Builder
+ b.WriteString(`failed to find a valid token in any location of the request (tried: `)
+ olen := b.Len()
+ if triedHdrs.Len() > 0 {
+ b.WriteString(`header keys: [`)
+ b.WriteString(triedHdrs.String())
+ b.WriteByte(tokens.CloseSquareBracket)
+ }
+ if triedForms.Len() > 0 {
+ if b.Len() > olen {
+ b.WriteString(", ")
+ }
+ b.WriteString("form keys: [")
+ b.WriteString(triedForms.String())
+ b.WriteByte(tokens.CloseSquareBracket)
+ }
+
+ if triedCookies.Len() > 0 {
+ if b.Len() > olen {
+ b.WriteString(", ")
+ }
+ b.WriteString("cookie keys: [")
+ b.WriteString(triedCookies.String())
+ b.WriteByte(tokens.CloseSquareBracket)
+ }
+ b.WriteByte(')')
+
+ lmhdrs := len(mhdrs)
+ lmfrms := len(mfrms)
+ lmcookies := len(mcookies)
+ var errors []any
+ if lmhdrs > 0 || lmfrms > 0 || lmcookies > 0 {
+ b.WriteString(". Additionally, errors were encountered during attempts to verify using:")
+
+ if lmhdrs > 0 {
+ b.WriteString(" headers: (")
+ count := 0
+ for hdrkey, err := range mhdrs {
+ if count > 0 {
+ b.WriteString(", ")
+ }
+ b.WriteString("[header key: ")
+ b.WriteString(strconv.Quote(hdrkey))
+ b.WriteString(", error: %w]")
+ errors = append(errors, err)
+ count++
+ }
+ b.WriteString(")")
+ }
+
+ if lmcookies > 0 {
+ count := 0
+ b.WriteString(" cookies: (")
+ for cookiekey, err := range mcookies {
+ if count > 0 {
+ b.WriteString(", ")
+ }
+ b.WriteString("[cookie key: ")
+ b.WriteString(strconv.Quote(cookiekey))
+ b.WriteString(", error: %w]")
+ errors = append(errors, err)
+ count++
+ }
+ }
+
+ if lmfrms > 0 {
+ count := 0
+ b.WriteString(" forms: (")
+ for formkey, err := range mfrms {
+ if count > 0 {
+ b.WriteString(", ")
+ }
+ b.WriteString("[form key: ")
+ b.WriteString(strconv.Quote(formkey))
+ b.WriteString(", error: %w]")
+ errors = append(errors, err)
+ count++
+ }
+ }
+ }
+ return nil, fmt.Errorf(b.String(), errors...)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/interface.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/interface.go
new file mode 100644
index 0000000000..f9a9d971ef
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/interface.go
@@ -0,0 +1,8 @@
+package jwt
+
+import (
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+)
+
+type DecodeCtx = json.DecodeCtx
+type TokenWithDecodeCtx = json.DecodeCtxContainer
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/errors/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/errors/BUILD.bazel
new file mode 100644
index 0000000000..a053e8c0aa
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/errors/BUILD.bazel
@@ -0,0 +1,16 @@
+load("@rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "errors",
+ srcs = [
+ "errors.go",
+ ],
+ importpath = "github.com/lestrrat-go/jwx/v3/jwt/internal/errors",
+ visibility = ["//jwt:__subpackages__"],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":errors",
+ visibility = ["//jwt:__subpackages__"],
+)
\ No newline at end of file
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/errors/errors.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/errors/errors.go
new file mode 100644
index 0000000000..a1dca0d5a3
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/errors/errors.go
@@ -0,0 +1,183 @@
+// Package errors exist to store errors for jwt and openid packages.
+//
+// It's internal because we don't want to expose _anything_ about these errors
+// so users absolutely cannot do anything other than use them as opaque errors.
+package errors
+
+import (
+ "errors"
+ "fmt"
+)
+
+var (
+ ErrClaimNotFound = ClaimNotFoundError{}
+ ErrClaimAssignmentFailed = ClaimAssignmentFailedError{Err: errors.New(`claim assignment failed`)}
+ ErrUnknownPayloadType = errors.New(`unknown payload type (payload is not JWT?)`)
+ ErrParse = ParseError{error: errors.New(`jwt.Parse: unknown error`)}
+ ErrValidateDefault = ValidationError{errors.New(`unknown error`)}
+ ErrInvalidIssuerDefault = InvalidIssuerError{errors.New(`"iss" not satisfied`)}
+ ErrTokenExpiredDefault = TokenExpiredError{errors.New(`"exp" not satisfied: token is expired`)}
+ ErrInvalidIssuedAtDefault = InvalidIssuedAtError{errors.New(`"iat" not satisfied`)}
+ ErrTokenNotYetValidDefault = TokenNotYetValidError{errors.New(`"nbf" not satisfied: token is not yet valid`)}
+ ErrInvalidAudienceDefault = InvalidAudienceError{errors.New(`"aud" not satisfied`)}
+ ErrMissingRequiredClaimDefault = &MissingRequiredClaimError{error: errors.New(`required claim is missing`)}
+)
+
+type ClaimNotFoundError struct {
+ Name string
+}
+
+func (e ClaimNotFoundError) Error() string {
+ // This error message uses "field" instead of "claim" for backwards compatibility,
+ // but it shuold really be "claim" since it refers to a JWT claim.
+ return fmt.Sprintf(`field "%s" not found`, e.Name)
+}
+
+func (e ClaimNotFoundError) Is(target error) bool {
+ _, ok := target.(ClaimNotFoundError)
+ return ok
+}
+
+type ClaimAssignmentFailedError struct {
+ Err error
+}
+
+func (e ClaimAssignmentFailedError) Error() string {
+ // This error message probably should be tweaked, but it is this way
+ // for backwards compatibility.
+ return fmt.Sprintf(`failed to assign value to dst: %s`, e.Err.Error())
+}
+
+func (e ClaimAssignmentFailedError) Unwrap() error {
+ return e.Err
+}
+
+func (e ClaimAssignmentFailedError) Is(target error) bool {
+ _, ok := target.(ClaimAssignmentFailedError)
+ return ok
+}
+
+type ParseError struct {
+ error
+}
+
+func (e ParseError) Unwrap() error {
+ return e.error
+}
+
+func (ParseError) Is(err error) bool {
+ _, ok := err.(ParseError)
+ return ok
+}
+
+func ParseErrorf(prefix, f string, args ...any) error {
+ return ParseError{fmt.Errorf(prefix+": "+f, args...)}
+}
+
+type ValidationError struct {
+ error
+}
+
+func (ValidationError) Is(err error) bool {
+ _, ok := err.(ValidationError)
+ return ok
+}
+
+func (err ValidationError) Unwrap() error {
+ return err.error
+}
+
+func ValidateErrorf(f string, args ...any) error {
+ return ValidationError{fmt.Errorf(`jwt.Validate: `+f, args...)}
+}
+
+type InvalidIssuerError struct {
+ error
+}
+
+func (err InvalidIssuerError) Is(target error) bool {
+ _, ok := target.(InvalidIssuerError)
+ return ok
+}
+
+func (err InvalidIssuerError) Unwrap() error {
+ return err.error
+}
+
+func IssuerErrorf(f string, args ...any) error {
+ return InvalidIssuerError{fmt.Errorf(`"iss" not satisfied: `+f, args...)}
+}
+
+type TokenExpiredError struct {
+ error
+}
+
+func (err TokenExpiredError) Is(target error) bool {
+ _, ok := target.(TokenExpiredError)
+ return ok
+}
+
+func (err TokenExpiredError) Unwrap() error {
+ return err.error
+}
+
+type InvalidIssuedAtError struct {
+ error
+}
+
+func (err InvalidIssuedAtError) Is(target error) bool {
+ _, ok := target.(InvalidIssuedAtError)
+ return ok
+}
+
+func (err InvalidIssuedAtError) Unwrap() error {
+ return err.error
+}
+
+type TokenNotYetValidError struct {
+ error
+}
+
+func (err TokenNotYetValidError) Is(target error) bool {
+ _, ok := target.(TokenNotYetValidError)
+ return ok
+}
+
+func (err TokenNotYetValidError) Unwrap() error {
+ return err.error
+}
+
+type InvalidAudienceError struct {
+ error
+}
+
+func (err InvalidAudienceError) Is(target error) bool {
+ _, ok := target.(InvalidAudienceError)
+ return ok
+}
+
+func (err InvalidAudienceError) Unwrap() error {
+ return err.error
+}
+
+func AudienceErrorf(f string, args ...any) error {
+ return InvalidAudienceError{fmt.Errorf(`"aud" not satisfied: `+f, args...)}
+}
+
+type MissingRequiredClaimError struct {
+ error
+
+ claim string
+}
+
+func (err *MissingRequiredClaimError) Is(target error) bool {
+ err1, ok := target.(*MissingRequiredClaimError)
+ if !ok {
+ return false
+ }
+ return err1 == ErrMissingRequiredClaimDefault || err1.claim == err.claim
+}
+
+func MissingRequiredClaimErrorf(name string) error {
+ return &MissingRequiredClaimError{claim: name, error: fmt.Errorf(`required claim "%s" is missing`, name)}
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/types/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/types/BUILD.bazel
new file mode 100644
index 0000000000..1d046a3ecc
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/types/BUILD.bazel
@@ -0,0 +1,35 @@
+load("@rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "types",
+ srcs = [
+ "date.go",
+ "string.go",
+ ],
+ importpath = "github.com/lestrrat-go/jwx/v3/jwt/internal/types",
+ visibility = ["//jwt:__subpackages__"],
+ deps = [
+ "//internal/json",
+ "//internal/tokens",
+ ],
+)
+
+go_test(
+ name = "types_test",
+ srcs = [
+ "date_test.go",
+ "string_test.go",
+ ],
+ deps = [
+ ":types",
+ "//internal/json",
+ "//jwt",
+ "@com_github_stretchr_testify//require",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":types",
+ visibility = ["//jwt:__subpackages__"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/types/date.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/types/date.go
new file mode 100644
index 0000000000..3d40a9ed97
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/types/date.go
@@ -0,0 +1,192 @@
+package types
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+)
+
+const (
+ DefaultPrecision uint32 = 0 // second level
+ MaxPrecision uint32 = 9 // nanosecond level
+)
+
+var Pedantic uint32
+var ParsePrecision = DefaultPrecision
+var FormatPrecision = DefaultPrecision
+
+// NumericDate represents the date format used in the 'nbf' claim
+type NumericDate struct {
+ time.Time
+}
+
+func (n *NumericDate) Get() time.Time {
+ if n == nil {
+ return (time.Time{}).UTC()
+ }
+ return n.Time
+}
+
+func intToTime(v any, t *time.Time) bool {
+ var n int64
+ switch x := v.(type) {
+ case int64:
+ n = x
+ case int32:
+ n = int64(x)
+ case int16:
+ n = int64(x)
+ case int8:
+ n = int64(x)
+ case int:
+ n = int64(x)
+ default:
+ return false
+ }
+
+ *t = time.Unix(n, 0)
+ return true
+}
+
+func parseNumericString(x string) (time.Time, error) {
+ var t time.Time // empty time for empty return value
+
+ // Only check for the escape hatch if it's the pedantic
+ // flag is off
+ if Pedantic != 1 {
+ // This is an escape hatch for non-conformant providers
+ // that gives us RFC3339 instead of epoch time
+ for _, r := range x {
+ // 0x30 = '0', 0x39 = '9', 0x2E = tokens.Period
+ if (r >= 0x30 && r <= 0x39) || r == 0x2E {
+ continue
+ }
+
+ // if it got here, then it probably isn't epoch time
+ tv, err := time.Parse(time.RFC3339, x)
+ if err != nil {
+ return t, fmt.Errorf(`value is not number of seconds since the epoch, and attempt to parse it as RFC3339 timestamp failed: %w`, err)
+ }
+ return tv, nil
+ }
+ }
+
+ var fractional string
+ whole := x
+ if i := strings.IndexRune(x, tokens.Period); i > 0 {
+ if ParsePrecision > 0 && len(x) > i+1 {
+ fractional = x[i+1:] // everything after the tokens.Period
+ if int(ParsePrecision) < len(fractional) {
+ // Remove insignificant digits
+ fractional = fractional[:int(ParsePrecision)]
+ }
+ // Replace missing fractional diits with zeros
+ for len(fractional) < int(MaxPrecision) {
+ fractional = fractional + "0"
+ }
+ }
+ whole = x[:i]
+ }
+ n, err := strconv.ParseInt(whole, 10, 64)
+ if err != nil {
+ return t, fmt.Errorf(`failed to parse whole value %q: %w`, whole, err)
+ }
+ var nsecs int64
+ if fractional != "" {
+ v, err := strconv.ParseInt(fractional, 10, 64)
+ if err != nil {
+ return t, fmt.Errorf(`failed to parse fractional value %q: %w`, fractional, err)
+ }
+ nsecs = v
+ }
+
+ return time.Unix(n, nsecs).UTC(), nil
+}
+
+func (n *NumericDate) Accept(v any) error {
+ var t time.Time
+ switch x := v.(type) {
+ case float32:
+ tv, err := parseNumericString(fmt.Sprintf(`%.9f`, x))
+ if err != nil {
+ return fmt.Errorf(`failed to accept float32 %.9f: %w`, x, err)
+ }
+ t = tv
+ case float64:
+ tv, err := parseNumericString(fmt.Sprintf(`%.9f`, x))
+ if err != nil {
+ return fmt.Errorf(`failed to accept float32 %.9f: %w`, x, err)
+ }
+ t = tv
+ case json.Number:
+ tv, err := parseNumericString(x.String())
+ if err != nil {
+ return fmt.Errorf(`failed to accept json.Number %q: %w`, x.String(), err)
+ }
+ t = tv
+ case string:
+ tv, err := parseNumericString(x)
+ if err != nil {
+ return fmt.Errorf(`failed to accept string %q: %w`, x, err)
+ }
+ t = tv
+ case time.Time:
+ t = x
+ default:
+ if !intToTime(v, &t) {
+ return fmt.Errorf(`invalid type %T`, v)
+ }
+ }
+ n.Time = t.UTC()
+ return nil
+}
+
+func (n NumericDate) String() string {
+ if FormatPrecision == 0 {
+ return strconv.FormatInt(n.Unix(), 10)
+ }
+
+ // This is cheating, but it's better (easier) than doing floating point math
+ // We basically munge with strings after formatting an integer value
+ // for nanoseconds since epoch
+ s := strconv.FormatInt(n.UnixNano(), 10)
+ for len(s) < int(MaxPrecision) {
+ s = "0" + s
+ }
+
+ slwhole := len(s) - int(MaxPrecision)
+ s = s[:slwhole] + "." + s[slwhole:slwhole+int(FormatPrecision)]
+ if s[0] == tokens.Period {
+ s = "0" + s
+ }
+
+ return s
+}
+
+// MarshalJSON translates from internal representation to JSON NumericDate
+// See https://tools.ietf.org/html/rfc7519#page-6
+func (n *NumericDate) MarshalJSON() ([]byte, error) {
+ if n.IsZero() {
+ return json.Marshal(nil)
+ }
+
+ return json.Marshal(n.String())
+}
+
+func (n *NumericDate) UnmarshalJSON(data []byte) error {
+ var v any
+ if err := json.Unmarshal(data, &v); err != nil {
+ return fmt.Errorf(`failed to unmarshal date: %w`, err)
+ }
+
+ var n2 NumericDate
+ if err := n2.Accept(v); err != nil {
+ return fmt.Errorf(`invalid value for NumericDate: %w`, err)
+ }
+ *n = n2
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/types/string.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/types/string.go
new file mode 100644
index 0000000000..8117bea358
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/types/string.go
@@ -0,0 +1,43 @@
+package types
+
+import (
+ "fmt"
+
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+)
+
+type StringList []string
+
+func (l StringList) Get() []string {
+ return []string(l)
+}
+
+func (l *StringList) Accept(v any) error {
+ switch x := v.(type) {
+ case string:
+ *l = StringList([]string{x})
+ case []string:
+ *l = StringList(x)
+ case []any:
+ list := make(StringList, len(x))
+ for i, e := range x {
+ if s, ok := e.(string); ok {
+ list[i] = s
+ continue
+ }
+ return fmt.Errorf(`invalid list element type %T`, e)
+ }
+ *l = list
+ default:
+ return fmt.Errorf(`invalid type: %T`, v)
+ }
+ return nil
+}
+
+func (l *StringList) UnmarshalJSON(data []byte) error {
+ var v any
+ if err := json.Unmarshal(data, &v); err != nil {
+ return fmt.Errorf(`failed to unmarshal data: %w`, err)
+ }
+ return l.Accept(v)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/io.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/io.go
new file mode 100644
index 0000000000..812cda775e
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/io.go
@@ -0,0 +1,42 @@
+// Code generated by tools/cmd/genreadfile/main.go. DO NOT EDIT.
+
+package jwt
+
+import (
+ "fmt"
+ "io/fs"
+ "os"
+)
+
+type sysFS struct{}
+
+func (sysFS) Open(path string) (fs.File, error) {
+ return os.Open(path)
+}
+
+func ReadFile(path string, options ...ReadFileOption) (Token, error) {
+ var parseOptions []ParseOption
+ for _, option := range options {
+ if po, ok := option.(ParseOption); ok {
+ parseOptions = append(parseOptions, po)
+ }
+ }
+
+ var srcFS fs.FS = sysFS{}
+ for _, option := range options {
+ switch option.Ident() {
+ case identFS{}:
+ if err := option.Value(&srcFS); err != nil {
+ return nil, fmt.Errorf("failed to set fs.FS: %w", err)
+ }
+ }
+ }
+
+ f, err := srcFS.Open(path)
+ if err != nil {
+ return nil, err
+ }
+
+ defer f.Close()
+ return ParseReader(f, parseOptions...)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/jwt.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/jwt.go
new file mode 100644
index 0000000000..43e382987a
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/jwt.go
@@ -0,0 +1,592 @@
+//go:generate ../tools/cmd/genjwt.sh
+//go:generate stringer -type=TokenOption -output=token_options_gen.go
+
+// Package jwt implements JSON Web Tokens as described in https://tools.ietf.org/html/rfc7519
+package jwt
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "sync/atomic"
+ "time"
+
+ "github.com/lestrrat-go/jwx/v3"
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jws"
+ jwterrs "github.com/lestrrat-go/jwx/v3/jwt/internal/errors"
+ "github.com/lestrrat-go/jwx/v3/jwt/internal/types"
+)
+
+var defaultTruncation atomic.Int64
+
+// Settings controls global settings that are specific to JWTs.
+func Settings(options ...GlobalOption) {
+ var flattenAudience bool
+ var parsePedantic bool
+ var parsePrecision = types.MaxPrecision + 1 // illegal value, so we can detect nothing was set
+ var formatPrecision = types.MaxPrecision + 1 // illegal value, so we can detect nothing was set
+ truncation := time.Duration(-1)
+ for _, option := range options {
+ switch option.Ident() {
+ case identTruncation{}:
+ if err := option.Value(&truncation); err != nil {
+ panic(fmt.Sprintf("jwt.Settings: value for WithTruncation must be time.Duration: %s", err))
+ }
+ case identFlattenAudience{}:
+ if err := option.Value(&flattenAudience); err != nil {
+ panic(fmt.Sprintf("jwt.Settings: value for WithFlattenAudience must be bool: %s", err))
+ }
+ case identNumericDateParsePedantic{}:
+ if err := option.Value(&parsePedantic); err != nil {
+ panic(fmt.Sprintf("jwt.Settings: value for WithNumericDateParsePedantic must be bool: %s", err))
+ }
+ case identNumericDateParsePrecision{}:
+ var v int
+ if err := option.Value(&v); err != nil {
+ panic(fmt.Sprintf("jwt.Settings: value for WithNumericDateParsePrecision must be int: %s", err))
+ }
+ // only accept this value if it's in our desired range
+ if v >= 0 && v <= int(types.MaxPrecision) {
+ parsePrecision = uint32(v)
+ }
+ case identNumericDateFormatPrecision{}:
+ var v int
+ if err := option.Value(&v); err != nil {
+ panic(fmt.Sprintf("jwt.Settings: value for WithNumericDateFormatPrecision must be int: %s", err))
+ }
+ // only accept this value if it's in our desired range
+ if v >= 0 && v <= int(types.MaxPrecision) {
+ formatPrecision = uint32(v)
+ }
+ }
+ }
+
+ if parsePrecision <= types.MaxPrecision { // remember we set default to max + 1
+ v := atomic.LoadUint32(&types.ParsePrecision)
+ if v != parsePrecision {
+ atomic.CompareAndSwapUint32(&types.ParsePrecision, v, parsePrecision)
+ }
+ }
+
+ if formatPrecision <= types.MaxPrecision { // remember we set default to max + 1
+ v := atomic.LoadUint32(&types.FormatPrecision)
+ if v != formatPrecision {
+ atomic.CompareAndSwapUint32(&types.FormatPrecision, v, formatPrecision)
+ }
+ }
+
+ {
+ v := atomic.LoadUint32(&types.Pedantic)
+ if (v == 1) != parsePedantic {
+ var newVal uint32
+ if parsePedantic {
+ newVal = 1
+ }
+ atomic.CompareAndSwapUint32(&types.Pedantic, v, newVal)
+ }
+ }
+
+ {
+ defaultOptionsMu.Lock()
+ if flattenAudience {
+ defaultOptions.Enable(FlattenAudience)
+ } else {
+ defaultOptions.Disable(FlattenAudience)
+ }
+ defaultOptionsMu.Unlock()
+ }
+
+ if truncation >= 0 {
+ defaultTruncation.Store(int64(truncation))
+ }
+}
+
+var registry = json.NewRegistry()
+
+// ParseString calls Parse against a string
+func ParseString(s string, options ...ParseOption) (Token, error) {
+ tok, err := parseBytes([]byte(s), options...)
+ if err != nil {
+ return nil, jwterrs.ParseErrorf(`jwt.ParseString`, `failed to parse string: %w`, err)
+ }
+ return tok, nil
+}
+
+// Parse parses the JWT token payload and creates a new `jwt.Token` object.
+// The token must be encoded in JWS compact format, or a raw JSON form of JWT
+// without any signatures.
+//
+// If you need JWE support on top of JWS, you will need to rollout your
+// own workaround.
+//
+// If the token is signed, and you want to verify the payload matches the signature,
+// you must pass the jwt.WithKey(alg, key) or jwt.WithKeySet(jwk.Set) option.
+// If you do not specify these parameters, no verification will be performed.
+//
+// During verification, if the JWS headers specify a key ID (`kid`), the
+// key used for verification must match the specified ID. If you are somehow
+// using a key without a `kid` (which is highly unlikely if you are working
+// with a JWT from a well-know provider), you can work around this by modifying
+// the `jwk.Key` and setting the `kid` header.
+//
+// If you also want to assert the validity of the JWT itself (i.e. expiration
+// and such), use the `Validate()` function on the returned token, or pass the
+// `WithValidate(true)` option. Validate options can also be passed to
+// `Parse`
+//
+// This function takes both ParseOption and ValidateOption types:
+// ParseOptions control the parsing behavior, and ValidateOptions are
+// passed to `Validate()` when `jwt.WithValidate` is specified.
+func Parse(s []byte, options ...ParseOption) (Token, error) {
+ tok, err := parseBytes(s, options...)
+ if err != nil {
+ return nil, jwterrs.ParseErrorf(`jwt.Parse`, `failed to parse token: %w`, err)
+ }
+ return tok, nil
+}
+
+// ParseInsecure is exactly the same as Parse(), but it disables
+// signature verification and token validation.
+//
+// You cannot override `jwt.WithVerify()` or `jwt.WithValidate()`
+// using this function. Providing these options would result in
+// an error
+func ParseInsecure(s []byte, options ...ParseOption) (Token, error) {
+ for _, option := range options {
+ switch option.Ident() {
+ case identVerify{}, identValidate{}:
+ return nil, jwterrs.ParseErrorf(`jwt.ParseInsecure`, `jwt.WithVerify() and jwt.WithValidate() may not be specified`)
+ }
+ }
+
+ options = append(options, WithVerify(false), WithValidate(false))
+ tok, err := Parse(s, options...)
+ if err != nil {
+ return nil, jwterrs.ParseErrorf(`jwt.ParseInsecure`, `failed to parse token: %w`, err)
+ }
+ return tok, nil
+}
+
+// ParseReader calls Parse against an io.Reader
+func ParseReader(src io.Reader, options ...ParseOption) (Token, error) {
+ // We're going to need the raw bytes regardless. Read it.
+ data, err := io.ReadAll(src)
+ if err != nil {
+ return nil, jwterrs.ParseErrorf(`jwt.ParseReader`, `failed to read from token data source: %w`, err)
+ }
+ tok, err := parseBytes(data, options...)
+ if err != nil {
+ return nil, jwterrs.ParseErrorf(`jwt.ParseReader`, `failed to parse token: %w`, err)
+ }
+ return tok, nil
+}
+
+type parseCtx struct {
+ token Token
+ validateOpts []ValidateOption
+ verifyOpts []jws.VerifyOption
+ localReg *json.Registry
+ pedantic bool
+ skipVerification bool
+ validate bool
+ withKeyCount int
+ withKey *withKey // this is used to detect if we have a WithKey option
+}
+
+func parseBytes(data []byte, options ...ParseOption) (Token, error) {
+ var ctx parseCtx
+
+ // Validation is turned on by default. You need to specify
+ // jwt.WithValidate(false) if you want to disable it
+ ctx.validate = true
+
+ // Verification is required (i.e., it is assumed that the incoming
+ // data is in JWS format) unless the user explicitly asks for
+ // it to be skipped.
+ verification := true
+
+ var verifyOpts []Option
+ for _, o := range options {
+ if v, ok := o.(ValidateOption); ok {
+ ctx.validateOpts = append(ctx.validateOpts, v)
+ continue
+ }
+
+ switch o.Ident() {
+ case identKey{}:
+ // it would be nice to be able to detect if ctx.verifyOpts[0]
+ // is a WithKey option, but unfortunately at that point we have
+ // already converted the options to a jws option, which means
+ // we can no longer compare its Ident() to jwt.identKey{}.
+ // So let's just count this here
+ ctx.withKeyCount++
+ if ctx.withKeyCount == 1 {
+ if err := o.Value(&ctx.withKey); err != nil {
+ return nil, fmt.Errorf("jws.parseBytes: value for WithKey option must be a *jwt.withKey: %w", err)
+ }
+ }
+ verifyOpts = append(verifyOpts, o)
+ case identKeySet{}, identVerifyAuto{}, identKeyProvider{}, identBase64Encoder{}:
+ verifyOpts = append(verifyOpts, o)
+ case identToken{}:
+ var token Token
+ if err := o.Value(&token); err != nil {
+ return nil, fmt.Errorf("jws.parseBytes: value for WithToken option must be a jwt.Token: %w", err)
+ }
+ ctx.token = token
+ case identPedantic{}:
+ if err := o.Value(&ctx.pedantic); err != nil {
+ return nil, fmt.Errorf("jws.parseBytes: value for WithPedantic option must be a bool: %w", err)
+ }
+ case identValidate{}:
+ if err := o.Value(&ctx.validate); err != nil {
+ return nil, fmt.Errorf("jws.parseBytes: value for WithValidate option must be a bool: %w", err)
+ }
+ case identVerify{}:
+ if err := o.Value(&verification); err != nil {
+ return nil, fmt.Errorf("jws.parseBytes: value for WithVerify option must be a bool: %w", err)
+ }
+ case identTypedClaim{}:
+ var pair claimPair
+ if err := o.Value(&pair); err != nil {
+ return nil, fmt.Errorf("jws.parseBytes: value for WithTypedClaim option must be claimPair: %w", err)
+ }
+ if ctx.localReg == nil {
+ ctx.localReg = json.NewRegistry()
+ }
+ ctx.localReg.Register(pair.Name, pair.Value)
+ }
+ }
+
+ if !verification {
+ ctx.skipVerification = true
+ }
+
+ lvo := len(verifyOpts)
+ if lvo == 0 && verification {
+ return nil, fmt.Errorf(`jwt.Parse: no keys for verification are provided (use jwt.WithVerify(false) to explicitly skip)`)
+ }
+
+ if lvo > 0 {
+ converted, err := toVerifyOptions(verifyOpts...)
+ if err != nil {
+ return nil, fmt.Errorf(`jwt.Parse: failed to convert options into jws.VerifyOption: %w`, err)
+ }
+ ctx.verifyOpts = converted
+ }
+
+ data = bytes.TrimSpace(data)
+ return parse(&ctx, data)
+}
+
+const (
+ _JwsVerifyInvalid = iota
+ _JwsVerifyDone
+ _JwsVerifyExpectNested
+ _JwsVerifySkipped
+)
+
+var _ = _JwsVerifyInvalid
+
+func verifyJWS(ctx *parseCtx, payload []byte) ([]byte, int, error) {
+ lvo := len(ctx.verifyOpts)
+ if lvo == 0 {
+ return nil, _JwsVerifySkipped, nil
+ }
+
+ if lvo == 1 && ctx.withKeyCount == 1 {
+ wk := ctx.withKey
+ alg, ok := wk.alg.(jwa.SignatureAlgorithm)
+ if ok && len(wk.options) == 0 {
+ verified, err := jws.VerifyCompactFast(wk.key, payload, alg)
+ if err != nil {
+ return nil, _JwsVerifyDone, err
+ }
+ return verified, _JwsVerifyDone, nil
+ }
+ }
+
+ verifyOpts := append(ctx.verifyOpts, jws.WithCompact())
+ verified, err := jws.Verify(payload, verifyOpts...)
+ return verified, _JwsVerifyDone, err
+}
+
+// verify parameter exists to make sure that we don't accidentally skip
+// over verification just because alg == "" or key == nil or something.
+func parse(ctx *parseCtx, data []byte) (Token, error) {
+ payload := data
+ const maxDecodeLevels = 2
+
+ // If cty = `JWT`, we expect this to be a nested structure
+ var expectNested bool
+
+OUTER:
+ for i := range maxDecodeLevels {
+ switch kind := jwx.GuessFormat(payload); kind {
+ case jwx.JWT:
+ if ctx.pedantic {
+ if expectNested {
+ return nil, fmt.Errorf(`expected nested encrypted/signed payload, got raw JWT`)
+ }
+ }
+
+ if i == 0 {
+ // We were NOT enveloped in other formats
+ if !ctx.skipVerification {
+ if _, _, err := verifyJWS(ctx, payload); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ break OUTER
+ case jwx.InvalidFormat:
+ return nil, UnknownPayloadTypeError()
+ case jwx.UnknownFormat:
+ // "Unknown" may include invalid JWTs, for example, those who lack "aud"
+ // claim. We could be pedantic and reject these
+ if ctx.pedantic {
+ return nil, fmt.Errorf(`unknown JWT format (pedantic)`)
+ }
+
+ if i == 0 {
+ // We were NOT enveloped in other formats
+ if !ctx.skipVerification {
+ if _, _, err := verifyJWS(ctx, payload); err != nil {
+ return nil, err
+ }
+ }
+ }
+ break OUTER
+ case jwx.JWS:
+ // Food for thought: This is going to break if you have multiple layers of
+ // JWS enveloping using different keys. It is highly unlikely use case,
+ // but it might happen.
+
+ // skipVerification should only be set to true by us. It's used
+ // when we just want to parse the JWT out of a payload
+ if !ctx.skipVerification {
+ // nested return value means:
+ // false (next envelope _may_ need to be processed)
+ // true (next envelope MUST be processed)
+ v, state, err := verifyJWS(ctx, payload)
+ if err != nil {
+ return nil, err
+ }
+
+ if state != _JwsVerifySkipped {
+ payload = v
+
+ // We only check for cty and typ if the pedantic flag is enabled
+ if !ctx.pedantic {
+ continue
+ }
+
+ if state == _JwsVerifyExpectNested {
+ expectNested = true
+ continue OUTER
+ }
+
+ // if we're not nested, we found our target. bail out of this loop
+ break OUTER
+ }
+ }
+
+ // No verification.
+ m, err := jws.Parse(data, jws.WithCompact())
+ if err != nil {
+ return nil, fmt.Errorf(`invalid jws message: %w`, err)
+ }
+ payload = m.Payload()
+ default:
+ return nil, fmt.Errorf(`unsupported format (layer: #%d)`, i+1)
+ }
+ expectNested = false
+ }
+
+ if ctx.token == nil {
+ ctx.token = New()
+ }
+
+ if ctx.localReg != nil {
+ dcToken, ok := ctx.token.(TokenWithDecodeCtx)
+ if !ok {
+ return nil, fmt.Errorf(`typed claim was requested, but the token (%T) does not support DecodeCtx`, ctx.token)
+ }
+ dc := json.NewDecodeCtx(ctx.localReg)
+ dcToken.SetDecodeCtx(dc)
+ defer func() { dcToken.SetDecodeCtx(nil) }()
+ }
+
+ if err := json.Unmarshal(payload, ctx.token); err != nil {
+ return nil, fmt.Errorf(`failed to parse token: %w`, err)
+ }
+
+ if ctx.validate {
+ if err := Validate(ctx.token, ctx.validateOpts...); err != nil {
+ return nil, err
+ }
+ }
+ return ctx.token, nil
+}
+
+// Sign is a convenience function to create a signed JWT token serialized in
+// compact form.
+//
+// It accepts either a raw key (e.g. rsa.PrivateKey, ecdsa.PrivateKey, etc)
+// or a jwk.Key, and the name of the algorithm that should be used to sign
+// the token.
+//
+// For well-known algorithms with no special considerations (e.g. detached
+// payloads, extra protected heders, etc), this function will automatically
+// take the fast path and bypass the jws.Sign() machinery, which improves
+// performance significantly.
+//
+// If the key is a jwk.Key and the key contains a key ID (`kid` field),
+// then it is added to the protected header generated by the signature
+//
+// The algorithm specified in the `alg` parameter must be able to support
+// the type of key you provided, otherwise an error is returned.
+// For convenience `alg` is of type jwa.KeyAlgorithm so you can pass
+// the return value of `(jwk.Key).Algorithm()` directly, but in practice
+// it must be an instance of jwa.SignatureAlgorithm, otherwise an error
+// is returned.
+//
+// The protected header will also automatically have the `typ` field set
+// to the literal value `JWT`, unless you provide a custom value for it
+// by jws.WithProtectedHeaders option, that can be passed to `jwt.WithKey“.
+func Sign(t Token, options ...SignOption) ([]byte, error) {
+ // fast path; can only happen if there is exactly one option
+ if len(options) == 1 && (options[0].Ident() == identKey{}) {
+ // The option must be a withKey option.
+ var wk *withKey
+ if err := options[0].Value(&wk); err == nil {
+ alg, ok := wk.alg.(jwa.SignatureAlgorithm)
+ if !ok {
+ return nil, fmt.Errorf(`jwt.Sign: invalid algorithm type %T. jwa.SignatureAlgorithm is required`, wk.alg)
+ }
+
+ // Check if option contains anything other than alg/key
+ if len(wk.options) == 0 {
+ // yay, we have something we can put in the FAST PATH!
+ return signFast(t, alg, wk.key)
+ }
+ // fallthrough
+ }
+ // fallthrough
+ }
+
+ var soptions []jws.SignOption
+ if l := len(options); l > 0 {
+ // we need to from SignOption to Option because ... reasons
+ // (todo: when go1.18 prevails, use type parameters
+ rawoptions := make([]Option, l)
+ for i, option := range options {
+ rawoptions[i] = option
+ }
+
+ converted, err := toSignOptions(rawoptions...)
+ if err != nil {
+ return nil, fmt.Errorf(`jwt.Sign: failed to convert options into jws.SignOption: %w`, err)
+ }
+ soptions = converted
+ }
+ return NewSerializer().sign(soptions...).Serialize(t)
+}
+
+// Equal compares two JWT tokens. Do not use `reflect.Equal` or the like
+// to compare tokens as they will also compare extra detail such as
+// sync.Mutex objects used to control concurrent access.
+//
+// The comparison for values is currently done using a simple equality ("=="),
+// except for time.Time, which uses time.Equal after dropping the monotonic
+// clock and truncating the values to 1 second accuracy.
+//
+// if both t1 and t2 are nil, returns true
+func Equal(t1, t2 Token) bool {
+ if t1 == nil && t2 == nil {
+ return true
+ }
+
+ // we already checked for t1 == t2 == nil, so safe to do this
+ if t1 == nil || t2 == nil {
+ return false
+ }
+
+ j1, err := json.Marshal(t1)
+ if err != nil {
+ return false
+ }
+
+ j2, err := json.Marshal(t2)
+ if err != nil {
+ return false
+ }
+
+ return bytes.Equal(j1, j2)
+}
+
+func (t *stdToken) Clone() (Token, error) {
+ dst := New()
+
+ dst.Options().Set(*(t.Options()))
+ for _, k := range t.Keys() {
+ var v any
+ if err := t.Get(k, &v); err != nil {
+ return nil, fmt.Errorf(`jwt.Clone: failed to get %s: %w`, k, err)
+ }
+ if err := dst.Set(k, v); err != nil {
+ return nil, fmt.Errorf(`jwt.Clone failed to set %s: %w`, k, err)
+ }
+ }
+ return dst, nil
+}
+
+type CustomDecoder = json.CustomDecoder
+type CustomDecodeFunc = json.CustomDecodeFunc
+
+// RegisterCustomField allows users to specify that a private field
+// be decoded as an instance of the specified type. This option has
+// a global effect.
+//
+// For example, suppose you have a custom field `x-birthday`, which
+// you want to represent as a string formatted in RFC3339 in JSON,
+// but want it back as `time.Time`.
+//
+// In such case you would register a custom field as follows
+//
+// jwt.RegisterCustomField(`x-birthday`, time.Time{})
+//
+// Then you can use a `time.Time` variable to extract the value
+// of `x-birthday` field, instead of having to use `any`
+// and later convert it to `time.Time`
+//
+// var bday time.Time
+// _ = token.Get(`x-birthday`, &bday)
+//
+// If you need a more fine-tuned control over the decoding process,
+// you can register a `CustomDecoder`. For example, below shows
+// how to register a decoder that can parse RFC822 format string:
+//
+// jwt.RegisterCustomField(`x-birthday`, jwt.CustomDecodeFunc(func(data []byte) (any, error) {
+// return time.Parse(time.RFC822, string(data))
+// }))
+//
+// Please note that use of custom fields can be problematic if you
+// are using a library that does not implement MarshalJSON/UnmarshalJSON
+// and you try to roundtrip from an object to JSON, and then back to an object.
+// For example, in the above example, you can _parse_ time values formatted
+// in the format specified in RFC822, but when you convert an object into
+// JSON, it will be formatted in RFC3339, because that's what `time.Time`
+// likes to do. To avoid this, it's always better to use a custom type
+// that wraps your desired type (in this case `time.Time`) and implement
+// MarshalJSON and UnmashalJSON.
+func RegisterCustomField(name string, object any) {
+ registry.Register(name, object)
+}
+
+func getDefaultTruncation() time.Duration {
+ return time.Duration(defaultTruncation.Load())
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/options.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/options.go
new file mode 100644
index 0000000000..cadf163b15
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/options.go
@@ -0,0 +1,322 @@
+package jwt
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jwe"
+ "github.com/lestrrat-go/jwx/v3/jwk"
+ "github.com/lestrrat-go/jwx/v3/jws"
+ "github.com/lestrrat-go/option/v2"
+)
+
+type identInsecureNoSignature struct{}
+type identKey struct{}
+type identKeySet struct{}
+type identTypedClaim struct{}
+type identVerifyAuto struct{}
+
+func toSignOptions(options ...Option) ([]jws.SignOption, error) {
+ soptions := make([]jws.SignOption, 0, len(options))
+ for _, option := range options {
+ switch option.Ident() {
+ case identInsecureNoSignature{}:
+ soptions = append(soptions, jws.WithInsecureNoSignature())
+ case identKey{}:
+ var wk withKey
+ if err := option.Value(&wk); err != nil {
+ return nil, fmt.Errorf(`toSignOtpions: failed to convert option value to withKey: %w`, err)
+ }
+ var wksoptions []jws.WithKeySuboption
+ for _, subopt := range wk.options {
+ wksopt, ok := subopt.(jws.WithKeySuboption)
+ if !ok {
+ return nil, fmt.Errorf(`expected optional arguments in jwt.WithKey to be jws.WithKeySuboption, but got %T`, subopt)
+ }
+ wksoptions = append(wksoptions, wksopt)
+ }
+
+ soptions = append(soptions, jws.WithKey(wk.alg, wk.key, wksoptions...))
+ case identSignOption{}:
+ var sigOpt jws.SignOption
+ if err := option.Value(&sigOpt); err != nil {
+ return nil, fmt.Errorf(`failed to decode SignOption: %w`, err)
+ }
+ soptions = append(soptions, sigOpt)
+ case identBase64Encoder{}:
+ var enc jws.Base64Encoder
+ if err := option.Value(&enc); err != nil {
+ return nil, fmt.Errorf(`failed to decode Base64Encoder: %w`, err)
+ }
+ soptions = append(soptions, jws.WithBase64Encoder(enc))
+ }
+ }
+ return soptions, nil
+}
+
+func toEncryptOptions(options ...Option) ([]jwe.EncryptOption, error) {
+ soptions := make([]jwe.EncryptOption, 0, len(options))
+ for _, option := range options {
+ switch option.Ident() {
+ case identKey{}:
+ var wk withKey
+ if err := option.Value(&wk); err != nil {
+ return nil, fmt.Errorf(`toEncryptOptions: failed to convert option value to withKey: %w`, err)
+ }
+ var wksoptions []jwe.WithKeySuboption
+ for _, subopt := range wk.options {
+ wksopt, ok := subopt.(jwe.WithKeySuboption)
+ if !ok {
+ return nil, fmt.Errorf(`expected optional arguments in jwt.WithKey to be jwe.WithKeySuboption, but got %T`, subopt)
+ }
+ wksoptions = append(wksoptions, wksopt)
+ }
+
+ soptions = append(soptions, jwe.WithKey(wk.alg, wk.key, wksoptions...))
+ case identEncryptOption{}:
+ var encOpt jwe.EncryptOption
+ if err := option.Value(&encOpt); err != nil {
+ return nil, fmt.Errorf(`failed to decode EncryptOption: %w`, err)
+ }
+ soptions = append(soptions, encOpt)
+ }
+ }
+ return soptions, nil
+}
+
+func toVerifyOptions(options ...Option) ([]jws.VerifyOption, error) {
+ voptions := make([]jws.VerifyOption, 0, len(options))
+ for _, option := range options {
+ switch option.Ident() {
+ case identKey{}:
+ var wk withKey
+ if err := option.Value(&wk); err != nil {
+ return nil, fmt.Errorf(`toVerifyOptions: failed to convert option value to withKey: %w`, err)
+ }
+ var wksoptions []jws.WithKeySuboption
+ for _, subopt := range wk.options {
+ wksopt, ok := subopt.(jws.WithKeySuboption)
+ if !ok {
+ return nil, fmt.Errorf(`expected optional arguments in jwt.WithKey to be jws.WithKeySuboption, but got %T`, subopt)
+ }
+ wksoptions = append(wksoptions, wksopt)
+ }
+
+ voptions = append(voptions, jws.WithKey(wk.alg, wk.key, wksoptions...))
+ case identKeySet{}:
+ var wks withKeySet
+ if err := option.Value(&wks); err != nil {
+ return nil, fmt.Errorf(`failed to convert option value to withKeySet: %w`, err)
+ }
+ var wkssoptions []jws.WithKeySetSuboption
+ for _, subopt := range wks.options {
+ wkssopt, ok := subopt.(jws.WithKeySetSuboption)
+ if !ok {
+ return nil, fmt.Errorf(`expected optional arguments in jwt.WithKey to be jws.WithKeySetSuboption, but got %T`, subopt)
+ }
+ wkssoptions = append(wkssoptions, wkssopt)
+ }
+
+ voptions = append(voptions, jws.WithKeySet(wks.set, wkssoptions...))
+ case identVerifyAuto{}:
+ var vo jws.VerifyOption
+ if err := option.Value(&vo); err != nil {
+ return nil, fmt.Errorf(`failed to decode VerifyOption: %w`, err)
+ }
+ voptions = append(voptions, vo)
+ case identKeyProvider{}:
+ var kp jws.KeyProvider
+ if err := option.Value(&kp); err != nil {
+ return nil, fmt.Errorf(`failed to decode KeyProvider: %w`, err)
+ }
+ voptions = append(voptions, jws.WithKeyProvider(kp))
+ case identBase64Encoder{}:
+ var enc jws.Base64Encoder
+ if err := option.Value(&enc); err != nil {
+ return nil, fmt.Errorf(`failed to decode Base64Encoder: %w`, err)
+ }
+ voptions = append(voptions, jws.WithBase64Encoder(enc))
+ }
+ }
+ return voptions, nil
+}
+
+type withKey struct {
+ alg jwa.KeyAlgorithm
+ key any
+ options []Option
+}
+
+// WithKey is a multipurpose option. It can be used for either jwt.Sign, jwt.Parse (and
+// its siblings), and jwt.Serializer methods. For signatures, please see the documentation
+// for `jws.WithKey` for more details. For encryption, please see the documentation
+// for `jwe.WithKey`.
+//
+// It is the caller's responsibility to match the suboptions to the operation that they
+// are performing. For example, you are not allowed to do this, because the operation
+// is to generate a signature, and yet you are passing options for jwe:
+//
+// jwt.Sign(token, jwt.WithKey(alg, key, jweOptions...))
+//
+// In the above example, the creation of the option via `jwt.WithKey()` will work, but
+// when `jwt.Sign()` is called, the fact that you passed JWE suboptions will be
+// detected, and an error will occur.
+func WithKey(alg jwa.KeyAlgorithm, key any, suboptions ...Option) SignEncryptParseOption {
+ return &signEncryptParseOption{option.New(identKey{}, &withKey{
+ alg: alg,
+ key: key,
+ options: suboptions,
+ })}
+}
+
+type withKeySet struct {
+ set jwk.Set
+ options []any
+}
+
+// WithKeySet forces the Parse method to verify the JWT message
+// using one of the keys in the given key set.
+//
+// Key IDs (`kid`) in the JWS message and the JWK in the given `jwk.Set`
+// must match in order for the key to be a candidate to be used for
+// verification.
+//
+// This is for security reasons. If you must disable it, you can do so by
+// specifying `jws.WithRequireKid(false)` in the suboptions. But we don't
+// recommend it unless you know exactly what the security implications are
+//
+// When using this option, keys MUST have a proper 'alg' field
+// set. This is because we need to know the exact algorithm that
+// you (the user) wants to use to verify the token. We do NOT
+// trust the token's headers, because they can easily be tampered with.
+//
+// However, there _is_ a workaround if you do understand the risks
+// of allowing a library to automatically choose a signature verification strategy,
+// and you do not mind the verification process having to possibly
+// attempt using multiple times before succeeding to verify. See
+// `jws.InferAlgorithmFromKey` option
+//
+// If you have only one key in the set, and are sure you want to
+// use that key, you can use the `jwt.WithDefaultKey` option.
+func WithKeySet(set jwk.Set, options ...any) ParseOption {
+ return &parseOption{option.New(identKeySet{}, &withKeySet{
+ set: set,
+ options: options,
+ })}
+}
+
+// WithIssuer specifies that expected issuer value. If not specified,
+// the value of issuer is not verified at all.
+func WithIssuer(s string) ValidateOption {
+ return WithValidator(issuerClaimValueIs(s))
+}
+
+// WithSubject specifies that expected subject value. If not specified,
+// the value of subject is not verified at all.
+func WithSubject(s string) ValidateOption {
+ return WithValidator(ClaimValueIs(SubjectKey, s))
+}
+
+// WithJwtID specifies that expected jti value. If not specified,
+// the value of jti is not verified at all.
+func WithJwtID(s string) ValidateOption {
+ return WithValidator(ClaimValueIs(JwtIDKey, s))
+}
+
+// WithAudience specifies that expected audience value.
+// `Validate()` will return true if one of the values in the `aud` element
+// matches this value. If not specified, the value of `aud` is not
+// verified at all.
+func WithAudience(s string) ValidateOption {
+ return WithValidator(audienceClaimContainsString(s))
+}
+
+// WithClaimValue specifies the expected value for a given claim
+func WithClaimValue(name string, v any) ValidateOption {
+ return WithValidator(ClaimValueIs(name, v))
+}
+
+// WithTypedClaim allows a private claim to be parsed into the object type of
+// your choice. It works much like the RegisterCustomField, but the effect
+// is only applicable to the jwt.Parse function call which receives this option.
+//
+// While this can be extremely useful, this option should be used with caution:
+// There are many caveats that your entire team/user-base needs to be aware of,
+// and therefore in general its use is discouraged. Only use it when you know
+// what you are doing, and you document its use clearly for others.
+//
+// First and foremost, this is a "per-object" option. Meaning that given the same
+// serialized format, it is possible to generate two objects whose internal
+// representations may differ. That is, if you parse one _WITH_ the option,
+// and the other _WITHOUT_, their internal representation may completely differ.
+// This could potentially lead to problems.
+//
+// Second, specifying this option will slightly slow down the decoding process
+// as it needs to consult multiple definitions sources (global and local), so
+// be careful if you are decoding a large number of tokens, as the effects will stack up.
+//
+// Finally, this option will also NOT work unless the tokens themselves support such
+// parsing mechanism. For example, while tokens obtained from `jwt.New()` and
+// `openid.New()` will respect this option, if you provide your own custom
+// token type, it will need to implement the TokenWithDecodeCtx interface.
+func WithTypedClaim(name string, object any) ParseOption {
+ return &parseOption{option.New(identTypedClaim{}, claimPair{Name: name, Value: object})}
+}
+
+// WithRequiredClaim specifies that the claim identified the given name
+// must exist in the token. Only the existence of the claim is checked:
+// the actual value associated with that field is not checked.
+func WithRequiredClaim(name string) ValidateOption {
+ return WithValidator(IsRequired(name))
+}
+
+// WithMaxDelta specifies that given two claims `c1` and `c2` that represent time, the difference in
+// time.Duration must be less than equal to the value specified by `d`. If `c1` or `c2` is the
+// empty string, the current time (as computed by `time.Now` or the object passed via
+// `WithClock()`) is used for the comparison.
+//
+// `c1` and `c2` are also assumed to be required, therefore not providing either claim in the
+// token will result in an error.
+//
+// Because there is no way of reliably knowing how to parse private claims, we currently only
+// support `iat`, `exp`, and `nbf` claims.
+//
+// If the empty string is passed to c1 or c2, then the current time (as calculated by time.Now() or
+// the clock object provided via WithClock()) is used.
+//
+// For example, in order to specify that `exp` - `iat` should be less than 10*time.Second, you would write
+//
+// jwt.Validate(token, jwt.WithMaxDelta(10*time.Second, jwt.ExpirationKey, jwt.IssuedAtKey))
+//
+// If AcceptableSkew of 2 second is specified, the above will return valid for any value of
+// `exp` - `iat` between 8 (10-2) and 12 (10+2).
+func WithMaxDelta(dur time.Duration, c1, c2 string) ValidateOption {
+ return WithValidator(MaxDeltaIs(c1, c2, dur))
+}
+
+// WithMinDelta is almost exactly the same as WithMaxDelta, but force validation to fail if
+// the difference between time claims are less than dur.
+//
+// For example, in order to specify that `exp` - `iat` should be greater than 10*time.Second, you would write
+//
+// jwt.Validate(token, jwt.WithMinDelta(10*time.Second, jwt.ExpirationKey, jwt.IssuedAtKey))
+//
+// The validation would fail if the difference is less than 10 seconds.
+func WithMinDelta(dur time.Duration, c1, c2 string) ValidateOption {
+ return WithValidator(MinDeltaIs(c1, c2, dur))
+}
+
+// WithVerifyAuto specifies that the JWS verification should be attempted
+// by using the data available in the JWS message. Currently only verification
+// method available is to use the keys available in the JWKS URL pointed
+// in the `jku` field.
+//
+// Please read the documentation for `jws.VerifyAuto` for more details.
+func WithVerifyAuto(f jwk.Fetcher, options ...jwk.FetchOption) ParseOption {
+ return &parseOption{option.New(identVerifyAuto{}, jws.WithVerifyAuto(f, options...))}
+}
+
+func WithInsecureNoSignature() SignOption {
+ return &signEncryptParseOption{option.New(identInsecureNoSignature{}, (any)(nil))}
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/options.yaml b/vendor/github.com/lestrrat-go/jwx/v3/jwt/options.yaml
new file mode 100644
index 0000000000..bfcadfac25
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/options.yaml
@@ -0,0 +1,274 @@
+package_name: jwt
+output: jwt/options_gen.go
+interfaces:
+ - name: GlobalOption
+ comment: |
+ GlobalOption describes an Option that can be passed to `Settings()`.
+ - name: EncryptOption
+ comment: |
+ EncryptOption describes an Option that can be passed to (jwt.Serializer).Encrypt
+ - name: ParseOption
+ methods:
+ - parseOption
+ - readFileOption
+ comment: |
+ ParseOption describes an Option that can be passed to `jwt.Parse()`.
+ ParseOption also implements ReadFileOption, therefore it may be
+ safely pass them to `jwt.ReadFile()`
+ - name: SignOption
+ comment: |
+ SignOption describes an Option that can be passed to `jwt.Sign()` or
+ (jwt.Serializer).Sign
+ - name: SignParseOption
+ methods:
+ - signOption
+ - parseOption
+ - readFileOption
+ comment: |
+ SignParseOption describes an Option that can be passed to both `jwt.Sign()` or
+ `jwt.Parse()`
+ - name: SignEncryptParseOption
+ methods:
+ - parseOption
+ - encryptOption
+ - readFileOption
+ - signOption
+ comment: |
+ SignEncryptParseOption describes an Option that can be passed to both `jwt.Sign()` or
+ `jwt.Parse()`
+ - name: ValidateOption
+ methods:
+ - parseOption
+ - readFileOption
+ - validateOption
+ comment: |
+ ValidateOption describes an Option that can be passed to Validate().
+ ValidateOption also implements ParseOption, therefore it may be
+ safely passed to `Parse()` (and thus `jwt.ReadFile()`)
+ - name: ReadFileOption
+ comment: |
+ ReadFileOption is a type of `Option` that can be passed to `jws.ReadFile`
+ - name: GlobalValidateOption
+ methods:
+ - globalOption
+ - parseOption
+ - readFileOption
+ - validateOption
+ comment: |
+ GlobalValidateOption describes an Option that can be passed to `jwt.Settings()` and `jwt.Validate()`
+options:
+ - ident: AcceptableSkew
+ interface: ValidateOption
+ argument_type: time.Duration
+ comment: |
+ WithAcceptableSkew specifies the duration in which exp, iat and nbf
+ claims may differ by. This value should be positive
+ - ident: Truncation
+ interface: GlobalValidateOption
+ argument_type: time.Duration
+ comment: |
+ WithTruncation specifies the amount that should be used when
+ truncating time values used during time-based validation routines,
+ and by default this is disabled.
+
+ In v2 of this library, time values were truncated down to second accuracy, i.e.
+ 1.0000001 seconds is truncated to 1 second. To restore this behavior, set
+ this value to `time.Second`
+
+ Since v3, this option can be passed to `jwt.Settings()` to set the truncation
+ value globally, as well as per invocation of `jwt.Validate()`
+ - ident: Clock
+ interface: ValidateOption
+ argument_type: Clock
+ comment: |
+ WithClock specifies the `Clock` to be used when verifying
+ exp, iat and nbf claims.
+ - ident: Context
+ interface: ValidateOption
+ argument_type: context.Context
+ comment: |
+ WithContext allows you to specify a context.Context object to be used
+ with `jwt.Validate()` option.
+
+ Please be aware that in the next major release of this library,
+ `jwt.Validate()`'s signature will change to include an explicit
+ `context.Context` object.
+ - ident: ResetValidators
+ interface: ValidateOption
+ argument_type: bool
+ comment: |
+ WithResetValidators specifies that the default validators should be
+ reset before applying the custom validators. By default `jwt.Validate()`
+ checks for the validity of JWT by checking `exp`, `nbf`, and `iat`, even
+ when you specify more validators through other options.
+
+ You SHOULD NOT use this option unless you know exactly what you are doing,
+ as this will pose significant security issues when used incorrectly.
+
+ Using this option with the value `true` will remove all default checks,
+ and will expect you to specify validators as options. This is useful when you
+ want to skip the default validators and only use specific validators, such as
+ for https://openid.net/specs/openid-connect-rpinitiated-1_0.html, where
+ the token could be accepted even if the token is expired.
+
+ If you set this option to true and you do not specify any validators,
+ `jwt.Validate()` will return an error.
+
+ The default value is `false` (`iat`, `exp`, and `nbf` are automatically checked).
+ - ident: FlattenAudience
+ interface: GlobalOption
+ argument_type: bool
+ comment: |
+ WithFlattenAudience specifies the the `jwt.FlattenAudience` option on
+ every token defaults to enabled. You can still disable this on a per-object
+ basis using the `jwt.Options().Disable(jwt.FlattenAudience)` method call.
+
+ See the documentation for `jwt.TokenOptionSet`, `(jwt.Token).Options`, and
+ `jwt.FlattenAudience` for more details
+ - ident: FormKey
+ interface: ParseOption
+ argument_type: string
+ comment: |
+ WithFormKey is used to specify header keys to search for tokens.
+
+ While the type system allows this option to be passed to jwt.Parse() directly,
+ doing so will have no effect. Only use it for HTTP request parsing functions
+ - ident: HeaderKey
+ interface: ParseOption
+ argument_type: string
+ comment: |
+ WithHeaderKey is used to specify header keys to search for tokens.
+
+ While the type system allows this option to be passed to `jwt.Parse()` directly,
+ doing so will have no effect. Only use it for HTTP request parsing functions
+ - ident: Cookie
+ interface: ParseOption
+ argument_type: '**http.Cookie'
+ comment: |
+ WithCookie is used to specify a variable to store the cookie used when `jwt.ParseCookie()`
+ is called. This allows you to inspect the cookie for additional information after a successful
+ parsing of the JWT token stored in the cookie.
+
+ While the type system allows this option to be passed to `jwt.Parse()` directly,
+ doing so will have no effect. Only use it for HTTP request parsing functions
+ - ident: CookieKey
+ interface: ParseOption
+ argument_type: string
+ comment: |
+ WithCookieKey is used to specify cookie keys to search for tokens.
+
+ While the type system allows this option to be passed to `jwt.Parse()` directly,
+ doing so will have no effect. Only use it for HTTP request parsing functions
+ - ident: Token
+ interface: ParseOption
+ argument_type: Token
+ comment: |
+ WithToken specifies the token instance in which the resulting JWT is stored
+ when parsing JWT tokens
+ - ident: Validate
+ interface: ParseOption
+ argument_type: bool
+ comment: |
+ WithValidate is passed to `Parse()` method to denote that the
+ validation of the JWT token should be performed (or not) after
+ a successful parsing of the incoming payload.
+
+ This option is enabled by default.
+
+ If you would like disable validation,
+ you must use `jwt.WithValidate(false)` or use `jwt.ParseInsecure()`
+ - ident: Verify
+ interface: ParseOption
+ argument_type: bool
+ comment: |
+ WithVerify is passed to `Parse()` method to denote that the
+ signature verification should be performed after a successful
+ deserialization of the incoming payload.
+
+ This option is enabled by default.
+
+ If you do not provide any verification key sources, `jwt.Parse()`
+ would return an error.
+
+ If you would like to only parse the JWT payload and not verify it,
+ you must use `jwt.WithVerify(false)` or use `jwt.ParseInsecure()`
+ - ident: KeyProvider
+ interface: ParseOption
+ argument_type: jws.KeyProvider
+ comment: |
+ WithKeyProvider allows users to specify an object to provide keys to
+ sign/verify tokens using arbitrary code. Please read the documentation
+ for `jws.KeyProvider` in the `jws` package for details on how this works.
+ - ident: Pedantic
+ interface: ParseOption
+ argument_type: bool
+ comment: |
+ WithPedantic enables pedantic mode for parsing JWTs. Currently this only
+ applies to checking for the correct `typ` and/or `cty` when necessary.
+ - ident: EncryptOption
+ interface: EncryptOption
+ argument_type: jwe.EncryptOption
+ comment: |
+ WithEncryptOption provides an escape hatch for cases where extra options to
+ `(jws.Serializer).Encrypt()` must be specified when using `jwt.Sign()`. Normally you do not
+ need to use this.
+ - ident: SignOption
+ interface: SignOption
+ argument_type: jws.SignOption
+ comment: |
+ WithSignOption provides an escape hatch for cases where extra options to
+ `jws.Sign()` must be specified when using `jwt.Sign()`. Normally you do not
+ need to use this.
+ - ident: Validator
+ interface: ValidateOption
+ argument_type: Validator
+ comment: |
+ WithValidator validates the token with the given Validator.
+
+ For example, in order to validate tokens that are only valid during August, you would write
+
+ validator := jwt.ValidatorFunc(func(_ context.Context, t jwt.Token) error {
+ if time.Now().Month() != 8 {
+ return fmt.Errorf(`tokens are only valid during August!`)
+ }
+ return nil
+ })
+ err := jwt.Validate(token, jwt.WithValidator(validator))
+ - ident: FS
+ interface: ReadFileOption
+ argument_type: fs.FS
+ comment: |
+ WithFS specifies the source `fs.FS` object to read the file from.
+ - ident: NumericDateParsePrecision
+ interface: GlobalOption
+ argument_type: int
+ comment: |
+ WithNumericDateParsePrecision sets the precision up to which the
+ library uses to parse fractional dates found in the numeric date
+ fields. Default is 0 (second, no fractions), max is 9 (nanosecond)
+ - ident: NumericDateFormatPrecision
+ interface: GlobalOption
+ argument_type: int
+ comment: |
+ WithNumericDateFormatPrecision sets the precision up to which the
+ library uses to format fractional dates found in the numeric date
+ fields. Default is 0 (second, no fractions), max is 9 (nanosecond)
+ - ident: NumericDateParsePedantic
+ interface: GlobalOption
+ argument_type: bool
+ comment: |
+ WithNumericDateParsePedantic specifies if the parser should behave
+ in a pedantic manner when parsing numeric dates. Normally this library
+ attempts to interpret timestamps as a numeric value representing
+ number of seconds (with an optional fractional part), but if that fails
+ it tries to parse using a RFC3339 parser. This allows us to parse
+ payloads from non-conforming servers.
+
+ However, when you set WithNumericDateParePedantic to `true`, the
+ RFC3339 parser is not tried, and we expect a numeric value strictly
+ - ident: Base64Encoder
+ interface: SignParseOption
+ argument_type: jws.Base64Encoder
+ comment: |
+ WithBase64Encoder specifies the base64 encoder to use for signing
+ tokens and verifying JWS signatures.
\ No newline at end of file
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/options_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/options_gen.go
new file mode 100644
index 0000000000..3a644a6e4c
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/options_gen.go
@@ -0,0 +1,495 @@
+// Code generated by tools/cmd/genoptions/main.go. DO NOT EDIT.
+
+package jwt
+
+import (
+ "context"
+ "io/fs"
+ "net/http"
+ "time"
+
+ "github.com/lestrrat-go/jwx/v3/jwe"
+ "github.com/lestrrat-go/jwx/v3/jws"
+ "github.com/lestrrat-go/option/v2"
+)
+
+type Option = option.Interface
+
+// EncryptOption describes an Option that can be passed to (jwt.Serializer).Encrypt
+type EncryptOption interface {
+ Option
+ encryptOption()
+}
+
+type encryptOption struct {
+ Option
+}
+
+func (*encryptOption) encryptOption() {}
+
+// GlobalOption describes an Option that can be passed to `Settings()`.
+type GlobalOption interface {
+ Option
+ globalOption()
+}
+
+type globalOption struct {
+ Option
+}
+
+func (*globalOption) globalOption() {}
+
+// GlobalValidateOption describes an Option that can be passed to `jwt.Settings()` and `jwt.Validate()`
+type GlobalValidateOption interface {
+ Option
+ globalOption()
+ parseOption()
+ readFileOption()
+ validateOption()
+}
+
+type globalValidateOption struct {
+ Option
+}
+
+func (*globalValidateOption) globalOption() {}
+
+func (*globalValidateOption) parseOption() {}
+
+func (*globalValidateOption) readFileOption() {}
+
+func (*globalValidateOption) validateOption() {}
+
+// ParseOption describes an Option that can be passed to `jwt.Parse()`.
+// ParseOption also implements ReadFileOption, therefore it may be
+// safely pass them to `jwt.ReadFile()`
+type ParseOption interface {
+ Option
+ parseOption()
+ readFileOption()
+}
+
+type parseOption struct {
+ Option
+}
+
+func (*parseOption) parseOption() {}
+
+func (*parseOption) readFileOption() {}
+
+// ReadFileOption is a type of `Option` that can be passed to `jws.ReadFile`
+type ReadFileOption interface {
+ Option
+ readFileOption()
+}
+
+type readFileOption struct {
+ Option
+}
+
+func (*readFileOption) readFileOption() {}
+
+// SignEncryptParseOption describes an Option that can be passed to both `jwt.Sign()` or
+// `jwt.Parse()`
+type SignEncryptParseOption interface {
+ Option
+ parseOption()
+ encryptOption()
+ readFileOption()
+ signOption()
+}
+
+type signEncryptParseOption struct {
+ Option
+}
+
+func (*signEncryptParseOption) parseOption() {}
+
+func (*signEncryptParseOption) encryptOption() {}
+
+func (*signEncryptParseOption) readFileOption() {}
+
+func (*signEncryptParseOption) signOption() {}
+
+// SignOption describes an Option that can be passed to `jwt.Sign()` or
+// (jwt.Serializer).Sign
+type SignOption interface {
+ Option
+ signOption()
+}
+
+type signOption struct {
+ Option
+}
+
+func (*signOption) signOption() {}
+
+// SignParseOption describes an Option that can be passed to both `jwt.Sign()` or
+// `jwt.Parse()`
+type SignParseOption interface {
+ Option
+ signOption()
+ parseOption()
+ readFileOption()
+}
+
+type signParseOption struct {
+ Option
+}
+
+func (*signParseOption) signOption() {}
+
+func (*signParseOption) parseOption() {}
+
+func (*signParseOption) readFileOption() {}
+
+// ValidateOption describes an Option that can be passed to Validate().
+// ValidateOption also implements ParseOption, therefore it may be
+// safely passed to `Parse()` (and thus `jwt.ReadFile()`)
+type ValidateOption interface {
+ Option
+ parseOption()
+ readFileOption()
+ validateOption()
+}
+
+type validateOption struct {
+ Option
+}
+
+func (*validateOption) parseOption() {}
+
+func (*validateOption) readFileOption() {}
+
+func (*validateOption) validateOption() {}
+
+type identAcceptableSkew struct{}
+type identBase64Encoder struct{}
+type identClock struct{}
+type identContext struct{}
+type identCookie struct{}
+type identCookieKey struct{}
+type identEncryptOption struct{}
+type identFS struct{}
+type identFlattenAudience struct{}
+type identFormKey struct{}
+type identHeaderKey struct{}
+type identKeyProvider struct{}
+type identNumericDateFormatPrecision struct{}
+type identNumericDateParsePedantic struct{}
+type identNumericDateParsePrecision struct{}
+type identPedantic struct{}
+type identResetValidators struct{}
+type identSignOption struct{}
+type identToken struct{}
+type identTruncation struct{}
+type identValidate struct{}
+type identValidator struct{}
+type identVerify struct{}
+
+func (identAcceptableSkew) String() string {
+ return "WithAcceptableSkew"
+}
+
+func (identBase64Encoder) String() string {
+ return "WithBase64Encoder"
+}
+
+func (identClock) String() string {
+ return "WithClock"
+}
+
+func (identContext) String() string {
+ return "WithContext"
+}
+
+func (identCookie) String() string {
+ return "WithCookie"
+}
+
+func (identCookieKey) String() string {
+ return "WithCookieKey"
+}
+
+func (identEncryptOption) String() string {
+ return "WithEncryptOption"
+}
+
+func (identFS) String() string {
+ return "WithFS"
+}
+
+func (identFlattenAudience) String() string {
+ return "WithFlattenAudience"
+}
+
+func (identFormKey) String() string {
+ return "WithFormKey"
+}
+
+func (identHeaderKey) String() string {
+ return "WithHeaderKey"
+}
+
+func (identKeyProvider) String() string {
+ return "WithKeyProvider"
+}
+
+func (identNumericDateFormatPrecision) String() string {
+ return "WithNumericDateFormatPrecision"
+}
+
+func (identNumericDateParsePedantic) String() string {
+ return "WithNumericDateParsePedantic"
+}
+
+func (identNumericDateParsePrecision) String() string {
+ return "WithNumericDateParsePrecision"
+}
+
+func (identPedantic) String() string {
+ return "WithPedantic"
+}
+
+func (identResetValidators) String() string {
+ return "WithResetValidators"
+}
+
+func (identSignOption) String() string {
+ return "WithSignOption"
+}
+
+func (identToken) String() string {
+ return "WithToken"
+}
+
+func (identTruncation) String() string {
+ return "WithTruncation"
+}
+
+func (identValidate) String() string {
+ return "WithValidate"
+}
+
+func (identValidator) String() string {
+ return "WithValidator"
+}
+
+func (identVerify) String() string {
+ return "WithVerify"
+}
+
+// WithAcceptableSkew specifies the duration in which exp, iat and nbf
+// claims may differ by. This value should be positive
+func WithAcceptableSkew(v time.Duration) ValidateOption {
+ return &validateOption{option.New(identAcceptableSkew{}, v)}
+}
+
+// WithBase64Encoder specifies the base64 encoder to use for signing
+// tokens and verifying JWS signatures.
+func WithBase64Encoder(v jws.Base64Encoder) SignParseOption {
+ return &signParseOption{option.New(identBase64Encoder{}, v)}
+}
+
+// WithClock specifies the `Clock` to be used when verifying
+// exp, iat and nbf claims.
+func WithClock(v Clock) ValidateOption {
+ return &validateOption{option.New(identClock{}, v)}
+}
+
+// WithContext allows you to specify a context.Context object to be used
+// with `jwt.Validate()` option.
+//
+// Please be aware that in the next major release of this library,
+// `jwt.Validate()`'s signature will change to include an explicit
+// `context.Context` object.
+func WithContext(v context.Context) ValidateOption {
+ return &validateOption{option.New(identContext{}, v)}
+}
+
+// WithCookie is used to specify a variable to store the cookie used when `jwt.ParseCookie()`
+// is called. This allows you to inspect the cookie for additional information after a successful
+// parsing of the JWT token stored in the cookie.
+//
+// While the type system allows this option to be passed to `jwt.Parse()` directly,
+// doing so will have no effect. Only use it for HTTP request parsing functions
+func WithCookie(v **http.Cookie) ParseOption {
+ return &parseOption{option.New(identCookie{}, v)}
+}
+
+// WithCookieKey is used to specify cookie keys to search for tokens.
+//
+// While the type system allows this option to be passed to `jwt.Parse()` directly,
+// doing so will have no effect. Only use it for HTTP request parsing functions
+func WithCookieKey(v string) ParseOption {
+ return &parseOption{option.New(identCookieKey{}, v)}
+}
+
+// WithEncryptOption provides an escape hatch for cases where extra options to
+// `(jws.Serializer).Encrypt()` must be specified when using `jwt.Sign()`. Normally you do not
+// need to use this.
+func WithEncryptOption(v jwe.EncryptOption) EncryptOption {
+ return &encryptOption{option.New(identEncryptOption{}, v)}
+}
+
+// WithFS specifies the source `fs.FS` object to read the file from.
+func WithFS(v fs.FS) ReadFileOption {
+ return &readFileOption{option.New(identFS{}, v)}
+}
+
+// WithFlattenAudience specifies the the `jwt.FlattenAudience` option on
+// every token defaults to enabled. You can still disable this on a per-object
+// basis using the `jwt.Options().Disable(jwt.FlattenAudience)` method call.
+//
+// See the documentation for `jwt.TokenOptionSet`, `(jwt.Token).Options`, and
+// `jwt.FlattenAudience` for more details
+func WithFlattenAudience(v bool) GlobalOption {
+ return &globalOption{option.New(identFlattenAudience{}, v)}
+}
+
+// WithFormKey is used to specify header keys to search for tokens.
+//
+// While the type system allows this option to be passed to jwt.Parse() directly,
+// doing so will have no effect. Only use it for HTTP request parsing functions
+func WithFormKey(v string) ParseOption {
+ return &parseOption{option.New(identFormKey{}, v)}
+}
+
+// WithHeaderKey is used to specify header keys to search for tokens.
+//
+// While the type system allows this option to be passed to `jwt.Parse()` directly,
+// doing so will have no effect. Only use it for HTTP request parsing functions
+func WithHeaderKey(v string) ParseOption {
+ return &parseOption{option.New(identHeaderKey{}, v)}
+}
+
+// WithKeyProvider allows users to specify an object to provide keys to
+// sign/verify tokens using arbitrary code. Please read the documentation
+// for `jws.KeyProvider` in the `jws` package for details on how this works.
+func WithKeyProvider(v jws.KeyProvider) ParseOption {
+ return &parseOption{option.New(identKeyProvider{}, v)}
+}
+
+// WithNumericDateFormatPrecision sets the precision up to which the
+// library uses to format fractional dates found in the numeric date
+// fields. Default is 0 (second, no fractions), max is 9 (nanosecond)
+func WithNumericDateFormatPrecision(v int) GlobalOption {
+ return &globalOption{option.New(identNumericDateFormatPrecision{}, v)}
+}
+
+// WithNumericDateParsePedantic specifies if the parser should behave
+// in a pedantic manner when parsing numeric dates. Normally this library
+// attempts to interpret timestamps as a numeric value representing
+// number of seconds (with an optional fractional part), but if that fails
+// it tries to parse using a RFC3339 parser. This allows us to parse
+// payloads from non-conforming servers.
+//
+// However, when you set WithNumericDateParePedantic to `true`, the
+// RFC3339 parser is not tried, and we expect a numeric value strictly
+func WithNumericDateParsePedantic(v bool) GlobalOption {
+ return &globalOption{option.New(identNumericDateParsePedantic{}, v)}
+}
+
+// WithNumericDateParsePrecision sets the precision up to which the
+// library uses to parse fractional dates found in the numeric date
+// fields. Default is 0 (second, no fractions), max is 9 (nanosecond)
+func WithNumericDateParsePrecision(v int) GlobalOption {
+ return &globalOption{option.New(identNumericDateParsePrecision{}, v)}
+}
+
+// WithPedantic enables pedantic mode for parsing JWTs. Currently this only
+// applies to checking for the correct `typ` and/or `cty` when necessary.
+func WithPedantic(v bool) ParseOption {
+ return &parseOption{option.New(identPedantic{}, v)}
+}
+
+// WithResetValidators specifies that the default validators should be
+// reset before applying the custom validators. By default `jwt.Validate()`
+// checks for the validity of JWT by checking `exp`, `nbf`, and `iat`, even
+// when you specify more validators through other options.
+//
+// You SHOULD NOT use this option unless you know exactly what you are doing,
+// as this will pose significant security issues when used incorrectly.
+//
+// Using this option with the value `true` will remove all default checks,
+// and will expect you to specify validators as options. This is useful when you
+// want to skip the default validators and only use specific validators, such as
+// for https://openid.net/specs/openid-connect-rpinitiated-1_0.html, where
+// the token could be accepted even if the token is expired.
+//
+// If you set this option to true and you do not specify any validators,
+// `jwt.Validate()` will return an error.
+//
+// The default value is `false` (`iat`, `exp`, and `nbf` are automatically checked).
+func WithResetValidators(v bool) ValidateOption {
+ return &validateOption{option.New(identResetValidators{}, v)}
+}
+
+// WithSignOption provides an escape hatch for cases where extra options to
+// `jws.Sign()` must be specified when using `jwt.Sign()`. Normally you do not
+// need to use this.
+func WithSignOption(v jws.SignOption) SignOption {
+ return &signOption{option.New(identSignOption{}, v)}
+}
+
+// WithToken specifies the token instance in which the resulting JWT is stored
+// when parsing JWT tokens
+func WithToken(v Token) ParseOption {
+ return &parseOption{option.New(identToken{}, v)}
+}
+
+// WithTruncation specifies the amount that should be used when
+// truncating time values used during time-based validation routines,
+// and by default this is disabled.
+//
+// In v2 of this library, time values were truncated down to second accuracy, i.e.
+// 1.0000001 seconds is truncated to 1 second. To restore this behavior, set
+// this value to `time.Second`
+//
+// Since v3, this option can be passed to `jwt.Settings()` to set the truncation
+// value globally, as well as per invocation of `jwt.Validate()`
+func WithTruncation(v time.Duration) GlobalValidateOption {
+ return &globalValidateOption{option.New(identTruncation{}, v)}
+}
+
+// WithValidate is passed to `Parse()` method to denote that the
+// validation of the JWT token should be performed (or not) after
+// a successful parsing of the incoming payload.
+//
+// This option is enabled by default.
+//
+// If you would like disable validation,
+// you must use `jwt.WithValidate(false)` or use `jwt.ParseInsecure()`
+func WithValidate(v bool) ParseOption {
+ return &parseOption{option.New(identValidate{}, v)}
+}
+
+// WithValidator validates the token with the given Validator.
+//
+// For example, in order to validate tokens that are only valid during August, you would write
+//
+// validator := jwt.ValidatorFunc(func(_ context.Context, t jwt.Token) error {
+// if time.Now().Month() != 8 {
+// return fmt.Errorf(`tokens are only valid during August!`)
+// }
+// return nil
+// })
+// err := jwt.Validate(token, jwt.WithValidator(validator))
+func WithValidator(v Validator) ValidateOption {
+ return &validateOption{option.New(identValidator{}, v)}
+}
+
+// WithVerify is passed to `Parse()` method to denote that the
+// signature verification should be performed after a successful
+// deserialization of the incoming payload.
+//
+// This option is enabled by default.
+//
+// If you do not provide any verification key sources, `jwt.Parse()`
+// would return an error.
+//
+// If you would like to only parse the JWT payload and not verify it,
+// you must use `jwt.WithVerify(false)` or use `jwt.ParseInsecure()`
+func WithVerify(v bool) ParseOption {
+ return &parseOption{option.New(identVerify{}, v)}
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/serialize.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/serialize.go
new file mode 100644
index 0000000000..9d3bdac94a
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/serialize.go
@@ -0,0 +1,264 @@
+package jwt
+
+import (
+ "fmt"
+
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+ "github.com/lestrrat-go/jwx/v3/jwe"
+ "github.com/lestrrat-go/jwx/v3/jws"
+)
+
+type SerializeCtx interface {
+ Step() int
+ Nested() bool
+}
+
+type serializeCtx struct {
+ step int
+ nested bool
+}
+
+func (ctx *serializeCtx) Step() int {
+ return ctx.step
+}
+
+func (ctx *serializeCtx) Nested() bool {
+ return ctx.nested
+}
+
+type SerializeStep interface {
+ Serialize(SerializeCtx, any) (any, error)
+}
+
+// errStep is always an error. used to indicate that a method like
+// serializer.Sign or Encrypt already errored out on configuration
+type errStep struct {
+ err error
+}
+
+func (e errStep) Serialize(_ SerializeCtx, _ any) (any, error) {
+ return nil, e.err
+}
+
+// Serializer is a generic serializer for JWTs. Whereas other convenience
+// functions can only do one thing (such as generate a JWS signed JWT),
+// Using this construct you can serialize the token however you want.
+//
+// By default, the serializer only marshals the token into a JSON payload.
+// You must set up the rest of the steps that should be taken by the
+// serializer.
+//
+// For example, to marshal the token into JSON, then apply JWS and JWE
+// in that order, you would do:
+//
+// serialized, err := jwt.NewSerializer().
+// Sign(jwa.RS256, key).
+// Encrypt(jwe.WithEncryptOption(jwe.WithKey(jwa.RSA_OAEP(), publicKey))).
+// Serialize(token)
+//
+// The `jwt.Sign()` function is equivalent to
+//
+// serialized, err := jwt.NewSerializer().
+// Sign(...args...).
+// Serialize(token)
+type Serializer struct {
+ steps []SerializeStep
+}
+
+// NewSerializer creates a new empty serializer.
+func NewSerializer() *Serializer {
+ return &Serializer{}
+}
+
+// Reset clears all of the registered steps.
+func (s *Serializer) Reset() *Serializer {
+ s.steps = nil
+ return s
+}
+
+// Step adds a new Step to the serialization process
+func (s *Serializer) Step(step SerializeStep) *Serializer {
+ s.steps = append(s.steps, step)
+ return s
+}
+
+type jsonSerializer struct{}
+
+func (jsonSerializer) Serialize(_ SerializeCtx, v any) (any, error) {
+ token, ok := v.(Token)
+ if !ok {
+ return nil, fmt.Errorf(`invalid input: expected jwt.Token`)
+ }
+
+ buf, err := json.Marshal(token)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to serialize as JSON: %w`, err)
+ }
+ return buf, nil
+}
+
+type genericHeader interface {
+ Get(string, any) error
+ Set(string, any) error
+ Has(string) bool
+}
+
+func setTypeOrCty(ctx SerializeCtx, hdrs genericHeader) error {
+ // cty and typ are common between JWE/JWS, so we don't use
+ // the constants in jws/jwe package here
+ const typKey = `typ`
+ const ctyKey = `cty`
+
+ if ctx.Step() == 1 {
+ // We are executed immediately after json marshaling
+ if !hdrs.Has(typKey) {
+ if err := hdrs.Set(typKey, `JWT`); err != nil {
+ return fmt.Errorf(`failed to set %s key to "JWT": %w`, typKey, err)
+ }
+ }
+ } else {
+ if ctx.Nested() {
+ // If this is part of a nested sequence, we should set cty = 'JWT'
+ // https://datatracker.ietf.org/doc/html/rfc7519#section-5.2
+ if err := hdrs.Set(ctyKey, `JWT`); err != nil {
+ return fmt.Errorf(`failed to set %s key to "JWT": %w`, ctyKey, err)
+ }
+ }
+ }
+ return nil
+}
+
+type jwsSerializer struct {
+ options []jws.SignOption
+}
+
+func (s *jwsSerializer) Serialize(ctx SerializeCtx, v any) (any, error) {
+ payload, ok := v.([]byte)
+ if !ok {
+ return nil, fmt.Errorf(`expected []byte as input`)
+ }
+
+ for _, option := range s.options {
+ var pc interface{ Protected(jws.Headers) jws.Headers }
+ if err := option.Value(&pc); err != nil {
+ continue
+ }
+ hdrs := pc.Protected(jws.NewHeaders())
+ if err := setTypeOrCty(ctx, hdrs); err != nil {
+ return nil, err // this is already wrapped
+ }
+
+ // JWTs MUST NOT use b64 = false
+ // https://datatracker.ietf.org/doc/html/rfc7797#section-7
+ var b64 bool
+ if err := hdrs.Get("b64", &b64); err == nil {
+ if !b64 { // b64 = false
+ return nil, fmt.Errorf(`b64 cannot be false for JWTs`)
+ }
+ }
+ }
+ return jws.Sign(payload, s.options...)
+}
+
+func (s *Serializer) Sign(options ...SignOption) *Serializer {
+ var soptions []jws.SignOption
+ if l := len(options); l > 0 {
+ // we need to from SignOption to Option because ... reasons
+ // (todo: when go1.18 prevails, use type parameters
+ rawoptions := make([]Option, l)
+ for i, option := range options {
+ rawoptions[i] = option
+ }
+
+ converted, err := toSignOptions(rawoptions...)
+ if err != nil {
+ return s.Step(errStep{fmt.Errorf(`(jwt.Serializer).Sign: failed to convert options into jws.SignOption: %w`, err)})
+ }
+ soptions = converted
+ }
+ return s.sign(soptions...)
+}
+
+func (s *Serializer) sign(options ...jws.SignOption) *Serializer {
+ return s.Step(&jwsSerializer{
+ options: options,
+ })
+}
+
+type jweSerializer struct {
+ options []jwe.EncryptOption
+}
+
+func (s *jweSerializer) Serialize(ctx SerializeCtx, v any) (any, error) {
+ payload, ok := v.([]byte)
+ if !ok {
+ return nil, fmt.Errorf(`expected []byte as input`)
+ }
+
+ hdrs := jwe.NewHeaders()
+ if err := setTypeOrCty(ctx, hdrs); err != nil {
+ return nil, err // this is already wrapped
+ }
+
+ options := append([]jwe.EncryptOption{jwe.WithMergeProtectedHeaders(true), jwe.WithProtectedHeaders(hdrs)}, s.options...)
+ return jwe.Encrypt(payload, options...)
+}
+
+// Encrypt specifies the JWT to be serialized as an encrypted payload.
+//
+// One notable difference between this method and `jwe.Encrypt()` is that
+// while `jwe.Encrypt()` OVERWRITES the previous headers when `jwe.WithProtectedHeaders()`
+// is provided, this method MERGES them. This is due to the fact that we
+// MUST add some extra headers to construct a proper JWE message.
+// Be careful when you pass multiple `jwe.EncryptOption`s.
+func (s *Serializer) Encrypt(options ...EncryptOption) *Serializer {
+ var eoptions []jwe.EncryptOption
+ if l := len(options); l > 0 {
+ // we need to from SignOption to Option because ... reasons
+ // (todo: when go1.18 prevails, use type parameters
+ rawoptions := make([]Option, l)
+ for i, option := range options {
+ rawoptions[i] = option
+ }
+
+ converted, err := toEncryptOptions(rawoptions...)
+ if err != nil {
+ return s.Step(errStep{fmt.Errorf(`(jwt.Serializer).Encrypt: failed to convert options into jwe.EncryptOption: %w`, err)})
+ }
+ eoptions = converted
+ }
+ return s.encrypt(eoptions...)
+}
+
+func (s *Serializer) encrypt(options ...jwe.EncryptOption) *Serializer {
+ return s.Step(&jweSerializer{
+ options: options,
+ })
+}
+
+func (s *Serializer) Serialize(t Token) ([]byte, error) {
+ steps := make([]SerializeStep, len(s.steps)+1)
+ steps[0] = jsonSerializer{}
+ for i, step := range s.steps {
+ steps[i+1] = step
+ }
+
+ var ctx serializeCtx
+ ctx.nested = len(s.steps) > 1
+ var payload any = t
+ for i, step := range steps {
+ ctx.step = i
+ v, err := step.Serialize(&ctx, payload)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to serialize token at step #%d: %w`, i+1, err)
+ }
+ payload = v
+ }
+
+ res, ok := payload.([]byte)
+ if !ok {
+ return nil, fmt.Errorf(`invalid serialization produced`)
+ }
+
+ return res, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_gen.go
new file mode 100644
index 0000000000..2361ff5621
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_gen.go
@@ -0,0 +1,635 @@
+// Code generated by tools/cmd/genjwt/main.go. DO NOT EDIT.
+
+package jwt
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/lestrrat-go/blackmagic"
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+ "github.com/lestrrat-go/jwx/v3/internal/pool"
+ "github.com/lestrrat-go/jwx/v3/internal/tokens"
+ jwterrs "github.com/lestrrat-go/jwx/v3/jwt/internal/errors"
+ "github.com/lestrrat-go/jwx/v3/jwt/internal/types"
+)
+
+const (
+ AudienceKey = "aud"
+ ExpirationKey = "exp"
+ IssuedAtKey = "iat"
+ IssuerKey = "iss"
+ JwtIDKey = "jti"
+ NotBeforeKey = "nbf"
+ SubjectKey = "sub"
+)
+
+// stdClaimNames is a list of all standard claim names defined in the JWT specification.
+var stdClaimNames = []string{AudienceKey, ExpirationKey, IssuedAtKey, IssuerKey, JwtIDKey, NotBeforeKey, SubjectKey}
+
+// Token represents a generic JWT token.
+// which are type-aware (to an extent). Other claims may be accessed via the `Get`/`Set`
+// methods but their types are not taken into consideration at all. If you have non-standard
+// claims that you must frequently access, consider creating accessors functions
+// like the following
+//
+// func SetFoo(tok jwt.Token) error
+// func GetFoo(tok jwt.Token) (*Customtyp, error)
+//
+// Embedding jwt.Token into another struct is not recommended, because
+// jwt.Token needs to handle private claims, and this really does not
+// work well when it is embedded in other structure
+type Token interface {
+ // Audience returns the value for "aud" field of the token
+ Audience() ([]string, bool)
+
+ // Expiration returns the value for "exp" field of the token
+ Expiration() (time.Time, bool)
+
+ // IssuedAt returns the value for "iat" field of the token
+ IssuedAt() (time.Time, bool)
+
+ // Issuer returns the value for "iss" field of the token
+ Issuer() (string, bool)
+
+ // JwtID returns the value for "jti" field of the token
+ JwtID() (string, bool)
+
+ // NotBefore returns the value for "nbf" field of the token
+ NotBefore() (time.Time, bool)
+
+ // Subject returns the value for "sub" field of the token
+ Subject() (string, bool)
+
+ // Get is used to extract the value of any claim, including non-standard claims, out of the token.
+ //
+ // The first argument is the name of the claim. The second argument is a pointer
+ // to a variable that will receive the value of the claim. The method returns
+ // an error if the claim does not exist, or if the value cannot be assigned to
+ // the destination variable. Note that a field is considered to "exist" even if
+ // the value is empty-ish (e.g. 0, false, ""), as long as it is explicitly set.
+ //
+ // For standard claims, you can use the corresponding getter method, such as
+ // `Issuer()`, `Subject()`, `Audience()`, `IssuedAt()`, `NotBefore()`, `ExpiresAt()`
+ //
+ // Note that fields of JWS/JWE are NOT accessible through this method. You need
+ // to use `jws.Parse` and `jwe.Parse` to obtain the JWS/JWE message (and NOT
+ // the payload, which presumably is the JWT), and then use their `Get` methods in their respective packages
+ Get(string, any) error
+
+ // Set assigns a value to the corresponding field in the token. Some
+ // pre-defined fields such as `nbf`, `iat`, `iss` need their values to
+ // be of a specific type. See the other getter methods in this interface
+ // for the types of each of these fields
+ Set(string, any) error
+
+ // Has returns true if the specified claim has a value, even if
+ // the value is empty-ish (e.g. 0, false, "") as long as it has been
+ // explicitly set.
+ Has(string) bool
+ Remove(string) error
+
+ // Options returns the per-token options associated with this token.
+ // The options set value will be copied when the token is cloned via `Clone()`
+ // but it will not survive when the token goes through marshaling/unmarshaling
+ // such as `json.Marshal` and `json.Unmarshal`
+ Options() *TokenOptionSet
+ Clone() (Token, error)
+ Keys() []string
+}
+type stdToken struct {
+ mu *sync.RWMutex
+ dc DecodeCtx // per-object context for decoding
+ options TokenOptionSet // per-object option
+ audience types.StringList // https://tools.ietf.org/html/rfc7519#section-4.1.3
+ expiration *types.NumericDate // https://tools.ietf.org/html/rfc7519#section-4.1.4
+ issuedAt *types.NumericDate // https://tools.ietf.org/html/rfc7519#section-4.1.6
+ issuer *string // https://tools.ietf.org/html/rfc7519#section-4.1.1
+ jwtID *string // https://tools.ietf.org/html/rfc7519#section-4.1.7
+ notBefore *types.NumericDate // https://tools.ietf.org/html/rfc7519#section-4.1.5
+ subject *string // https://tools.ietf.org/html/rfc7519#section-4.1.2
+ privateClaims map[string]any
+}
+
+// New creates a standard token, with minimal knowledge of
+// possible claims. Standard claims include"aud", "exp", "iat", "iss", "jti", "nbf" and "sub".
+// Convenience accessors are provided for these standard claims
+func New() Token {
+ return &stdToken{
+ mu: &sync.RWMutex{},
+ privateClaims: make(map[string]any),
+ options: DefaultOptionSet(),
+ }
+}
+
+func (t *stdToken) Options() *TokenOptionSet {
+ return &t.options
+}
+
+func (t *stdToken) Has(name string) bool {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ switch name {
+ case AudienceKey:
+ return t.audience != nil
+ case ExpirationKey:
+ return t.expiration != nil
+ case IssuedAtKey:
+ return t.issuedAt != nil
+ case IssuerKey:
+ return t.issuer != nil
+ case JwtIDKey:
+ return t.jwtID != nil
+ case NotBeforeKey:
+ return t.notBefore != nil
+ case SubjectKey:
+ return t.subject != nil
+ default:
+ _, ok := t.privateClaims[name]
+ return ok
+ }
+}
+
+func (t *stdToken) Get(name string, dst any) error {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ switch name {
+ case AudienceKey:
+ if t.audience == nil {
+ return jwterrs.ClaimNotFoundError{Name: name}
+ }
+ if err := blackmagic.AssignIfCompatible(dst, t.audience.Get()); err != nil {
+ return jwterrs.ClaimAssignmentFailedError{Err: err}
+ }
+ return nil
+ case ExpirationKey:
+ if t.expiration == nil {
+ return jwterrs.ClaimNotFoundError{Name: name}
+ }
+ if err := blackmagic.AssignIfCompatible(dst, t.expiration.Get()); err != nil {
+ return jwterrs.ClaimAssignmentFailedError{Err: err}
+ }
+ return nil
+ case IssuedAtKey:
+ if t.issuedAt == nil {
+ return jwterrs.ClaimNotFoundError{Name: name}
+ }
+ if err := blackmagic.AssignIfCompatible(dst, t.issuedAt.Get()); err != nil {
+ return jwterrs.ClaimAssignmentFailedError{Err: err}
+ }
+ return nil
+ case IssuerKey:
+ if t.issuer == nil {
+ return jwterrs.ClaimNotFoundError{Name: name}
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(t.issuer)); err != nil {
+ return jwterrs.ClaimAssignmentFailedError{Err: err}
+ }
+ return nil
+ case JwtIDKey:
+ if t.jwtID == nil {
+ return jwterrs.ClaimNotFoundError{Name: name}
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(t.jwtID)); err != nil {
+ return jwterrs.ClaimAssignmentFailedError{Err: err}
+ }
+ return nil
+ case NotBeforeKey:
+ if t.notBefore == nil {
+ return jwterrs.ClaimNotFoundError{Name: name}
+ }
+ if err := blackmagic.AssignIfCompatible(dst, t.notBefore.Get()); err != nil {
+ return jwterrs.ClaimAssignmentFailedError{Err: err}
+ }
+ return nil
+ case SubjectKey:
+ if t.subject == nil {
+ return jwterrs.ClaimNotFoundError{Name: name}
+ }
+ if err := blackmagic.AssignIfCompatible(dst, *(t.subject)); err != nil {
+ return jwterrs.ClaimAssignmentFailedError{Err: err}
+ }
+ return nil
+ default:
+ v, ok := t.privateClaims[name]
+ if !ok {
+ return jwterrs.ClaimNotFoundError{Name: name}
+ }
+ if err := blackmagic.AssignIfCompatible(dst, v); err != nil {
+ return jwterrs.ClaimAssignmentFailedError{Err: err}
+ }
+ return nil
+ }
+}
+
+func (t *stdToken) Remove(key string) error {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ switch key {
+ case AudienceKey:
+ t.audience = nil
+ case ExpirationKey:
+ t.expiration = nil
+ case IssuedAtKey:
+ t.issuedAt = nil
+ case IssuerKey:
+ t.issuer = nil
+ case JwtIDKey:
+ t.jwtID = nil
+ case NotBeforeKey:
+ t.notBefore = nil
+ case SubjectKey:
+ t.subject = nil
+ default:
+ delete(t.privateClaims, key)
+ }
+ return nil
+}
+
+func (t *stdToken) Set(name string, value any) error {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ return t.setNoLock(name, value)
+}
+
+func (t *stdToken) DecodeCtx() DecodeCtx {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ return t.dc
+}
+
+func (t *stdToken) SetDecodeCtx(v DecodeCtx) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ t.dc = v
+}
+
+func (t *stdToken) setNoLock(name string, value any) error {
+ switch name {
+ case AudienceKey:
+ var acceptor types.StringList
+ if err := acceptor.Accept(value); err != nil {
+ return fmt.Errorf(`invalid value for %s key: %w`, AudienceKey, err)
+ }
+ t.audience = acceptor
+ return nil
+ case ExpirationKey:
+ var acceptor types.NumericDate
+ if err := acceptor.Accept(value); err != nil {
+ return fmt.Errorf(`invalid value for %s key: %w`, ExpirationKey, err)
+ }
+ t.expiration = &acceptor
+ return nil
+ case IssuedAtKey:
+ var acceptor types.NumericDate
+ if err := acceptor.Accept(value); err != nil {
+ return fmt.Errorf(`invalid value for %s key: %w`, IssuedAtKey, err)
+ }
+ t.issuedAt = &acceptor
+ return nil
+ case IssuerKey:
+ if v, ok := value.(string); ok {
+ t.issuer = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, IssuerKey, value)
+ case JwtIDKey:
+ if v, ok := value.(string); ok {
+ t.jwtID = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, JwtIDKey, value)
+ case NotBeforeKey:
+ var acceptor types.NumericDate
+ if err := acceptor.Accept(value); err != nil {
+ return fmt.Errorf(`invalid value for %s key: %w`, NotBeforeKey, err)
+ }
+ t.notBefore = &acceptor
+ return nil
+ case SubjectKey:
+ if v, ok := value.(string); ok {
+ t.subject = &v
+ return nil
+ }
+ return fmt.Errorf(`invalid value for %s key: %T`, SubjectKey, value)
+ default:
+ if t.privateClaims == nil {
+ t.privateClaims = map[string]any{}
+ }
+ t.privateClaims[name] = value
+ }
+ return nil
+}
+
+func (t *stdToken) Audience() ([]string, bool) {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ if t.audience != nil {
+ return t.audience.Get(), true
+ }
+ return nil, false
+}
+
+func (t *stdToken) Expiration() (time.Time, bool) {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ if t.expiration != nil {
+ return t.expiration.Get(), true
+ }
+ return time.Time{}, false
+}
+
+func (t *stdToken) IssuedAt() (time.Time, bool) {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ if t.issuedAt != nil {
+ return t.issuedAt.Get(), true
+ }
+ return time.Time{}, false
+}
+
+func (t *stdToken) Issuer() (string, bool) {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ if t.issuer != nil {
+ return *(t.issuer), true
+ }
+ return "", false
+}
+
+func (t *stdToken) JwtID() (string, bool) {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ if t.jwtID != nil {
+ return *(t.jwtID), true
+ }
+ return "", false
+}
+
+func (t *stdToken) NotBefore() (time.Time, bool) {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ if t.notBefore != nil {
+ return t.notBefore.Get(), true
+ }
+ return time.Time{}, false
+}
+
+func (t *stdToken) Subject() (string, bool) {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ if t.subject != nil {
+ return *(t.subject), true
+ }
+ return "", false
+}
+
+func (t *stdToken) PrivateClaims() map[string]any {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ return t.privateClaims
+}
+
+func (t *stdToken) UnmarshalJSON(buf []byte) error {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ t.audience = nil
+ t.expiration = nil
+ t.issuedAt = nil
+ t.issuer = nil
+ t.jwtID = nil
+ t.notBefore = nil
+ t.subject = nil
+ dec := json.NewDecoder(bytes.NewReader(buf))
+LOOP:
+ for {
+ tok, err := dec.Token()
+ if err != nil {
+ return fmt.Errorf(`error reading token: %w`, err)
+ }
+ switch tok := tok.(type) {
+ case json.Delim:
+ // Assuming we're doing everything correctly, we should ONLY
+ // get either tokens.OpenCurlyBracket or tokens.CloseCurlyBracket here.
+ if tok == tokens.CloseCurlyBracket { // End of object
+ break LOOP
+ } else if tok != tokens.OpenCurlyBracket {
+ return fmt.Errorf(`expected '%c', but got '%c'`, tokens.OpenCurlyBracket, tok)
+ }
+ case string: // Objects can only have string keys
+ switch tok {
+ case AudienceKey:
+ var decoded types.StringList
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, AudienceKey, err)
+ }
+ t.audience = decoded
+ case ExpirationKey:
+ var decoded types.NumericDate
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, ExpirationKey, err)
+ }
+ t.expiration = &decoded
+ case IssuedAtKey:
+ var decoded types.NumericDate
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, IssuedAtKey, err)
+ }
+ t.issuedAt = &decoded
+ case IssuerKey:
+ if err := json.AssignNextStringToken(&t.issuer, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, IssuerKey, err)
+ }
+ case JwtIDKey:
+ if err := json.AssignNextStringToken(&t.jwtID, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, JwtIDKey, err)
+ }
+ case NotBeforeKey:
+ var decoded types.NumericDate
+ if err := dec.Decode(&decoded); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, NotBeforeKey, err)
+ }
+ t.notBefore = &decoded
+ case SubjectKey:
+ if err := json.AssignNextStringToken(&t.subject, dec); err != nil {
+ return fmt.Errorf(`failed to decode value for key %s: %w`, SubjectKey, err)
+ }
+ default:
+ if dc := t.dc; dc != nil {
+ if localReg := dc.Registry(); localReg != nil {
+ decoded, err := localReg.Decode(dec, tok)
+ if err == nil {
+ t.setNoLock(tok, decoded)
+ continue
+ }
+ }
+ }
+ decoded, err := registry.Decode(dec, tok)
+ if err == nil {
+ t.setNoLock(tok, decoded)
+ continue
+ }
+ return fmt.Errorf(`could not decode field %s: %w`, tok, err)
+ }
+ default:
+ return fmt.Errorf(`invalid token %T`, tok)
+ }
+ }
+ return nil
+}
+
+func (t *stdToken) Keys() []string {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ keys := make([]string, 0, 7+len(t.privateClaims))
+ if t.audience != nil {
+ keys = append(keys, AudienceKey)
+ }
+ if t.expiration != nil {
+ keys = append(keys, ExpirationKey)
+ }
+ if t.issuedAt != nil {
+ keys = append(keys, IssuedAtKey)
+ }
+ if t.issuer != nil {
+ keys = append(keys, IssuerKey)
+ }
+ if t.jwtID != nil {
+ keys = append(keys, JwtIDKey)
+ }
+ if t.notBefore != nil {
+ keys = append(keys, NotBeforeKey)
+ }
+ if t.subject != nil {
+ keys = append(keys, SubjectKey)
+ }
+ for k := range t.privateClaims {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+type claimPair struct {
+ Name string
+ Value any
+}
+
+var claimPairPool = sync.Pool{
+ New: func() any {
+ return make([]claimPair, 0, 7)
+ },
+}
+
+func getClaimPairList() []claimPair {
+ return claimPairPool.Get().([]claimPair)
+}
+
+func putClaimPairList(list []claimPair) {
+ list = list[:0]
+ claimPairPool.Put(list)
+}
+
+// makePairs creates a list of claimPair objects that are sorted by
+// their key names. The key names are always their JSON names, and
+// the values are already JSON encoded.
+// Because makePairs needs to allocate a slice, it _slows_ down
+// marshaling of the token to JSON. The upside is that it allows us to
+// marshal the token keys in a deterministic order.
+// Do we really need it...? Well, technically we don't, but it's so
+// much nicer to have this to make the example tests actually work
+// deterministically. Also if for whatever reason this becomes a
+// performance issue, we can always/ add a flag to use a more _optimized_ code path.
+//
+// The caller is responsible to call putClaimPairList() to return the
+// allocated slice back to the pool.
+
+func (t *stdToken) makePairs() ([]claimPair, error) {
+ pairs := getClaimPairList()
+ if t.audience != nil {
+ buf, err := json.MarshalAudience(t.audience, t.options.IsEnabled(FlattenAudience))
+ if err != nil {
+ return nil, fmt.Errorf(`failed to encode "aud": %w`, err)
+ }
+ pairs = append(pairs, claimPair{Name: AudienceKey, Value: buf})
+ }
+ if t.expiration != nil {
+ buf, err := json.Marshal(t.expiration.Unix())
+ if err != nil {
+ return nil, fmt.Errorf(`failed to encode "exp": %w`, err)
+ }
+ pairs = append(pairs, claimPair{Name: ExpirationKey, Value: buf})
+ }
+ if t.issuedAt != nil {
+ buf, err := json.Marshal(t.issuedAt.Unix())
+ if err != nil {
+ return nil, fmt.Errorf(`failed to encode "iat": %w`, err)
+ }
+ pairs = append(pairs, claimPair{Name: IssuedAtKey, Value: buf})
+ }
+ if t.issuer != nil {
+ buf, err := json.Marshal(*(t.issuer))
+ if err != nil {
+ return nil, fmt.Errorf(`failed to encode field "iss": %w`, err)
+ }
+ pairs = append(pairs, claimPair{Name: IssuerKey, Value: buf})
+ }
+ if t.jwtID != nil {
+ buf, err := json.Marshal(*(t.jwtID))
+ if err != nil {
+ return nil, fmt.Errorf(`failed to encode field "jti": %w`, err)
+ }
+ pairs = append(pairs, claimPair{Name: JwtIDKey, Value: buf})
+ }
+ if t.notBefore != nil {
+ buf, err := json.Marshal(t.notBefore.Unix())
+ if err != nil {
+ return nil, fmt.Errorf(`failed to encode "nbf": %w`, err)
+ }
+ pairs = append(pairs, claimPair{Name: NotBeforeKey, Value: buf})
+ }
+ if t.subject != nil {
+ buf, err := json.Marshal(*(t.subject))
+ if err != nil {
+ return nil, fmt.Errorf(`failed to encode field "sub": %w`, err)
+ }
+ pairs = append(pairs, claimPair{Name: SubjectKey, Value: buf})
+ }
+ for k, v := range t.privateClaims {
+ buf, err := json.Marshal(v)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to encode field %q: %w`, k, err)
+ }
+ pairs = append(pairs, claimPair{Name: k, Value: buf})
+ }
+
+ sort.Slice(pairs, func(i, j int) bool {
+ return pairs[i].Name < pairs[j].Name
+ })
+
+ return pairs, nil
+}
+
+func (t stdToken) MarshalJSON() ([]byte, error) {
+ buf := pool.BytesBuffer().Get()
+ defer pool.BytesBuffer().Put(buf)
+ pairs, err := t.makePairs()
+ if err != nil {
+ return nil, fmt.Errorf(`failed to make pairs: %w`, err)
+ }
+ buf.WriteByte(tokens.OpenCurlyBracket)
+
+ for i, pair := range pairs {
+ if i > 0 {
+ buf.WriteByte(tokens.Comma)
+ }
+ fmt.Fprintf(buf, "%q: %s", pair.Name, pair.Value)
+ }
+ buf.WriteByte(tokens.CloseCurlyBracket)
+ ret := make([]byte, buf.Len())
+ copy(ret, buf.Bytes())
+ putClaimPairList(pairs)
+ return ret, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_options.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_options.go
new file mode 100644
index 0000000000..0f54e05611
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_options.go
@@ -0,0 +1,78 @@
+package jwt
+
+import "sync"
+
+// TokenOptionSet is a bit flag containing per-token options.
+type TokenOptionSet uint64
+
+var defaultOptions TokenOptionSet
+var defaultOptionsMu sync.RWMutex
+
+// TokenOption describes a single token option that can be set on
+// the per-token option set (TokenOptionSet)
+type TokenOption uint64
+
+const (
+ // FlattenAudience option controls whether the "aud" claim should be flattened
+ // to a single string upon the token being serialized to JSON.
+ //
+ // This is sometimes important when a JWT consumer does not understand that
+ // the "aud" claim can actually take the form of an array of strings.
+ // (We have been notified by users that AWS Cognito has manifested this behavior
+ // at some point)
+ //
+ // Unless the global option is set using `jwt.Settings()`, the default value is
+ // `disabled`, which means that "aud" claims are always rendered as a arrays of
+ // strings when serialized to JSON.
+ FlattenAudience TokenOption = 1 << iota
+
+ // MaxPerTokenOption is a marker to denote the last value that an option can take.
+ // This value has no meaning other than to be used as a marker.
+ MaxPerTokenOption
+)
+
+// Value returns the uint64 value of a single option
+func (o TokenOption) Value() uint64 {
+ return uint64(o)
+}
+
+// Value returns the uint64 bit flag value of an option set
+func (o TokenOptionSet) Value() uint64 {
+ return uint64(o)
+}
+
+// DefaultOptionSet creates a new TokenOptionSet using the default
+// option set. This may differ depending on if/when functions that
+// change the global state has been called, such as `jwt.Settings`
+func DefaultOptionSet() TokenOptionSet {
+ return TokenOptionSet(defaultOptions.Value())
+}
+
+// Clear sets all bits to zero, effectively disabling all options
+func (o *TokenOptionSet) Clear() {
+ *o = TokenOptionSet(uint64(0))
+}
+
+// Set sets the value of this option set, effectively *replacing*
+// the entire option set with the new value. This is NOT the same
+// as Enable/Disable.
+func (o *TokenOptionSet) Set(s TokenOptionSet) {
+ *o = s
+}
+
+// Enable sets the appropriate value to enable the option in the
+// option set
+func (o *TokenOptionSet) Enable(flag TokenOption) {
+ *o = TokenOptionSet(o.Value() | uint64(flag))
+}
+
+// Enable sets the appropriate value to disable the option in the
+// option set
+func (o *TokenOptionSet) Disable(flag TokenOption) {
+ *o = TokenOptionSet(o.Value() & ^uint64(flag))
+}
+
+// IsEnabled returns true if the given bit on the option set is enabled.
+func (o TokenOptionSet) IsEnabled(flag TokenOption) bool {
+ return (uint64(o)&uint64(flag) == uint64(flag))
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_options_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_options_gen.go
new file mode 100644
index 0000000000..7e7cbf14aa
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_options_gen.go
@@ -0,0 +1,25 @@
+// Code generated by "stringer -type=TokenOption -output=token_options_gen.go"; DO NOT EDIT.
+
+package jwt
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[FlattenAudience-1]
+ _ = x[MaxPerTokenOption-2]
+}
+
+const _TokenOption_name = "FlattenAudienceMaxPerTokenOption"
+
+var _TokenOption_index = [...]uint8{0, 15, 32}
+
+func (i TokenOption) String() string {
+ i -= 1
+ if i >= TokenOption(len(_TokenOption_index)-1) {
+ return "TokenOption(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _TokenOption_name[_TokenOption_index[i]:_TokenOption_index[i+1]]
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/validate.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/validate.go
new file mode 100644
index 0000000000..dbc43edbc2
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/validate.go
@@ -0,0 +1,418 @@
+package jwt
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "time"
+
+ jwterrs "github.com/lestrrat-go/jwx/v3/jwt/internal/errors"
+)
+
+type Clock interface {
+ Now() time.Time
+}
+type ClockFunc func() time.Time
+
+func (f ClockFunc) Now() time.Time {
+ return f()
+}
+
+func isSupportedTimeClaim(c string) error {
+ switch c {
+ case ExpirationKey, IssuedAtKey, NotBeforeKey:
+ return nil
+ }
+ return fmt.Errorf(`unsupported time claim %s`, strconv.Quote(c))
+}
+
+func timeClaim(t Token, clock Clock, c string) time.Time {
+ // We don't check if the claims already exist. It should have been done
+ // by piggybacking on `required` check.
+ switch c {
+ case ExpirationKey:
+ tv, _ := t.Expiration()
+ return tv
+ case IssuedAtKey:
+ tv, _ := t.IssuedAt()
+ return tv
+ case NotBeforeKey:
+ tv, _ := t.NotBefore()
+ return tv
+ case "":
+ return clock.Now()
+ }
+ return time.Time{} // should *NEVER* reach here, but...
+}
+
+// Validate makes sure that the essential claims stand.
+//
+// See the various `WithXXX` functions for optional parameters
+// that can control the behavior of this method.
+func Validate(t Token, options ...ValidateOption) error {
+ ctx := context.Background()
+ trunc := getDefaultTruncation()
+
+ var clock Clock = ClockFunc(time.Now)
+ var skew time.Duration
+ var baseValidators = []Validator{
+ IsIssuedAtValid(),
+ IsExpirationValid(),
+ IsNbfValid(),
+ }
+ var extraValidators []Validator
+ var resetValidators bool
+ for _, o := range options {
+ switch o.Ident() {
+ case identClock{}:
+ if err := o.Value(&clock); err != nil {
+ return fmt.Errorf(`jwt.Validate: value for WithClock() option must be jwt.Clock: %w`, err)
+ }
+ case identAcceptableSkew{}:
+ if err := o.Value(&skew); err != nil {
+ return fmt.Errorf(`jwt.Validate: value for WithAcceptableSkew() option must be time.Duration: %w`, err)
+ }
+ case identTruncation{}:
+ if err := o.Value(&trunc); err != nil {
+ return fmt.Errorf(`jwt.Validate: value for WithTruncation() option must be time.Duration: %w`, err)
+ }
+ case identContext{}:
+ if err := o.Value(&ctx); err != nil {
+ return fmt.Errorf(`jwt.Validate: value for WithContext() option must be context.Context: %w`, err)
+ }
+ case identResetValidators{}:
+ if err := o.Value(&resetValidators); err != nil {
+ return fmt.Errorf(`jwt.Validate: value for WithResetValidators() option must be bool: %w`, err)
+ }
+ case identValidator{}:
+ var v Validator
+ if err := o.Value(&v); err != nil {
+ return fmt.Errorf(`jwt.Validate: value for WithValidator() option must be jwt.Validator: %w`, err)
+ }
+ switch v := v.(type) {
+ case *isInTimeRange:
+ if v.c1 != "" {
+ if err := isSupportedTimeClaim(v.c1); err != nil {
+ return err
+ }
+ extraValidators = append(extraValidators, IsRequired(v.c1))
+ }
+ if v.c2 != "" {
+ if err := isSupportedTimeClaim(v.c2); err != nil {
+ return err
+ }
+ extraValidators = append(extraValidators, IsRequired(v.c2))
+ }
+ }
+ extraValidators = append(extraValidators, v)
+ }
+ }
+
+ ctx = SetValidationCtxSkew(ctx, skew)
+ ctx = SetValidationCtxClock(ctx, clock)
+ ctx = SetValidationCtxTruncation(ctx, trunc)
+
+ var validators []Validator
+ if !resetValidators {
+ validators = append(baseValidators, extraValidators...)
+ } else {
+ if len(extraValidators) == 0 {
+ return jwterrs.ValidateErrorf(`no validators specified: jwt.WithResetValidators(true) and no jwt.WithValidator() specified`)
+ }
+ validators = extraValidators
+ }
+
+ for _, v := range validators {
+ if err := v.Validate(ctx, t); err != nil {
+ return jwterrs.ValidateErrorf(`validation failed: %w`, err)
+ }
+ }
+
+ return nil
+}
+
+type isInTimeRange struct {
+ c1 string
+ c2 string
+ dur time.Duration
+ less bool // if true, d =< c1 - c2. otherwise d >= c1 - c2
+}
+
+// MaxDeltaIs implements the logic behind `WithMaxDelta()` option
+func MaxDeltaIs(c1, c2 string, dur time.Duration) Validator {
+ return &isInTimeRange{
+ c1: c1,
+ c2: c2,
+ dur: dur,
+ less: true,
+ }
+}
+
+// MinDeltaIs implements the logic behind `WithMinDelta()` option
+func MinDeltaIs(c1, c2 string, dur time.Duration) Validator {
+ return &isInTimeRange{
+ c1: c1,
+ c2: c2,
+ dur: dur,
+ less: false,
+ }
+}
+
+func (iitr *isInTimeRange) Validate(ctx context.Context, t Token) error {
+ clock := ValidationCtxClock(ctx) // MUST be populated
+ skew := ValidationCtxSkew(ctx) // MUST be populated
+ // We don't check if the claims already exist, because we already did that
+ // by piggybacking on `required` check.
+ t1 := timeClaim(t, clock, iitr.c1)
+ t2 := timeClaim(t, clock, iitr.c2)
+ if iitr.less { // t1 - t2 <= iitr.dur
+ // t1 - t2 < iitr.dur + skew
+ if t1.Sub(t2) > iitr.dur+skew {
+ return fmt.Errorf(`iitr between %s and %s exceeds %s (skew %s)`, iitr.c1, iitr.c2, iitr.dur, skew)
+ }
+ } else {
+ if t1.Sub(t2) < iitr.dur-skew {
+ return fmt.Errorf(`iitr between %s and %s is less than %s (skew %s)`, iitr.c1, iitr.c2, iitr.dur, skew)
+ }
+ }
+ return nil
+}
+
+// Validator describes interface to validate a Token.
+type Validator interface {
+ // Validate should return an error if a required conditions is not met.
+ Validate(context.Context, Token) error
+}
+
+// ValidatorFunc is a type of Validator that does not have any
+// state, that is implemented as a function
+type ValidatorFunc func(context.Context, Token) error
+
+func (vf ValidatorFunc) Validate(ctx context.Context, tok Token) error {
+ return vf(ctx, tok)
+}
+
+type identValidationCtxClock struct{}
+type identValidationCtxSkew struct{}
+type identValidationCtxTruncation struct{}
+
+func SetValidationCtxClock(ctx context.Context, cl Clock) context.Context {
+ return context.WithValue(ctx, identValidationCtxClock{}, cl)
+}
+
+func SetValidationCtxTruncation(ctx context.Context, dur time.Duration) context.Context {
+ return context.WithValue(ctx, identValidationCtxTruncation{}, dur)
+}
+
+func SetValidationCtxSkew(ctx context.Context, dur time.Duration) context.Context {
+ return context.WithValue(ctx, identValidationCtxSkew{}, dur)
+}
+
+// ValidationCtxClock returns the Clock object associated with
+// the current validation context. This value will always be available
+// during validation of tokens.
+func ValidationCtxClock(ctx context.Context) Clock {
+ //nolint:forcetypeassert
+ return ctx.Value(identValidationCtxClock{}).(Clock)
+}
+
+func ValidationCtxSkew(ctx context.Context) time.Duration {
+ //nolint:forcetypeassert
+ return ctx.Value(identValidationCtxSkew{}).(time.Duration)
+}
+
+func ValidationCtxTruncation(ctx context.Context) time.Duration {
+ //nolint:forcetypeassert
+ return ctx.Value(identValidationCtxTruncation{}).(time.Duration)
+}
+
+// IsExpirationValid is one of the default validators that will be executed.
+// It does not need to be specified by users, but it exists as an
+// exported field so that you can check what it does.
+//
+// The supplied context.Context object must have the "clock" and "skew"
+// populated with appropriate values using SetValidationCtxClock() and
+// SetValidationCtxSkew()
+func IsExpirationValid() Validator {
+ return ValidatorFunc(isExpirationValid)
+}
+
+func isExpirationValid(ctx context.Context, t Token) error {
+ tv, ok := t.Expiration()
+ if !ok {
+ return nil
+ }
+
+ clock := ValidationCtxClock(ctx) // MUST be populated
+ skew := ValidationCtxSkew(ctx) // MUST be populated
+ trunc := ValidationCtxTruncation(ctx) // MUST be populated
+
+ now := clock.Now().Truncate(trunc)
+ ttv := tv.Truncate(trunc)
+
+ // expiration date must be after NOW
+ if !now.Before(ttv.Add(skew)) {
+ return TokenExpiredError()
+ }
+ return nil
+}
+
+// IsIssuedAtValid is one of the default validators that will be executed.
+// It does not need to be specified by users, but it exists as an
+// exported field so that you can check what it does.
+//
+// The supplied context.Context object must have the "clock" and "skew"
+// populated with appropriate values using SetValidationCtxClock() and
+// SetValidationCtxSkew()
+func IsIssuedAtValid() Validator {
+ return ValidatorFunc(isIssuedAtValid)
+}
+
+func isIssuedAtValid(ctx context.Context, t Token) error {
+ tv, ok := t.IssuedAt()
+ if !ok {
+ return nil
+ }
+
+ clock := ValidationCtxClock(ctx) // MUST be populated
+ skew := ValidationCtxSkew(ctx) // MUST be populated
+ trunc := ValidationCtxTruncation(ctx) // MUST be populated
+
+ now := clock.Now().Truncate(trunc)
+ ttv := tv.Truncate(trunc)
+
+ if now.Before(ttv.Add(-1 * skew)) {
+ return InvalidIssuedAtError()
+ }
+ return nil
+}
+
+// IsNbfValid is one of the default validators that will be executed.
+// It does not need to be specified by users, but it exists as an
+// exported field so that you can check what it does.
+//
+// The supplied context.Context object must have the "clock" and "skew"
+// populated with appropriate values using SetValidationCtxClock() and
+// SetValidationCtxSkew()
+func IsNbfValid() Validator {
+ return ValidatorFunc(isNbfValid)
+}
+
+func isNbfValid(ctx context.Context, t Token) error {
+ tv, ok := t.NotBefore()
+ if !ok {
+ return nil
+ }
+
+ clock := ValidationCtxClock(ctx) // MUST be populated
+ skew := ValidationCtxSkew(ctx) // MUST be populated
+ trunc := ValidationCtxTruncation(ctx) // MUST be populated
+
+ // Truncation always happens even for trunc = 0 because
+ // we also use this to strip monotonic clocks
+ now := clock.Now().Truncate(trunc)
+ ttv := tv.Truncate(trunc)
+
+ // "now" cannot be before t - skew, so we check for now > t - skew
+ ttv = ttv.Add(-1 * skew)
+ if now.Before(ttv) {
+ return TokenNotYetValidError()
+ }
+ return nil
+}
+
+type claimContainsString struct {
+ name string
+ value string
+ makeErr func(string, ...any) error
+}
+
+// ClaimContainsString can be used to check if the claim called `name`, which is
+// expected to be a list of strings, contains `value`. Currently, because of the
+// implementation, this will probably only work for `aud` fields.
+func ClaimContainsString(name, value string) Validator {
+ return claimContainsString{
+ name: name,
+ value: value,
+ makeErr: fmt.Errorf,
+ }
+}
+
+func (ccs claimContainsString) Validate(_ context.Context, t Token) error {
+ var list []string
+ if err := t.Get(ccs.name, &list); err != nil {
+ return ccs.makeErr(`claim %q does not exist or is not a []string: %w`, ccs.name, err)
+ }
+
+ for _, v := range list {
+ if v == ccs.value {
+ return nil
+ }
+ }
+ return ccs.makeErr(`%q not satisfied`, ccs.name)
+}
+
+// audienceClaimContainsString can be used to check if the audience claim, which is
+// expected to be a list of strings, contains `value`.
+func audienceClaimContainsString(value string) Validator {
+ return claimContainsString{
+ name: AudienceKey,
+ value: value,
+ makeErr: jwterrs.AudienceErrorf,
+ }
+}
+
+type claimValueIs struct {
+ name string
+ value any
+ makeErr func(string, ...any) error
+}
+
+// ClaimValueIs creates a Validator that checks if the value of claim `name`
+// matches `value`. The comparison is done using a simple `==` comparison,
+// and therefore complex comparisons may fail using this code. If you
+// need to do more, use a custom Validator.
+func ClaimValueIs(name string, value any) Validator {
+ return &claimValueIs{
+ name: name,
+ value: value,
+ makeErr: fmt.Errorf,
+ }
+}
+
+func (cv *claimValueIs) Validate(_ context.Context, t Token) error {
+ var v any
+ if err := t.Get(cv.name, &v); err != nil {
+ return cv.makeErr(`claim %[1]q does not exist or is not a []string: %[2]w`, cv.name, err)
+ }
+ if v != cv.value {
+ return cv.makeErr(`claim %[1]q does not have the expected value`, cv.name)
+ }
+ return nil
+}
+
+// issuerClaimValueIs creates a Validator that checks if the issuer claim
+// matches `value`.
+func issuerClaimValueIs(value string) Validator {
+ return &claimValueIs{
+ name: IssuerKey,
+ value: value,
+ makeErr: jwterrs.IssuerErrorf,
+ }
+}
+
+// IsRequired creates a Validator that checks if the required claim `name`
+// exists in the token
+func IsRequired(name string) Validator {
+ return isRequired(name)
+}
+
+type isRequired string
+
+func (ir isRequired) Validate(_ context.Context, t Token) error {
+ name := string(ir)
+ if !t.Has(name) {
+ return jwterrs.MissingRequiredClaimErrorf(name)
+ }
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwx.go b/vendor/github.com/lestrrat-go/jwx/v3/jwx.go
new file mode 100644
index 0000000000..fc394e5137
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/jwx.go
@@ -0,0 +1,45 @@
+//go:generate ./tools/cmd/genreadfile.sh
+//go:generate ./tools/cmd/genoptions.sh
+//go:generate stringer -type=FormatKind
+//go:generate mv formatkind_string.go formatkind_string_gen.go
+
+// Package jwx contains tools that deal with the various JWx (JOSE)
+// technologies such as JWT, JWS, JWE, etc in Go.
+//
+// JWS (https://tools.ietf.org/html/rfc7515)
+// JWE (https://tools.ietf.org/html/rfc7516)
+// JWK (https://tools.ietf.org/html/rfc7517)
+// JWA (https://tools.ietf.org/html/rfc7518)
+// JWT (https://tools.ietf.org/html/rfc7519)
+//
+// Examples are stored in a separate Go module (to avoid adding
+// dependencies to this module), and thus does not appear in the
+// online documentation for this module.
+// You can find the examples in Github at https://github.com/lestrrat-go/jwx/tree/v3/examples
+//
+// You can find more high level documentation at Github (https://github.com/lestrrat-go/jwx/tree/v2)
+//
+// FAQ style documentation can be found in the repository (https://github.com/lestrrat-go/jwx/tree/develop/v3/docs)
+package jwx
+
+import (
+ "github.com/lestrrat-go/jwx/v3/internal/json"
+)
+
+// DecoderSettings gives you a access to configure the "encoding/json".Decoder
+// used to decode JSON objects within the jwx framework.
+func DecoderSettings(options ...JSONOption) {
+ // XXX We're using this format instead of just passing a single boolean
+ // in case a new option is to be added some time later
+ var useNumber bool
+ for _, option := range options {
+ switch option.Ident() {
+ case identUseNumber{}:
+ if err := option.Value(&useNumber); err != nil {
+ panic("jwx.DecoderSettings: useNumber option must be a boolean")
+ }
+ }
+ }
+
+ json.DecoderSettings(useNumber)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/options.go b/vendor/github.com/lestrrat-go/jwx/v3/options.go
new file mode 100644
index 0000000000..b642a199d8
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/options.go
@@ -0,0 +1,30 @@
+package jwx
+
+import "github.com/lestrrat-go/option/v2"
+
+type identUseNumber struct{}
+
+type Option = option.Interface
+
+type JSONOption interface {
+ Option
+ isJSONOption()
+}
+
+type jsonOption struct {
+ Option
+}
+
+func (o *jsonOption) isJSONOption() {}
+
+func newJSONOption(n any, v any) JSONOption {
+ return &jsonOption{option.New(n, v)}
+}
+
+// WithUseNumber controls whether the jwx package should unmarshal
+// JSON objects with the "encoding/json".Decoder.UseNumber feature on.
+//
+// Default is false.
+func WithUseNumber(b bool) JSONOption {
+ return newJSONOption(identUseNumber{}, b)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/transform/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/transform/BUILD.bazel
new file mode 100644
index 0000000000..3333c6607c
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/transform/BUILD.bazel
@@ -0,0 +1,32 @@
+load("@rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "transform",
+ srcs = [
+ "filter.go",
+ "map.go",
+ ],
+ importpath = "github.com/lestrrat-go/jwx/v3/transform",
+ visibility = ["//visibility:public"],
+ deps = [
+ "@com_github_lestrrat_go_blackmagic//:blackmagic",
+ ],
+)
+
+go_test(
+ name = "transform_test",
+ srcs = [
+ "map_test.go",
+ ],
+ deps = [
+ ":transform",
+ "//jwt",
+ "@com_github_stretchr_testify//require",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":transform",
+ visibility = ["//visibility:public"],
+)
\ No newline at end of file
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/transform/filter.go b/vendor/github.com/lestrrat-go/jwx/v3/transform/filter.go
new file mode 100644
index 0000000000..da2972db6a
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/transform/filter.go
@@ -0,0 +1,115 @@
+package transform
+
+import "sync"
+
+// FilterLogic is an interface that defines the logic for filtering objects.
+type FilterLogic interface {
+ Apply(key string, object any) bool
+}
+
+// FilterLogicFunc is a function type that implements the FilterLogic interface.
+type FilterLogicFunc func(key string, object any) bool
+
+func (f FilterLogicFunc) Apply(key string, object any) bool {
+ return f(key, object)
+}
+
+// Filterable is an interface that must be implemented by objects that can be filtered.
+type Filterable[T any] interface {
+ // Keys returns the names of all fields in the object.
+ Keys() []string
+
+ // Clone returns a deep copy of the object.
+ Clone() (T, error)
+
+ // Remove removes a field from the object.
+ Remove(string) error
+}
+
+// Apply is a standalone function that provides type-safe filtering based on
+// specified filter logic.
+//
+// It returns a new object with only the fields that match the result of `logic.Apply`.
+func Apply[T Filterable[T]](object T, logic FilterLogic) (T, error) {
+ return filterWith(object, logic, true)
+}
+
+// Reject is a standalone function that provides type-safe filtering based on
+// specified filter logic.
+//
+// It returns a new object with only the fields that DO NOT match the result
+// of `logic.Apply`.
+func Reject[T Filterable[T]](object T, logic FilterLogic) (T, error) {
+ return filterWith(object, logic, false)
+}
+
+// filterWith is an internal function used by both Apply and Reject functions
+// to apply the filtering logic to an object. If include is true, only fields
+// matching the logic are included. If include is false, fields matching
+// the logic are excluded.
+func filterWith[T Filterable[T]](object T, logic FilterLogic, include bool) (T, error) {
+ var zero T
+
+ result, err := object.Clone()
+ if err != nil {
+ return zero, err
+ }
+
+ for _, k := range result.Keys() {
+ if ok := logic.Apply(k, object); (include && ok) || (!include && !ok) {
+ continue
+ }
+
+ if err := result.Remove(k); err != nil {
+ return zero, err
+ }
+ }
+
+ return result, nil
+}
+
+// NameBasedFilter is a filter that filters fields based on their field names.
+type NameBasedFilter[T Filterable[T]] struct {
+ names map[string]struct{}
+ mu sync.RWMutex
+ logic FilterLogic
+}
+
+// NewNameBasedFilter creates a new NameBasedFilter with the specified field names.
+//
+// NameBasedFilter is the underlying implementation of the
+// various filters in jwe, jwk, jws, and jwt packages. You normally do not
+// need to use this directly.
+func NewNameBasedFilter[T Filterable[T]](names ...string) *NameBasedFilter[T] {
+ nameMap := make(map[string]struct{}, len(names))
+ for _, name := range names {
+ nameMap[name] = struct{}{}
+ }
+
+ nf := &NameBasedFilter[T]{
+ names: nameMap,
+ }
+
+ nf.logic = FilterLogicFunc(nf.filter)
+ return nf
+}
+
+func (nf *NameBasedFilter[T]) filter(k string, _ any) bool {
+ _, ok := nf.names[k]
+ return ok
+}
+
+// Filter returns a new object with only the fields that match the specified names.
+func (nf *NameBasedFilter[T]) Filter(object T) (T, error) {
+ nf.mu.RLock()
+ defer nf.mu.RUnlock()
+
+ return Apply(object, nf.logic)
+}
+
+// Reject returns a new object with only the fields that DO NOT match the specified names.
+func (nf *NameBasedFilter[T]) Reject(object T) (T, error) {
+ nf.mu.RLock()
+ defer nf.mu.RUnlock()
+ return Reject(object, nf.logic)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/transform/map.go b/vendor/github.com/lestrrat-go/jwx/v3/transform/map.go
new file mode 100644
index 0000000000..4eb80cb99f
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v3/transform/map.go
@@ -0,0 +1,46 @@
+package transform
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/lestrrat-go/blackmagic"
+)
+
+// Mappable is an interface that defines methods required when converting
+// a jwx structure into a map[string]any.
+//
+// EXPERIMENTAL: This API is experimental and its interface and behavior is
+// subject to change in future releases. This API is not subject to semver
+// compatibility guarantees.
+type Mappable interface {
+ Get(key string, dst any) error
+ Keys() []string
+}
+
+// AsMap takes the specified Mappable object and populates the map
+// `dst` with the key-value pairs from the Mappable object.
+// Many objects in jwe, jwk, jws, and jwt packages including
+// `jwt.Token`, `jwk.Key`, `jws.Header`, etc.
+//
+// EXPERIMENTAL: This API is experimental and its interface and behavior is
+// subject to change in future releases. This API is not subject to semver
+// compatibility guarantees.
+func AsMap(m Mappable, dst map[string]any) error {
+ if dst == nil {
+ return fmt.Errorf("transform.AsMap: destination map cannot be nil")
+ }
+
+ for _, k := range m.Keys() {
+ var val any
+ if err := m.Get(k, &val); err != nil {
+ // Allow invalid value errors. Assume they are just nil values.
+ if !errors.Is(err, blackmagic.InvalidValueError()) {
+ return fmt.Errorf(`transform.AsMap: failed to get key %q: %w`, k, err)
+ }
+ }
+ dst[k] = val
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/lestrrat-go/option/v2/.gitignore b/vendor/github.com/lestrrat-go/option/v2/.gitignore
new file mode 100644
index 0000000000..66fd13c903
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/option/v2/.gitignore
@@ -0,0 +1,15 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
diff --git a/vendor/github.com/lestrrat-go/option/v2/LICENSE b/vendor/github.com/lestrrat-go/option/v2/LICENSE
new file mode 100644
index 0000000000..188ea7685c
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/option/v2/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 lestrrat-go
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/lestrrat-go/option/v2/README.md b/vendor/github.com/lestrrat-go/option/v2/README.md
new file mode 100644
index 0000000000..cab0044ed3
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/option/v2/README.md
@@ -0,0 +1,245 @@
+# option
+
+Base object for the "Optional Parameters Pattern".
+
+# DESCRIPTION
+
+The beauty of this pattern is that you can achieve a method that can
+take the following simple calling style
+
+```go
+obj.Method(mandatory1, mandatory2)
+```
+
+or the following, if you want to modify its behavior with optional parameters
+
+```go
+obj.Method(mandatory1, mandatory2, optional1, optional2, optional3)
+```
+
+Instead of the more clunky zero value for optionals style
+
+```go
+obj.Method(mandatory1, mandatory2, nil, "", 0)
+```
+
+or the equally clunky config object style, which requires you to create a
+struct with `NamesThatLookReallyLongBecauseItNeedsToIncludeMethodNamesConfig
+
+```go
+cfg := &ConfigForMethod{
+ Optional1: ...,
+ Optional2: ...,
+ Optional3: ...,
+}
+obj.Method(mandatory1, mandatory2, &cfg)
+```
+
+# SYNOPSIS
+
+Create an "identifier" for the option. We recommend using an unexported empty struct,
+because
+
+1. It is uniquely identifiable globally
+1. Takes minimal space
+1. Since it's unexported, you do not have to worry about it leaking elsewhere or having it changed by consumers
+
+```go
+// an unexported empty struct
+type identFeatureX struct{}
+```
+
+Then define a method to create an option using this identifier. Here we assume
+that the option will be a boolean option.
+
+```go
+// this is optional, but for readability we usually use a wrapper
+// around option.Interface, or a type alias.
+type Option
+func WithFeatureX(v bool) Option {
+ // use the constructor to create a new option
+ return option.New(identFeatureX{}, v)
+}
+```
+
+Now you can create an option, which essentially a two element tuple consisting
+of an identifier and its associated value.
+
+To consume this, you will need to create a function with variadic parameters,
+and iterate over the list looking for a particular identifier:
+
+```go
+func MyAwesomeFunc( /* mandatory parameters omitted */, options ...[]Option) {
+ var enableFeatureX bool
+ // The nolint directive is recommended if you are using linters such
+ // as golangci-lint
+ //nolint:forcetypeassert
+ for _, option := range options {
+ switch option.Ident() {
+ case identFeatureX{}:
+ enableFeatureX = option.Value().(bool)
+ // other cases omitted
+ }
+ }
+ if enableFeatureX {
+ ....
+ }
+}
+```
+
+# Option objects
+
+Option objects take two arguments, its identifier and the value it contains.
+
+The identifier can be anything, but it's usually better to use a an unexported
+empty struct so that only you have the ability to generate said option:
+
+```go
+type identOptionalParamOne struct{}
+type identOptionalParamTwo struct{}
+type identOptionalParamThree struct{}
+
+func WithOptionOne(v ...) Option {
+ return option.New(identOptionalParamOne{}, v)
+}
+```
+
+Then you can call the method we described above as
+
+```go
+obj.Method(m1, m2, WithOptionOne(...), WithOptionTwo(...), WithOptionThree(...))
+```
+
+Options should be parsed in a code that looks somewhat like this
+
+```go
+func (obj *Object) Method(m1 Type1, m2 Type2, options ...Option) {
+ paramOne := defaultValueParamOne
+ for _, option := range options {
+ switch option.Ident() {
+ case identOptionalParamOne{}:
+ paramOne = option.Value().(...)
+ }
+ }
+ ...
+}
+```
+
+The loop requires a bit of boilerplate, and admittedly, this is the main downside
+of this module. However, if you think you want use the Option as a Function pattern,
+please check the FAQ below for rationale.
+
+# Simple usage
+
+Most of the times all you need to do is to declare the Option type as an alias
+in your code:
+
+```go
+package myawesomepkg
+
+import "github.com/lestrrat-go/option"
+
+type Option = option.Interface
+```
+
+Then you can start defining options like they are described in the SYNOPSIS section.
+
+# Differentiating Options
+
+When you have multiple methods and options, and those options can only be passed to
+each one the methods, it's hard to see which options should be passed to which method.
+
+```go
+func WithX() Option { ... }
+func WithY() Option { ... }
+
+// Now, which of WithX/WithY go to which method?
+func (*Obj) Method1(options ...Option) {}
+func (*Obj) Method2(options ...Option) {}
+```
+
+In this case the easiest way to make it obvious is to put an extra layer around
+the options so that they have different types
+
+```go
+type Method1Option interface {
+ Option
+ method1Option()
+}
+
+type method1Option struct { Option }
+func (*method1Option) method1Option() {}
+
+func WithX() Method1Option {
+ return &methodOption{option.New(...)}
+}
+
+func (*Obj) Method1(options ...Method1Option) {}
+```
+
+This way the compiler knows if an option can be passed to a given method.
+
+# FAQ
+
+## Why aren't these function-based?
+
+Using a base option type like `type Option func(ctx interface{})` is certainly one way to achieve the same goal. In this case, you are giving the option itself the ability to "configure" the main object. For example:
+
+```go
+type Foo struct {
+ optionaValue bool
+}
+
+type Option func(*Foo) error
+
+func WithOptionalValue(v bool) Option {
+ return Option(func(f *Foo) error {
+ f.optionalValue = v
+ return nil
+ })
+}
+
+func NewFoo(options ...Option) (*Foo, error) {
+ var f Foo
+ for _, o := range options {
+ if err := o(&f); err != nil {
+ return nil, err
+ }
+ }
+ return &f
+}
+```
+
+This in itself is fine, but we think there are a few problems:
+
+### 1. It's hard to create a reusable "Option" type
+
+We create many libraries using this optional pattern. We would like to provide a default base object. However, this function based approach is not reusuable because each "Option" type requires that it has a context-specific input type. For example, if the "Option" type in the previous example was `func(interface{}) error`, then its usability will significantly decrease because of the type conversion.
+
+This is not to say that this library's approach is better as it also requires type conversion to convert the _value_ of the option. However, part of the beauty of the original function based approach was the ease of its use, and we claim that this significantly decreases the merits of the function based approach.
+
+### 2. The receiver requires exported fields
+
+Part of the appeal for a function-based option pattern is by giving the option itself the ability to do what it wants, you open up the possibility of allowing third-parties to create options that do things that the library authors did not think about.
+
+```go
+package thirdparty
+, but when I read drum sheet music, I kind of get thrown off b/c many times it says to hit the bass drum where I feel like it's a snare hit.
+func WithMyAwesomeOption( ... ) mypkg.Option {
+ return mypkg.Option(func(f *mypkg) error {
+ f.X = ...
+ f.Y = ...
+ f.Z = ...
+ return nil
+ })
+}
+```
+
+However, for any third party code to access and set field values, these fields (`X`, `Y`, `Z`) must be exported. Basically you will need an "open" struct.
+
+Exported fields are absolutely no problem when you have a struct that represents data alone (i.e., API calls that refer or change state information) happen, but we think that casually expose fields for a library struct is a sure way to maintenance hell in the future. What happens when you want to change the API? What happens when you realize that you want to use the field as state (i.e. use it for more than configuration)? What if they kept referring to that field, and then you have concurrent code accessing it?
+
+Giving third parties complete access to exported fields is like handing out a loaded weapon to the users, and you are at their mercy.
+
+Of course, providing public APIs for everything so you can validate and control concurrency is an option, but then ... it's a lot of work, and you may have to provide APIs _only_ so that users can refer it in the option-configuration phase. That sounds like a lot of extra work.
+
diff --git a/vendor/github.com/lestrrat-go/option/v2/option.go b/vendor/github.com/lestrrat-go/option/v2/option.go
new file mode 100644
index 0000000000..f4fcca3b58
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/option/v2/option.go
@@ -0,0 +1,47 @@
+package option
+
+import (
+ "fmt"
+
+ "github.com/lestrrat-go/blackmagic"
+)
+
+// Interface defines the minimum interface that an option must fulfill
+type Interface interface {
+ // Ident returns the "identity" of this option, a unique identifier that
+ // can be used to differentiate between options
+ Ident() any
+
+ // Value assigns the stored value into the dst argument, which must be
+ // a pointer to a variable that can store the value. If the assignment
+ // is successful, it return nil, otherwise it returns an error.
+ Value(dst any) error
+}
+
+type pair[T any] struct {
+ ident any
+ value T
+}
+
+// New creates a new Option
+func New[T any](ident any, value T) Interface {
+ return &pair[T]{
+ ident: ident,
+ value: value,
+ }
+}
+
+func (p *pair[T]) Ident() any {
+ return p.ident
+}
+
+func (p *pair[T]) Value(dst any) error {
+ if err := blackmagic.AssignIfCompatible(dst, p.value); err != nil {
+ return fmt.Errorf("failed to assign value %T to %T: %s", p.value, dst, err)
+ }
+ return nil
+}
+
+func (p *pair[T]) String() string {
+ return fmt.Sprintf(`%v(%v)`, p.ident, p.value)
+}
diff --git a/vendor/github.com/lestrrat-go/option/v2/set.go b/vendor/github.com/lestrrat-go/option/v2/set.go
new file mode 100644
index 0000000000..def943407a
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/option/v2/set.go
@@ -0,0 +1,92 @@
+package option
+
+import (
+ "sync"
+)
+
+// Set is a container to store multiple options. Because options are
+// usually used all over the place to configure various aspects of
+// a system, it is often useful to be able to collect multiple options
+// together and pass them around as a single entity.
+//
+// Note that Set is meant to be add-only; You usually do not remove
+// options from a Set.
+//
+// The intention is to create a set using a sync.Pool; we would like
+// to provide a centralized pool of Sets so that you don't need to
+// instantiate a new pool for every type of option you want to
+// store, but that is not quite possible because of the limitations
+// of parameterized types in Go. Instead create a `*option.SetPool`
+// with an appropriate type parameter and allocator.
+type Set[T Interface] struct {
+ mu sync.RWMutex
+ options []T
+}
+
+func NewSet[T Interface]() *Set[T] {
+ return &Set[T]{
+ options: make([]T, 0, 1),
+ }
+}
+
+func (s *Set[T]) Add(opt T) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.options = append(s.options, opt)
+}
+
+func (s *Set[T]) Reset() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.options = s.options[:0] // Reset the options slice to avoid memory leaks
+}
+
+func (s *Set[T]) Len() int {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return len(s.options)
+}
+
+func (s *Set[T]) Option(i int) T {
+ var zero T
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ if i < 0 || i >= len(s.options) {
+ return zero
+ }
+ return s.options[i]
+}
+
+// List returns a slice of all options stored in the Set.
+// Note that the slice is the same slice that is used internally, so
+// you should not modify the contents of the slice directly.
+// This to avoid unnecessary allocations and copying of the slice for
+// performance reasons.
+func (s *Set[T]) List() []T {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return s.options
+}
+
+// SetPool is a pool of Sets that can be used to efficiently manage
+// the lifecycle of Sets. It uses a sync.Pool to store and retrieve
+// Sets, allowing for efficient reuse of memory and reducing the
+// number of allocations required when creating new Sets.
+type SetPool[T Interface] struct {
+ pool *sync.Pool // sync.Pool that contains *Set[T]
+}
+
+func NewSetPool[T Interface](pool *sync.Pool) *SetPool[T] {
+ return &SetPool[T]{
+ pool: pool,
+ }
+}
+
+func (p *SetPool[T]) Get() *Set[T] {
+ return p.pool.Get().(*Set[T])
+}
+
+func (p *SetPool[T]) Put(s *Set[T]) {
+ s.Reset()
+ p.pool.Put(s)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/annotations.go b/vendor/github.com/open-policy-agent/opa/ast/annotations.go
index d6267a0e64..3bc5fb36a5 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/annotations.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/annotations.go
@@ -5,973 +5,33 @@
package ast
import (
- "encoding/json"
- "fmt"
- "net/url"
- "sort"
- "strings"
-
- astJSON "github.com/open-policy-agent/opa/ast/json"
- "github.com/open-policy-agent/opa/internal/deepcopy"
- "github.com/open-policy-agent/opa/util"
-)
-
-const (
- annotationScopePackage = "package"
- annotationScopeImport = "import"
- annotationScopeRule = "rule"
- annotationScopeDocument = "document"
- annotationScopeSubpackages = "subpackages"
+ v1 "github.com/open-policy-agent/opa/v1/ast"
)
type (
// Annotations represents metadata attached to other AST nodes such as rules.
- Annotations struct {
- Scope string `json:"scope"`
- Title string `json:"title,omitempty"`
- Entrypoint bool `json:"entrypoint,omitempty"`
- Description string `json:"description,omitempty"`
- Organizations []string `json:"organizations,omitempty"`
- RelatedResources []*RelatedResourceAnnotation `json:"related_resources,omitempty"`
- Authors []*AuthorAnnotation `json:"authors,omitempty"`
- Schemas []*SchemaAnnotation `json:"schemas,omitempty"`
- Custom map[string]interface{} `json:"custom,omitempty"`
- Location *Location `json:"location,omitempty"`
-
- comments []*Comment
- node Node
- jsonOptions astJSON.Options
- }
+ Annotations = v1.Annotations
// SchemaAnnotation contains a schema declaration for the document identified by the path.
- SchemaAnnotation struct {
- Path Ref `json:"path"`
- Schema Ref `json:"schema,omitempty"`
- Definition *interface{} `json:"definition,omitempty"`
- }
-
- AuthorAnnotation struct {
- Name string `json:"name"`
- Email string `json:"email,omitempty"`
- }
-
- RelatedResourceAnnotation struct {
- Ref url.URL `json:"ref"`
- Description string `json:"description,omitempty"`
- }
-
- AnnotationSet struct {
- byRule map[*Rule][]*Annotations
- byPackage map[int]*Annotations
- byPath *annotationTreeNode
- modules []*Module // Modules this set was constructed from
- }
+ SchemaAnnotation = v1.SchemaAnnotation
- annotationTreeNode struct {
- Value *Annotations
- Children map[Value]*annotationTreeNode // we assume key elements are hashable (vars and strings only!)
- }
+ AuthorAnnotation = v1.AuthorAnnotation
- AnnotationsRef struct {
- Path Ref `json:"path"` // The path of the node the annotations are applied to
- Annotations *Annotations `json:"annotations,omitempty"`
- Location *Location `json:"location,omitempty"` // The location of the node the annotations are applied to
+ RelatedResourceAnnotation = v1.RelatedResourceAnnotation
- jsonOptions astJSON.Options
+ AnnotationSet = v1.AnnotationSet
- node Node // The node the annotations are applied to
- }
+ AnnotationsRef = v1.AnnotationsRef
- AnnotationsRefSet []*AnnotationsRef
+ AnnotationsRefSet = v1.AnnotationsRefSet
- FlatAnnotationsRefSet AnnotationsRefSet
+ FlatAnnotationsRefSet = v1.FlatAnnotationsRefSet
)
-func (a *Annotations) String() string {
- bs, _ := a.MarshalJSON()
- return string(bs)
-}
-
-// Loc returns the location of this annotation.
-func (a *Annotations) Loc() *Location {
- return a.Location
-}
-
-// SetLoc updates the location of this annotation.
-func (a *Annotations) SetLoc(l *Location) {
- a.Location = l
-}
-
-// EndLoc returns the location of this annotation's last comment line.
-func (a *Annotations) EndLoc() *Location {
- count := len(a.comments)
- if count == 0 {
- return a.Location
- }
- return a.comments[count-1].Location
-}
-
-// Compare returns an integer indicating if a is less than, equal to, or greater
-// than other.
-func (a *Annotations) Compare(other *Annotations) int {
-
- if a == nil && other == nil {
- return 0
- }
-
- if a == nil {
- return -1
- }
-
- if other == nil {
- return 1
- }
-
- if cmp := scopeCompare(a.Scope, other.Scope); cmp != 0 {
- return cmp
- }
-
- if cmp := strings.Compare(a.Title, other.Title); cmp != 0 {
- return cmp
- }
-
- if cmp := strings.Compare(a.Description, other.Description); cmp != 0 {
- return cmp
- }
-
- if cmp := compareStringLists(a.Organizations, other.Organizations); cmp != 0 {
- return cmp
- }
-
- if cmp := compareRelatedResources(a.RelatedResources, other.RelatedResources); cmp != 0 {
- return cmp
- }
-
- if cmp := compareAuthors(a.Authors, other.Authors); cmp != 0 {
- return cmp
- }
-
- if cmp := compareSchemas(a.Schemas, other.Schemas); cmp != 0 {
- return cmp
- }
-
- if a.Entrypoint != other.Entrypoint {
- if a.Entrypoint {
- return 1
- }
- return -1
- }
-
- if cmp := util.Compare(a.Custom, other.Custom); cmp != 0 {
- return cmp
- }
-
- return 0
-}
-
-// GetTargetPath returns the path of the node these Annotations are applied to (the target)
-func (a *Annotations) GetTargetPath() Ref {
- switch n := a.node.(type) {
- case *Package:
- return n.Path
- case *Rule:
- return n.Ref().GroundPrefix()
- default:
- return nil
- }
-}
-
-func (a *Annotations) setJSONOptions(opts astJSON.Options) {
- a.jsonOptions = opts
- if a.Location != nil {
- a.Location.JSONOptions = opts
- }
-}
-
-func (a *Annotations) MarshalJSON() ([]byte, error) {
- if a == nil {
- return []byte(`{"scope":""}`), nil
- }
-
- data := map[string]interface{}{
- "scope": a.Scope,
- }
-
- if a.Title != "" {
- data["title"] = a.Title
- }
-
- if a.Description != "" {
- data["description"] = a.Description
- }
-
- if a.Entrypoint {
- data["entrypoint"] = a.Entrypoint
- }
-
- if len(a.Organizations) > 0 {
- data["organizations"] = a.Organizations
- }
-
- if len(a.RelatedResources) > 0 {
- data["related_resources"] = a.RelatedResources
- }
-
- if len(a.Authors) > 0 {
- data["authors"] = a.Authors
- }
-
- if len(a.Schemas) > 0 {
- data["schemas"] = a.Schemas
- }
-
- if len(a.Custom) > 0 {
- data["custom"] = a.Custom
- }
-
- if a.jsonOptions.MarshalOptions.IncludeLocation.Annotations {
- if a.Location != nil {
- data["location"] = a.Location
- }
- }
-
- return json.Marshal(data)
-}
-
func NewAnnotationsRef(a *Annotations) *AnnotationsRef {
- var loc *Location
- if a.node != nil {
- loc = a.node.Loc()
- }
-
- return &AnnotationsRef{
- Location: loc,
- Path: a.GetTargetPath(),
- Annotations: a,
- node: a.node,
- jsonOptions: a.jsonOptions,
- }
-}
-
-func (ar *AnnotationsRef) GetPackage() *Package {
- switch n := ar.node.(type) {
- case *Package:
- return n
- case *Rule:
- return n.Module.Package
- default:
- return nil
- }
-}
-
-func (ar *AnnotationsRef) GetRule() *Rule {
- switch n := ar.node.(type) {
- case *Rule:
- return n
- default:
- return nil
- }
-}
-
-func (ar *AnnotationsRef) MarshalJSON() ([]byte, error) {
- data := map[string]interface{}{
- "path": ar.Path,
- }
-
- if ar.Annotations != nil {
- data["annotations"] = ar.Annotations
- }
-
- if ar.jsonOptions.MarshalOptions.IncludeLocation.AnnotationsRef {
- if ar.Location != nil {
- data["location"] = ar.Location
- }
- }
-
- return json.Marshal(data)
-}
-
-func scopeCompare(s1, s2 string) int {
-
- o1 := scopeOrder(s1)
- o2 := scopeOrder(s2)
-
- if o2 < o1 {
- return 1
- } else if o2 > o1 {
- return -1
- }
-
- if s1 < s2 {
- return -1
- } else if s2 < s1 {
- return 1
- }
-
- return 0
-}
-
-func scopeOrder(s string) int {
- switch s {
- case annotationScopeRule:
- return 1
- }
- return 0
-}
-
-func compareAuthors(a, b []*AuthorAnnotation) int {
- if len(a) > len(b) {
- return 1
- } else if len(a) < len(b) {
- return -1
- }
-
- for i := 0; i < len(a); i++ {
- if cmp := a[i].Compare(b[i]); cmp != 0 {
- return cmp
- }
- }
-
- return 0
-}
-
-func compareRelatedResources(a, b []*RelatedResourceAnnotation) int {
- if len(a) > len(b) {
- return 1
- } else if len(a) < len(b) {
- return -1
- }
-
- for i := 0; i < len(a); i++ {
- if cmp := strings.Compare(a[i].String(), b[i].String()); cmp != 0 {
- return cmp
- }
- }
-
- return 0
-}
-
-func compareSchemas(a, b []*SchemaAnnotation) int {
- maxLen := len(a)
- if len(b) < maxLen {
- maxLen = len(b)
- }
-
- for i := 0; i < maxLen; i++ {
- if cmp := a[i].Compare(b[i]); cmp != 0 {
- return cmp
- }
- }
-
- if len(a) > len(b) {
- return 1
- } else if len(a) < len(b) {
- return -1
- }
-
- return 0
-}
-
-func compareStringLists(a, b []string) int {
- if len(a) > len(b) {
- return 1
- } else if len(a) < len(b) {
- return -1
- }
-
- for i := 0; i < len(a); i++ {
- if cmp := strings.Compare(a[i], b[i]); cmp != 0 {
- return cmp
- }
- }
-
- return 0
-}
-
-// Copy returns a deep copy of s.
-func (a *Annotations) Copy(node Node) *Annotations {
- cpy := *a
-
- cpy.Organizations = make([]string, len(a.Organizations))
- copy(cpy.Organizations, a.Organizations)
-
- cpy.RelatedResources = make([]*RelatedResourceAnnotation, len(a.RelatedResources))
- for i := range a.RelatedResources {
- cpy.RelatedResources[i] = a.RelatedResources[i].Copy()
- }
-
- cpy.Authors = make([]*AuthorAnnotation, len(a.Authors))
- for i := range a.Authors {
- cpy.Authors[i] = a.Authors[i].Copy()
- }
-
- cpy.Schemas = make([]*SchemaAnnotation, len(a.Schemas))
- for i := range a.Schemas {
- cpy.Schemas[i] = a.Schemas[i].Copy()
- }
-
- cpy.Custom = deepcopy.Map(a.Custom)
-
- cpy.node = node
-
- return &cpy
-}
-
-// toObject constructs an AST Object from the annotation.
-func (a *Annotations) toObject() (*Object, *Error) {
- obj := NewObject()
-
- if a == nil {
- return &obj, nil
- }
-
- if len(a.Scope) > 0 {
- obj.Insert(StringTerm("scope"), StringTerm(a.Scope))
- }
-
- if len(a.Title) > 0 {
- obj.Insert(StringTerm("title"), StringTerm(a.Title))
- }
-
- if a.Entrypoint {
- obj.Insert(StringTerm("entrypoint"), BooleanTerm(true))
- }
-
- if len(a.Description) > 0 {
- obj.Insert(StringTerm("description"), StringTerm(a.Description))
- }
-
- if len(a.Organizations) > 0 {
- orgs := make([]*Term, 0, len(a.Organizations))
- for _, org := range a.Organizations {
- orgs = append(orgs, StringTerm(org))
- }
- obj.Insert(StringTerm("organizations"), ArrayTerm(orgs...))
- }
-
- if len(a.RelatedResources) > 0 {
- rrs := make([]*Term, 0, len(a.RelatedResources))
- for _, rr := range a.RelatedResources {
- rrObj := NewObject(Item(StringTerm("ref"), StringTerm(rr.Ref.String())))
- if len(rr.Description) > 0 {
- rrObj.Insert(StringTerm("description"), StringTerm(rr.Description))
- }
- rrs = append(rrs, NewTerm(rrObj))
- }
- obj.Insert(StringTerm("related_resources"), ArrayTerm(rrs...))
- }
-
- if len(a.Authors) > 0 {
- as := make([]*Term, 0, len(a.Authors))
- for _, author := range a.Authors {
- aObj := NewObject()
- if len(author.Name) > 0 {
- aObj.Insert(StringTerm("name"), StringTerm(author.Name))
- }
- if len(author.Email) > 0 {
- aObj.Insert(StringTerm("email"), StringTerm(author.Email))
- }
- as = append(as, NewTerm(aObj))
- }
- obj.Insert(StringTerm("authors"), ArrayTerm(as...))
- }
-
- if len(a.Schemas) > 0 {
- ss := make([]*Term, 0, len(a.Schemas))
- for _, s := range a.Schemas {
- sObj := NewObject()
- if len(s.Path) > 0 {
- sObj.Insert(StringTerm("path"), NewTerm(s.Path.toArray()))
- }
- if len(s.Schema) > 0 {
- sObj.Insert(StringTerm("schema"), NewTerm(s.Schema.toArray()))
- }
- if s.Definition != nil {
- def, err := InterfaceToValue(s.Definition)
- if err != nil {
- return nil, NewError(CompileErr, a.Location, "invalid definition in schema annotation: %s", err.Error())
- }
- sObj.Insert(StringTerm("definition"), NewTerm(def))
- }
- ss = append(ss, NewTerm(sObj))
- }
- obj.Insert(StringTerm("schemas"), ArrayTerm(ss...))
- }
-
- if len(a.Custom) > 0 {
- c, err := InterfaceToValue(a.Custom)
- if err != nil {
- return nil, NewError(CompileErr, a.Location, "invalid custom annotation %s", err.Error())
- }
- obj.Insert(StringTerm("custom"), NewTerm(c))
- }
-
- return &obj, nil
-}
-
-func attachRuleAnnotations(mod *Module) {
- // make a copy of the annotations
- cpy := make([]*Annotations, len(mod.Annotations))
- for i, a := range mod.Annotations {
- cpy[i] = a.Copy(a.node)
- }
-
- for _, rule := range mod.Rules {
- var j int
- var found bool
- for i, a := range cpy {
- if rule.Ref().GroundPrefix().Equal(a.GetTargetPath()) {
- if a.Scope == annotationScopeDocument {
- rule.Annotations = append(rule.Annotations, a)
- } else if a.Scope == annotationScopeRule && rule.Loc().Row > a.Location.Row {
- j = i
- found = true
- rule.Annotations = append(rule.Annotations, a)
- }
- }
- }
-
- if found && j < len(cpy) {
- cpy = append(cpy[:j], cpy[j+1:]...)
- }
- }
-}
-
-func attachAnnotationsNodes(mod *Module) Errors {
- var errs Errors
-
- // Find first non-annotation statement following each annotation and attach
- // the annotation to that statement.
- for _, a := range mod.Annotations {
- for _, stmt := range mod.stmts {
- _, ok := stmt.(*Annotations)
- if !ok {
- if stmt.Loc().Row > a.Location.Row {
- a.node = stmt
- break
- }
- }
- }
-
- if a.Scope == "" {
- switch a.node.(type) {
- case *Rule:
- if a.Entrypoint {
- a.Scope = annotationScopeDocument
- } else {
- a.Scope = annotationScopeRule
- }
- case *Package:
- a.Scope = annotationScopePackage
- case *Import:
- a.Scope = annotationScopeImport
- }
- }
-
- if err := validateAnnotationScopeAttachment(a); err != nil {
- errs = append(errs, err)
- }
-
- if err := validateAnnotationEntrypointAttachment(a); err != nil {
- errs = append(errs, err)
- }
- }
-
- return errs
-}
-
-func validateAnnotationScopeAttachment(a *Annotations) *Error {
-
- switch a.Scope {
- case annotationScopeRule, annotationScopeDocument:
- if _, ok := a.node.(*Rule); ok {
- return nil
- }
- return newScopeAttachmentErr(a, "rule")
- case annotationScopePackage, annotationScopeSubpackages:
- if _, ok := a.node.(*Package); ok {
- return nil
- }
- return newScopeAttachmentErr(a, "package")
- }
-
- return NewError(ParseErr, a.Loc(), "invalid annotation scope '%v'. Use one of '%s', '%s', '%s', or '%s'",
- a.Scope, annotationScopeRule, annotationScopeDocument, annotationScopePackage, annotationScopeSubpackages)
-}
-
-func validateAnnotationEntrypointAttachment(a *Annotations) *Error {
- if a.Entrypoint && !(a.Scope == annotationScopeDocument || a.Scope == annotationScopePackage) {
- return NewError(
- ParseErr, a.Loc(), "annotation entrypoint applied to non-document or package scope '%v'", a.Scope)
- }
- return nil
-}
-
-// Copy returns a deep copy of a.
-func (a *AuthorAnnotation) Copy() *AuthorAnnotation {
- cpy := *a
- return &cpy
-}
-
-// Compare returns an integer indicating if s is less than, equal to, or greater
-// than other.
-func (a *AuthorAnnotation) Compare(other *AuthorAnnotation) int {
- if cmp := strings.Compare(a.Name, other.Name); cmp != 0 {
- return cmp
- }
-
- if cmp := strings.Compare(a.Email, other.Email); cmp != 0 {
- return cmp
- }
-
- return 0
-}
-
-func (a *AuthorAnnotation) String() string {
- if len(a.Email) == 0 {
- return a.Name
- } else if len(a.Name) == 0 {
- return fmt.Sprintf("<%s>", a.Email)
- }
- return fmt.Sprintf("%s <%s>", a.Name, a.Email)
-}
-
-// Copy returns a deep copy of rr.
-func (rr *RelatedResourceAnnotation) Copy() *RelatedResourceAnnotation {
- cpy := *rr
- return &cpy
-}
-
-// Compare returns an integer indicating if s is less than, equal to, or greater
-// than other.
-func (rr *RelatedResourceAnnotation) Compare(other *RelatedResourceAnnotation) int {
- if cmp := strings.Compare(rr.Description, other.Description); cmp != 0 {
- return cmp
- }
-
- if cmp := strings.Compare(rr.Ref.String(), other.Ref.String()); cmp != 0 {
- return cmp
- }
-
- return 0
-}
-
-func (rr *RelatedResourceAnnotation) String() string {
- bs, _ := json.Marshal(rr)
- return string(bs)
-}
-
-func (rr *RelatedResourceAnnotation) MarshalJSON() ([]byte, error) {
- d := map[string]interface{}{
- "ref": rr.Ref.String(),
- }
-
- if len(rr.Description) > 0 {
- d["description"] = rr.Description
- }
-
- return json.Marshal(d)
-}
-
-// Copy returns a deep copy of s.
-func (s *SchemaAnnotation) Copy() *SchemaAnnotation {
- cpy := *s
- return &cpy
-}
-
-// Compare returns an integer indicating if s is less than, equal to, or greater
-// than other.
-func (s *SchemaAnnotation) Compare(other *SchemaAnnotation) int {
-
- if cmp := s.Path.Compare(other.Path); cmp != 0 {
- return cmp
- }
-
- if cmp := s.Schema.Compare(other.Schema); cmp != 0 {
- return cmp
- }
-
- if s.Definition != nil && other.Definition == nil {
- return -1
- } else if s.Definition == nil && other.Definition != nil {
- return 1
- } else if s.Definition != nil && other.Definition != nil {
- return util.Compare(*s.Definition, *other.Definition)
- }
-
- return 0
-}
-
-func (s *SchemaAnnotation) String() string {
- bs, _ := json.Marshal(s)
- return string(bs)
-}
-
-func newAnnotationSet() *AnnotationSet {
- return &AnnotationSet{
- byRule: map[*Rule][]*Annotations{},
- byPackage: map[int]*Annotations{},
- byPath: newAnnotationTree(),
- }
+ return v1.NewAnnotationsRef(a)
}
func BuildAnnotationSet(modules []*Module) (*AnnotationSet, Errors) {
- as := newAnnotationSet()
- var errs Errors
- for _, m := range modules {
- for _, a := range m.Annotations {
- if err := as.add(a); err != nil {
- errs = append(errs, err)
- }
- }
- }
- if len(errs) > 0 {
- return nil, errs
- }
- as.modules = modules
- return as, nil
-}
-
-// NOTE(philipc): During copy propagation, the underlying Nodes can be
-// stripped away from the annotations, leading to nil deref panics. We
-// silently ignore these cases for now, as a workaround.
-func (as *AnnotationSet) add(a *Annotations) *Error {
- switch a.Scope {
- case annotationScopeRule:
- if rule, ok := a.node.(*Rule); ok {
- as.byRule[rule] = append(as.byRule[rule], a)
- }
- case annotationScopePackage:
- if pkg, ok := a.node.(*Package); ok {
- hash := pkg.Path.Hash()
- if exist, ok := as.byPackage[hash]; ok {
- return errAnnotationRedeclared(a, exist.Location)
- }
- as.byPackage[hash] = a
- }
- case annotationScopeDocument:
- if rule, ok := a.node.(*Rule); ok {
- path := rule.Ref().GroundPrefix()
- x := as.byPath.get(path)
- if x != nil {
- return errAnnotationRedeclared(a, x.Value.Location)
- }
- as.byPath.insert(path, a)
- }
- case annotationScopeSubpackages:
- if pkg, ok := a.node.(*Package); ok {
- x := as.byPath.get(pkg.Path)
- if x != nil && x.Value != nil {
- return errAnnotationRedeclared(a, x.Value.Location)
- }
- as.byPath.insert(pkg.Path, a)
- }
- }
- return nil
-}
-
-func (as *AnnotationSet) GetRuleScope(r *Rule) []*Annotations {
- if as == nil {
- return nil
- }
- return as.byRule[r]
-}
-
-func (as *AnnotationSet) GetSubpackagesScope(path Ref) []*Annotations {
- if as == nil {
- return nil
- }
- return as.byPath.ancestors(path)
-}
-
-func (as *AnnotationSet) GetDocumentScope(path Ref) *Annotations {
- if as == nil {
- return nil
- }
- if node := as.byPath.get(path); node != nil {
- return node.Value
- }
- return nil
-}
-
-func (as *AnnotationSet) GetPackageScope(pkg *Package) *Annotations {
- if as == nil {
- return nil
- }
- return as.byPackage[pkg.Path.Hash()]
-}
-
-// Flatten returns a flattened list view of this AnnotationSet.
-// The returned slice is sorted, first by the annotations' target path, then by their target location
-func (as *AnnotationSet) Flatten() FlatAnnotationsRefSet {
- // This preallocation often won't be optimal, but it's superior to starting with a nil slice.
- refs := make([]*AnnotationsRef, 0, len(as.byPath.Children)+len(as.byRule)+len(as.byPackage))
-
- refs = as.byPath.flatten(refs)
-
- for _, a := range as.byPackage {
- refs = append(refs, NewAnnotationsRef(a))
- }
-
- for _, as := range as.byRule {
- for _, a := range as {
- refs = append(refs, NewAnnotationsRef(a))
- }
- }
-
- // Sort by path, then annotation location, for stable output
- sort.SliceStable(refs, func(i, j int) bool {
- return refs[i].Compare(refs[j]) < 0
- })
-
- return refs
-}
-
-// Chain returns the chain of annotations leading up to the given rule.
-// The returned slice is ordered as follows
-// 0. Entries for the given rule, ordered from the METADATA block declared immediately above the rule, to the block declared farthest away (always at least one entry)
-// 1. The 'document' scope entry, if any
-// 2. The 'package' scope entry, if any
-// 3. Entries for the 'subpackages' scope, if any; ordered from the closest package path to the fartest. E.g.: 'do.re.mi', 'do.re', 'do'
-// The returned slice is guaranteed to always contain at least one entry, corresponding to the given rule.
-func (as *AnnotationSet) Chain(rule *Rule) AnnotationsRefSet {
- var refs []*AnnotationsRef
-
- ruleAnnots := as.GetRuleScope(rule)
-
- if len(ruleAnnots) >= 1 {
- for _, a := range ruleAnnots {
- refs = append(refs, NewAnnotationsRef(a))
- }
- } else {
- // Make sure there is always a leading entry representing the passed rule, even if it has no annotations
- refs = append(refs, &AnnotationsRef{
- Location: rule.Location,
- Path: rule.Ref().GroundPrefix(),
- node: rule,
- })
- }
-
- if len(refs) > 1 {
- // Sort by annotation location; chain must start with annotations declared closest to rule, then going outward
- sort.SliceStable(refs, func(i, j int) bool {
- return refs[i].Annotations.Location.Compare(refs[j].Annotations.Location) > 0
- })
- }
-
- docAnnots := as.GetDocumentScope(rule.Ref().GroundPrefix())
- if docAnnots != nil {
- refs = append(refs, NewAnnotationsRef(docAnnots))
- }
-
- pkg := rule.Module.Package
- pkgAnnots := as.GetPackageScope(pkg)
- if pkgAnnots != nil {
- refs = append(refs, NewAnnotationsRef(pkgAnnots))
- }
-
- subPkgAnnots := as.GetSubpackagesScope(pkg.Path)
- // We need to reverse the order, as subPkgAnnots ordering will start at the root,
- // whereas we want to end at the root.
- for i := len(subPkgAnnots) - 1; i >= 0; i-- {
- refs = append(refs, NewAnnotationsRef(subPkgAnnots[i]))
- }
-
- return refs
-}
-
-func (ars FlatAnnotationsRefSet) Insert(ar *AnnotationsRef) FlatAnnotationsRefSet {
- result := make(FlatAnnotationsRefSet, 0, len(ars)+1)
-
- // insertion sort, first by path, then location
- for i, current := range ars {
- if ar.Compare(current) < 0 {
- result = append(result, ar)
- result = append(result, ars[i:]...)
- break
- }
- result = append(result, current)
- }
-
- if len(result) < len(ars)+1 {
- result = append(result, ar)
- }
-
- return result
-}
-
-func newAnnotationTree() *annotationTreeNode {
- return &annotationTreeNode{
- Value: nil,
- Children: map[Value]*annotationTreeNode{},
- }
-}
-
-func (t *annotationTreeNode) insert(path Ref, value *Annotations) {
- node := t
- for _, k := range path {
- child, ok := node.Children[k.Value]
- if !ok {
- child = newAnnotationTree()
- node.Children[k.Value] = child
- }
- node = child
- }
- node.Value = value
-}
-
-func (t *annotationTreeNode) get(path Ref) *annotationTreeNode {
- node := t
- for _, k := range path {
- if node == nil {
- return nil
- }
- child, ok := node.Children[k.Value]
- if !ok {
- return nil
- }
- node = child
- }
- return node
-}
-
-// ancestors returns a slice of annotations in ascending order, starting with the root of ref; e.g.: 'root', 'root.foo', 'root.foo.bar'.
-func (t *annotationTreeNode) ancestors(path Ref) (result []*Annotations) {
- node := t
- for _, k := range path {
- if node == nil {
- return result
- }
- child, ok := node.Children[k.Value]
- if !ok {
- return result
- }
- if child.Value != nil {
- result = append(result, child.Value)
- }
- node = child
- }
- return result
-}
-
-func (t *annotationTreeNode) flatten(refs []*AnnotationsRef) []*AnnotationsRef {
- if a := t.Value; a != nil {
- refs = append(refs, NewAnnotationsRef(a))
- }
- for _, c := range t.Children {
- refs = c.flatten(refs)
- }
- return refs
-}
-
-func (ar *AnnotationsRef) Compare(other *AnnotationsRef) int {
- if c := ar.Path.Compare(other.Path); c != 0 {
- return c
- }
-
- if c := ar.Annotations.Location.Compare(other.Annotations.Location); c != 0 {
- return c
- }
-
- return ar.Annotations.Compare(other.Annotations)
+ return v1.BuildAnnotationSet(modules)
}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/builtins.go b/vendor/github.com/open-policy-agent/opa/ast/builtins.go
index f54d91d317..d0ab69a163 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/builtins.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/builtins.go
@@ -5,1348 +5,230 @@
package ast
import (
- "strings"
-
- "github.com/open-policy-agent/opa/types"
+ v1 "github.com/open-policy-agent/opa/v1/ast"
)
// Builtins is the registry of built-in functions supported by OPA.
// Call RegisterBuiltin to add a new built-in.
-var Builtins []*Builtin
+var Builtins = v1.Builtins
// RegisterBuiltin adds a new built-in function to the registry.
func RegisterBuiltin(b *Builtin) {
- Builtins = append(Builtins, b)
- BuiltinMap[b.Name] = b
- if len(b.Infix) > 0 {
- BuiltinMap[b.Infix] = b
- }
+ v1.RegisterBuiltin(b)
}
// DefaultBuiltins is the registry of built-in functions supported in OPA
// by default. When adding a new built-in function to OPA, update this
// list.
-var DefaultBuiltins = [...]*Builtin{
- // Unification/equality ("=")
- Equality,
-
- // Assignment (":=")
- Assign,
-
- // Membership, infix "in": `x in xs`
- Member,
- MemberWithKey,
-
- // Comparisons
- GreaterThan,
- GreaterThanEq,
- LessThan,
- LessThanEq,
- NotEqual,
- Equal,
-
- // Arithmetic
- Plus,
- Minus,
- Multiply,
- Divide,
- Ceil,
- Floor,
- Round,
- Abs,
- Rem,
-
- // Bitwise Arithmetic
- BitsOr,
- BitsAnd,
- BitsNegate,
- BitsXOr,
- BitsShiftLeft,
- BitsShiftRight,
-
- // Binary
- And,
- Or,
-
- // Aggregates
- Count,
- Sum,
- Product,
- Max,
- Min,
- Any,
- All,
-
- // Arrays
- ArrayConcat,
- ArraySlice,
- ArrayReverse,
-
- // Conversions
- ToNumber,
-
- // Casts (DEPRECATED)
- CastObject,
- CastNull,
- CastBoolean,
- CastString,
- CastSet,
- CastArray,
-
- // Regular Expressions
- RegexIsValid,
- RegexMatch,
- RegexMatchDeprecated,
- RegexSplit,
- GlobsMatch,
- RegexTemplateMatch,
- RegexFind,
- RegexFindAllStringSubmatch,
- RegexReplace,
-
- // Sets
- SetDiff,
- Intersection,
- Union,
-
- // Strings
- AnyPrefixMatch,
- AnySuffixMatch,
- Concat,
- FormatInt,
- IndexOf,
- IndexOfN,
- Substring,
- Lower,
- Upper,
- Contains,
- StringCount,
- StartsWith,
- EndsWith,
- Split,
- Replace,
- ReplaceN,
- Trim,
- TrimLeft,
- TrimPrefix,
- TrimRight,
- TrimSuffix,
- TrimSpace,
- Sprintf,
- StringReverse,
- RenderTemplate,
-
- // Numbers
- NumbersRange,
- NumbersRangeStep,
- RandIntn,
-
- // Encoding
- JSONMarshal,
- JSONMarshalWithOptions,
- JSONUnmarshal,
- JSONIsValid,
- Base64Encode,
- Base64Decode,
- Base64IsValid,
- Base64UrlEncode,
- Base64UrlEncodeNoPad,
- Base64UrlDecode,
- URLQueryDecode,
- URLQueryEncode,
- URLQueryEncodeObject,
- URLQueryDecodeObject,
- YAMLMarshal,
- YAMLUnmarshal,
- YAMLIsValid,
- HexEncode,
- HexDecode,
-
- // Object Manipulation
- ObjectUnion,
- ObjectUnionN,
- ObjectRemove,
- ObjectFilter,
- ObjectGet,
- ObjectKeys,
- ObjectSubset,
-
- // JSON Object Manipulation
- JSONFilter,
- JSONRemove,
- JSONPatch,
-
- // Tokens
- JWTDecode,
- JWTVerifyRS256,
- JWTVerifyRS384,
- JWTVerifyRS512,
- JWTVerifyPS256,
- JWTVerifyPS384,
- JWTVerifyPS512,
- JWTVerifyES256,
- JWTVerifyES384,
- JWTVerifyES512,
- JWTVerifyHS256,
- JWTVerifyHS384,
- JWTVerifyHS512,
- JWTDecodeVerify,
- JWTEncodeSignRaw,
- JWTEncodeSign,
-
- // Time
- NowNanos,
- ParseNanos,
- ParseRFC3339Nanos,
- ParseDurationNanos,
- Format,
- Date,
- Clock,
- Weekday,
- AddDate,
- Diff,
-
- // Crypto
- CryptoX509ParseCertificates,
- CryptoX509ParseAndVerifyCertificates,
- CryptoX509ParseAndVerifyCertificatesWithOptions,
- CryptoMd5,
- CryptoSha1,
- CryptoSha256,
- CryptoX509ParseCertificateRequest,
- CryptoX509ParseRSAPrivateKey,
- CryptoX509ParseKeyPair,
- CryptoParsePrivateKeys,
- CryptoHmacMd5,
- CryptoHmacSha1,
- CryptoHmacSha256,
- CryptoHmacSha512,
- CryptoHmacEqual,
-
- // Graphs
- WalkBuiltin,
- ReachableBuiltin,
- ReachablePathsBuiltin,
-
- // Sort
- Sort,
-
- // Types
- IsNumber,
- IsString,
- IsBoolean,
- IsArray,
- IsSet,
- IsObject,
- IsNull,
- TypeNameBuiltin,
-
- // HTTP
- HTTPSend,
-
- // GraphQL
- GraphQLParse,
- GraphQLParseAndVerify,
- GraphQLParseQuery,
- GraphQLParseSchema,
- GraphQLIsValid,
- GraphQLSchemaIsValid,
-
- // JSON Schema
- JSONSchemaVerify,
- JSONMatchSchema,
-
- // Cloud Provider Helpers
- ProvidersAWSSignReqObj,
-
- // Rego
- RegoParseModule,
- RegoMetadataChain,
- RegoMetadataRule,
-
- // OPA
- OPARuntime,
-
- // Tracing
- Trace,
-
- // Networking
- NetCIDROverlap,
- NetCIDRIntersects,
- NetCIDRContains,
- NetCIDRContainsMatches,
- NetCIDRExpand,
- NetCIDRMerge,
- NetLookupIPAddr,
- NetCIDRIsValid,
-
- // Glob
- GlobMatch,
- GlobQuoteMeta,
-
- // Units
- UnitsParse,
- UnitsParseBytes,
-
- // UUIDs
- UUIDRFC4122,
- UUIDParse,
-
- // SemVers
- SemVerIsValid,
- SemVerCompare,
-
- // Printing
- Print,
- InternalPrint,
-}
+var DefaultBuiltins = v1.DefaultBuiltins
// BuiltinMap provides a convenient mapping of built-in names to
// built-in definitions.
-var BuiltinMap map[string]*Builtin
+var BuiltinMap = v1.BuiltinMap
// Deprecated: Builtins can now be directly annotated with the
// Nondeterministic property, and when set to true, will be ignored
// for partial evaluation.
-var IgnoreDuringPartialEval = []*Builtin{
- RandIntn,
- UUIDRFC4122,
- JWTDecodeVerify,
- JWTEncodeSignRaw,
- JWTEncodeSign,
- NowNanos,
- HTTPSend,
- OPARuntime,
- NetLookupIPAddr,
-}
+var IgnoreDuringPartialEval = v1.IgnoreDuringPartialEval
/**
* Unification
*/
// Equality represents the "=" operator.
-var Equality = &Builtin{
- Name: "eq",
- Infix: "=",
- Decl: types.NewFunction(
- types.Args(types.A, types.A),
- types.B,
- ),
-}
+var Equality = v1.Equality
/**
* Assignment
*/
// Assign represents the assignment (":=") operator.
-var Assign = &Builtin{
- Name: "assign",
- Infix: ":=",
- Decl: types.NewFunction(
- types.Args(types.A, types.A),
- types.B,
- ),
-}
+var Assign = v1.Assign
// Member represents the `in` (infix) operator.
-var Member = &Builtin{
- Name: "internal.member_2",
- Infix: "in",
- Decl: types.NewFunction(
- types.Args(
- types.A,
- types.A,
- ),
- types.B,
- ),
-}
+var Member = v1.Member
// MemberWithKey represents the `in` (infix) operator when used
// with two terms on the lhs, i.e., `k, v in obj`.
-var MemberWithKey = &Builtin{
- Name: "internal.member_3",
- Infix: "in",
- Decl: types.NewFunction(
- types.Args(
- types.A,
- types.A,
- types.A,
- ),
- types.B,
- ),
-}
+var MemberWithKey = v1.MemberWithKey
-/**
- * Comparisons
- */
-var comparison = category("comparison")
-
-var GreaterThan = &Builtin{
- Name: "gt",
- Infix: ">",
- Categories: comparison,
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.A),
- types.Named("y", types.A),
- ),
- types.Named("result", types.B).Description("true if `x` is greater than `y`; false otherwise"),
- ),
-}
+var GreaterThan = v1.GreaterThan
-var GreaterThanEq = &Builtin{
- Name: "gte",
- Infix: ">=",
- Categories: comparison,
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.A),
- types.Named("y", types.A),
- ),
- types.Named("result", types.B).Description("true if `x` is greater or equal to `y`; false otherwise"),
- ),
-}
+var GreaterThanEq = v1.GreaterThanEq
// LessThan represents the "<" comparison operator.
-var LessThan = &Builtin{
- Name: "lt",
- Infix: "<",
- Categories: comparison,
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.A),
- types.Named("y", types.A),
- ),
- types.Named("result", types.B).Description("true if `x` is less than `y`; false otherwise"),
- ),
-}
+var LessThan = v1.LessThan
-var LessThanEq = &Builtin{
- Name: "lte",
- Infix: "<=",
- Categories: comparison,
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.A),
- types.Named("y", types.A),
- ),
- types.Named("result", types.B).Description("true if `x` is less than or equal to `y`; false otherwise"),
- ),
-}
+var LessThanEq = v1.LessThanEq
-var NotEqual = &Builtin{
- Name: "neq",
- Infix: "!=",
- Categories: comparison,
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.A),
- types.Named("y", types.A),
- ),
- types.Named("result", types.B).Description("true if `x` is not equal to `y`; false otherwise"),
- ),
-}
+var NotEqual = v1.NotEqual
// Equal represents the "==" comparison operator.
-var Equal = &Builtin{
- Name: "equal",
- Infix: "==",
- Categories: comparison,
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.A),
- types.Named("y", types.A),
- ),
- types.Named("result", types.B).Description("true if `x` is equal to `y`; false otherwise"),
- ),
-}
+var Equal = v1.Equal
-/**
- * Arithmetic
- */
-var number = category("numbers")
-
-var Plus = &Builtin{
- Name: "plus",
- Infix: "+",
- Description: "Plus adds two numbers together.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.N),
- types.Named("y", types.N),
- ),
- types.Named("z", types.N).Description("the sum of `x` and `y`"),
- ),
- Categories: number,
-}
+var Plus = v1.Plus
-var Minus = &Builtin{
- Name: "minus",
- Infix: "-",
- Description: "Minus subtracts the second number from the first number or computes the difference between two sets.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.NewAny(types.N, types.NewSet(types.A))),
- types.Named("y", types.NewAny(types.N, types.NewSet(types.A))),
- ),
- types.Named("z", types.NewAny(types.N, types.NewSet(types.A))).Description("the difference of `x` and `y`"),
- ),
- Categories: category("sets", "numbers"),
-}
+var Minus = v1.Minus
-var Multiply = &Builtin{
- Name: "mul",
- Infix: "*",
- Description: "Multiplies two numbers.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.N),
- types.Named("y", types.N),
- ),
- types.Named("z", types.N).Description("the product of `x` and `y`"),
- ),
- Categories: number,
-}
+var Multiply = v1.Multiply
-var Divide = &Builtin{
- Name: "div",
- Infix: "/",
- Description: "Divides the first number by the second number.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.N).Description("the dividend"),
- types.Named("y", types.N).Description("the divisor"),
- ),
- types.Named("z", types.N).Description("the result of `x` divided by `y`"),
- ),
- Categories: number,
-}
+var Divide = v1.Divide
-var Round = &Builtin{
- Name: "round",
- Description: "Rounds the number to the nearest integer.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.N).Description("the number to round"),
- ),
- types.Named("y", types.N).Description("the result of rounding `x`"),
- ),
- Categories: number,
-}
+var Round = v1.Round
-var Ceil = &Builtin{
- Name: "ceil",
- Description: "Rounds the number _up_ to the nearest integer.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.N).Description("the number to round"),
- ),
- types.Named("y", types.N).Description("the result of rounding `x` _up_"),
- ),
- Categories: number,
-}
+var Ceil = v1.Ceil
-var Floor = &Builtin{
- Name: "floor",
- Description: "Rounds the number _down_ to the nearest integer.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.N).Description("the number to round"),
- ),
- types.Named("y", types.N).Description("the result of rounding `x` _down_"),
- ),
- Categories: number,
-}
+var Floor = v1.Floor
-var Abs = &Builtin{
- Name: "abs",
- Description: "Returns the number without its sign.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.N),
- ),
- types.Named("y", types.N).Description("the absolute value of `x`"),
- ),
- Categories: number,
-}
+var Abs = v1.Abs
-var Rem = &Builtin{
- Name: "rem",
- Infix: "%",
- Description: "Returns the remainder for of `x` divided by `y`, for `y != 0`.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.N),
- types.Named("y", types.N),
- ),
- types.Named("z", types.N).Description("the remainder"),
- ),
- Categories: number,
-}
+var Rem = v1.Rem
/**
* Bitwise
*/
-var BitsOr = &Builtin{
- Name: "bits.or",
- Description: "Returns the bitwise \"OR\" of two integers.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.N),
- types.Named("y", types.N),
- ),
- types.Named("z", types.N),
- ),
-}
+var BitsOr = v1.BitsOr
-var BitsAnd = &Builtin{
- Name: "bits.and",
- Description: "Returns the bitwise \"AND\" of two integers.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.N),
- types.Named("y", types.N),
- ),
- types.Named("z", types.N),
- ),
-}
+var BitsAnd = v1.BitsAnd
-var BitsNegate = &Builtin{
- Name: "bits.negate",
- Description: "Returns the bitwise negation (flip) of an integer.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.N),
- ),
- types.Named("z", types.N),
- ),
-}
+var BitsNegate = v1.BitsNegate
-var BitsXOr = &Builtin{
- Name: "bits.xor",
- Description: "Returns the bitwise \"XOR\" (exclusive-or) of two integers.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.N),
- types.Named("y", types.N),
- ),
- types.Named("z", types.N),
- ),
-}
+var BitsXOr = v1.BitsXOr
-var BitsShiftLeft = &Builtin{
- Name: "bits.lsh",
- Description: "Returns a new integer with its bits shifted `s` bits to the left.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.N),
- types.Named("s", types.N),
- ),
- types.Named("z", types.N),
- ),
-}
+var BitsShiftLeft = v1.BitsShiftLeft
-var BitsShiftRight = &Builtin{
- Name: "bits.rsh",
- Description: "Returns a new integer with its bits shifted `s` bits to the right.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.N),
- types.Named("s", types.N),
- ),
- types.Named("z", types.N),
- ),
-}
+var BitsShiftRight = v1.BitsShiftRight
/**
* Sets
*/
-var sets = category("sets")
-
-var And = &Builtin{
- Name: "and",
- Infix: "&",
- Description: "Returns the intersection of two sets.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.NewSet(types.A)),
- types.Named("y", types.NewSet(types.A)),
- ),
- types.Named("z", types.NewSet(types.A)).Description("the intersection of `x` and `y`"),
- ),
- Categories: sets,
-}
+var And = v1.And
// Or performs a union operation on sets.
-var Or = &Builtin{
- Name: "or",
- Infix: "|",
- Description: "Returns the union of two sets.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.NewSet(types.A)),
- types.Named("y", types.NewSet(types.A)),
- ),
- types.Named("z", types.NewSet(types.A)).Description("the union of `x` and `y`"),
- ),
- Categories: sets,
-}
+var Or = v1.Or
-var Intersection = &Builtin{
- Name: "intersection",
- Description: "Returns the intersection of the given input sets.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("xs", types.NewSet(types.NewSet(types.A))).Description("set of sets to intersect"),
- ),
- types.Named("y", types.NewSet(types.A)).Description("the intersection of all `xs` sets"),
- ),
- Categories: sets,
-}
+var Intersection = v1.Intersection
-var Union = &Builtin{
- Name: "union",
- Description: "Returns the union of the given input sets.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("xs", types.NewSet(types.NewSet(types.A))).Description("set of sets to merge"),
- ),
- types.Named("y", types.NewSet(types.A)).Description("the union of all `xs` sets"),
- ),
- Categories: sets,
-}
+var Union = v1.Union
/**
* Aggregates
*/
-var aggregates = category("aggregates")
-
-var Count = &Builtin{
- Name: "count",
- Description: " Count takes a collection or string and returns the number of elements (or characters) in it.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("collection", types.NewAny(
- types.NewSet(types.A),
- types.NewArray(nil, types.A),
- types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
- types.S,
- )).Description("the set/array/object/string to be counted"),
- ),
- types.Named("n", types.N).Description("the count of elements, key/val pairs, or characters, respectively."),
- ),
- Categories: aggregates,
-}
+var Count = v1.Count
-var Sum = &Builtin{
- Name: "sum",
- Description: "Sums elements of an array or set of numbers.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("collection", types.NewAny(
- types.NewSet(types.N),
- types.NewArray(nil, types.N),
- )),
- ),
- types.Named("n", types.N).Description("the sum of all elements"),
- ),
- Categories: aggregates,
-}
+var Sum = v1.Sum
-var Product = &Builtin{
- Name: "product",
- Description: "Muliplies elements of an array or set of numbers",
- Decl: types.NewFunction(
- types.Args(
- types.Named("collection", types.NewAny(
- types.NewSet(types.N),
- types.NewArray(nil, types.N),
- )),
- ),
- types.Named("n", types.N).Description("the product of all elements"),
- ),
- Categories: aggregates,
-}
+var Product = v1.Product
-var Max = &Builtin{
- Name: "max",
- Description: "Returns the maximum value in a collection.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("collection", types.NewAny(
- types.NewSet(types.A),
- types.NewArray(nil, types.A),
- )),
- ),
- types.Named("n", types.A).Description("the maximum of all elements"),
- ),
- Categories: aggregates,
-}
+var Max = v1.Max
-var Min = &Builtin{
- Name: "min",
- Description: "Returns the minimum value in a collection.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("collection", types.NewAny(
- types.NewSet(types.A),
- types.NewArray(nil, types.A),
- )),
- ),
- types.Named("n", types.A).Description("the minimum of all elements"),
- ),
- Categories: aggregates,
-}
+var Min = v1.Min
/**
* Sorting
*/
-var Sort = &Builtin{
- Name: "sort",
- Description: "Returns a sorted array.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("collection", types.NewAny(
- types.NewArray(nil, types.A),
- types.NewSet(types.A),
- )).Description("the array or set to be sorted"),
- ),
- types.Named("n", types.NewArray(nil, types.A)).Description("the sorted array"),
- ),
- Categories: aggregates,
-}
+var Sort = v1.Sort
/**
* Arrays
*/
-var ArrayConcat = &Builtin{
- Name: "array.concat",
- Description: "Concatenates two arrays.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.NewArray(nil, types.A)),
- types.Named("y", types.NewArray(nil, types.A)),
- ),
- types.Named("z", types.NewArray(nil, types.A)).Description("the concatenation of `x` and `y`"),
- ),
-}
+var ArrayConcat = v1.ArrayConcat
-var ArraySlice = &Builtin{
- Name: "array.slice",
- Description: "Returns a slice of a given array. If `start` is greater or equal than `stop`, `slice` is `[]`.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("arr", types.NewArray(nil, types.A)).Description("the array to be sliced"),
- types.Named("start", types.NewNumber()).Description("the start index of the returned slice; if less than zero, it's clamped to 0"),
- types.Named("stop", types.NewNumber()).Description("the stop index of the returned slice; if larger than `count(arr)`, it's clamped to `count(arr)`"),
- ),
- types.Named("slice", types.NewArray(nil, types.A)).Description("the subslice of `array`, from `start` to `end`, including `arr[start]`, but excluding `arr[end]`"),
- ),
-} // NOTE(sr): this function really needs examples
-
-var ArrayReverse = &Builtin{
- Name: "array.reverse",
- Description: "Returns the reverse of a given array.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("arr", types.NewArray(nil, types.A)).Description("the array to be reversed"),
- ),
- types.Named("rev", types.NewArray(nil, types.A)).Description("an array containing the elements of `arr` in reverse order"),
- ),
-}
+var ArraySlice = v1.ArraySlice
+
+var ArrayReverse = v1.ArrayReverse
/**
* Conversions
*/
-var conversions = category("conversions")
-
-var ToNumber = &Builtin{
- Name: "to_number",
- Description: "Converts a string, bool, or number value to a number: Strings are converted to numbers using `strconv.Atoi`, Boolean `false` is converted to 0 and `true` is converted to 1.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.NewAny(
- types.N,
- types.S,
- types.B,
- types.NewNull(),
- )),
- ),
- types.Named("num", types.N),
- ),
- Categories: conversions,
-}
+
+var ToNumber = v1.ToNumber
/**
* Regular Expressions
*/
-var RegexMatch = &Builtin{
- Name: "regex.match",
- Description: "Matches a string against a regular expression.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("pattern", types.S).Description("regular expression"),
- types.Named("value", types.S).Description("value to match against `pattern`"),
- ),
- types.Named("result", types.B),
- ),
-}
+var RegexMatch = v1.RegexMatch
-var RegexIsValid = &Builtin{
- Name: "regex.is_valid",
- Description: "Checks if a string is a valid regular expression: the detailed syntax for patterns is defined by https://github.com/google/re2/wiki/Syntax.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("pattern", types.S).Description("regular expression"),
- ),
- types.Named("result", types.B),
- ),
-}
+var RegexIsValid = v1.RegexIsValid
-var RegexFindAllStringSubmatch = &Builtin{
- Name: "regex.find_all_string_submatch_n",
- Description: "Returns all successive matches of the expression.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("pattern", types.S).Description("regular expression"),
- types.Named("value", types.S).Description("string to match"),
- types.Named("number", types.N).Description("number of matches to return; `-1` means all matches"),
- ),
- types.Named("output", types.NewArray(nil, types.NewArray(nil, types.S))),
- ),
-}
+var RegexFindAllStringSubmatch = v1.RegexFindAllStringSubmatch
-var RegexTemplateMatch = &Builtin{
- Name: "regex.template_match",
- Description: "Matches a string against a pattern, where there pattern may be glob-like",
- Decl: types.NewFunction(
- types.Args(
- types.Named("template", types.S).Description("template expression containing `0..n` regular expressions"),
- types.Named("value", types.S).Description("string to match"),
- types.Named("delimiter_start", types.S).Description("start delimiter of the regular expression in `template`"),
- types.Named("delimiter_end", types.S).Description("end delimiter of the regular expression in `template`"),
- ),
- types.Named("result", types.B),
- ),
-} // TODO(sr): example:`regex.template_match("urn:foo:{.*}", "urn:foo:bar:baz", "{", "}")`` returns ``true``.
-
-var RegexSplit = &Builtin{
- Name: "regex.split",
- Description: "Splits the input string by the occurrences of the given pattern.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("pattern", types.S).Description("regular expression"),
- types.Named("value", types.S).Description("string to match"),
- ),
- types.Named("output", types.NewArray(nil, types.S)).Description("the parts obtained by splitting `value`"),
- ),
-}
+var RegexTemplateMatch = v1.RegexTemplateMatch
+
+var RegexSplit = v1.RegexSplit
// RegexFind takes two strings and a number, the pattern, the value and number of match values to
// return, -1 means all match values.
-var RegexFind = &Builtin{
- Name: "regex.find_n",
- Description: "Returns the specified number of matches when matching the input against the pattern.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("pattern", types.S).Description("regular expression"),
- types.Named("value", types.S).Description("string to match"),
- types.Named("number", types.N).Description("number of matches to return, if `-1`, returns all matches"),
- ),
- types.Named("output", types.NewArray(nil, types.S)).Description("collected matches"),
- ),
-}
+var RegexFind = v1.RegexFind
// GlobsMatch takes two strings regexp-style strings and evaluates to true if their
// intersection matches a non-empty set of non-empty strings.
// Examples:
// - "a.a." and ".b.b" -> true.
// - "[a-z]*" and [0-9]+" -> not true.
-var GlobsMatch = &Builtin{
- Name: "regex.globs_match",
- Description: `Checks if the intersection of two glob-style regular expressions matches a non-empty set of non-empty strings.
-The set of regex symbols is limited for this builtin: only ` + "`.`, `*`, `+`, `[`, `-`, `]` and `\\` are treated as special symbols.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("glob1", types.S),
- types.Named("glob2", types.S),
- ),
- types.Named("result", types.B),
- ),
-}
+var GlobsMatch = v1.GlobsMatch
/**
* Strings
*/
-var stringsCat = category("strings")
-
-var AnyPrefixMatch = &Builtin{
- Name: "strings.any_prefix_match",
- Description: "Returns true if any of the search strings begins with any of the base strings.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("search", types.NewAny(
- types.S,
- types.NewSet(types.S),
- types.NewArray(nil, types.S),
- )).Description("search string(s)"),
- types.Named("base", types.NewAny(
- types.S,
- types.NewSet(types.S),
- types.NewArray(nil, types.S),
- )).Description("base string(s)"),
- ),
- types.Named("result", types.B).Description("result of the prefix check"),
- ),
- Categories: stringsCat,
-}
-var AnySuffixMatch = &Builtin{
- Name: "strings.any_suffix_match",
- Description: "Returns true if any of the search strings ends with any of the base strings.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("search", types.NewAny(
- types.S,
- types.NewSet(types.S),
- types.NewArray(nil, types.S),
- )).Description("search string(s)"),
- types.Named("base", types.NewAny(
- types.S,
- types.NewSet(types.S),
- types.NewArray(nil, types.S),
- )).Description("base string(s)"),
- ),
- types.Named("result", types.B).Description("result of the suffix check"),
- ),
- Categories: stringsCat,
-}
+var AnyPrefixMatch = v1.AnyPrefixMatch
-var Concat = &Builtin{
- Name: "concat",
- Description: "Joins a set or array of strings with a delimiter.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("delimiter", types.S),
- types.Named("collection", types.NewAny(
- types.NewSet(types.S),
- types.NewArray(nil, types.S),
- )).Description("strings to join"),
- ),
- types.Named("output", types.S),
- ),
- Categories: stringsCat,
-}
+var AnySuffixMatch = v1.AnySuffixMatch
-var FormatInt = &Builtin{
- Name: "format_int",
- Description: "Returns the string representation of the number in the given base after rounding it down to an integer value.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("number", types.N).Description("number to format"),
- types.Named("base", types.N).Description("base of number representation to use"),
- ),
- types.Named("output", types.S).Description("formatted number"),
- ),
- Categories: stringsCat,
-}
+var Concat = v1.Concat
-var IndexOf = &Builtin{
- Name: "indexof",
- Description: "Returns the index of a substring contained inside a string.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("haystack", types.S).Description("string to search in"),
- types.Named("needle", types.S).Description("substring to look for"),
- ),
- types.Named("output", types.N).Description("index of first occurrence, `-1` if not found"),
- ),
- Categories: stringsCat,
-}
+var FormatInt = v1.FormatInt
-var IndexOfN = &Builtin{
- Name: "indexof_n",
- Description: "Returns a list of all the indexes of a substring contained inside a string.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("haystack", types.S).Description("string to search in"),
- types.Named("needle", types.S).Description("substring to look for"),
- ),
- types.Named("output", types.NewArray(nil, types.N)).Description("all indices at which `needle` occurs in `haystack`, may be empty"),
- ),
- Categories: stringsCat,
-}
+var IndexOf = v1.IndexOf
-var Substring = &Builtin{
- Name: "substring",
- Description: "Returns the portion of a string for a given `offset` and a `length`. If `length < 0`, `output` is the remainder of the string.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("value", types.S),
- types.Named("offset", types.N).Description("offset, must be positive"),
- types.Named("length", types.N).Description("length of the substring starting from `offset`"),
- ),
- types.Named("output", types.S).Description("substring of `value` from `offset`, of length `length`"),
- ),
- Categories: stringsCat,
-}
+var IndexOfN = v1.IndexOfN
-var Contains = &Builtin{
- Name: "contains",
- Description: "Returns `true` if the search string is included in the base string",
- Decl: types.NewFunction(
- types.Args(
- types.Named("haystack", types.S).Description("string to search in"),
- types.Named("needle", types.S).Description("substring to look for"),
- ),
- types.Named("result", types.B).Description("result of the containment check"),
- ),
- Categories: stringsCat,
-}
+var Substring = v1.Substring
-var StringCount = &Builtin{
- Name: "strings.count",
- Description: "Returns the number of non-overlapping instances of a substring in a string.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("search", types.S).Description("string to search in"),
- types.Named("substring", types.S).Description("substring to look for"),
- ),
- types.Named("output", types.N).Description("count of occurrences, `0` if not found"),
- ),
- Categories: stringsCat,
-}
+var Contains = v1.Contains
-var StartsWith = &Builtin{
- Name: "startswith",
- Description: "Returns true if the search string begins with the base string.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("search", types.S).Description("search string"),
- types.Named("base", types.S).Description("base string"),
- ),
- types.Named("result", types.B).Description("result of the prefix check"),
- ),
- Categories: stringsCat,
-}
+var StringCount = v1.StringCount
-var EndsWith = &Builtin{
- Name: "endswith",
- Description: "Returns true if the search string ends with the base string.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("search", types.S).Description("search string"),
- types.Named("base", types.S).Description("base string"),
- ),
- types.Named("result", types.B).Description("result of the suffix check"),
- ),
- Categories: stringsCat,
-}
+var StartsWith = v1.StartsWith
-var Lower = &Builtin{
- Name: "lower",
- Description: "Returns the input string but with all characters in lower-case.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S).Description("string that is converted to lower-case"),
- ),
- types.Named("y", types.S).Description("lower-case of x"),
- ),
- Categories: stringsCat,
-}
+var EndsWith = v1.EndsWith
-var Upper = &Builtin{
- Name: "upper",
- Description: "Returns the input string but with all characters in upper-case.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S).Description("string that is converted to upper-case"),
- ),
- types.Named("y", types.S).Description("upper-case of x"),
- ),
- Categories: stringsCat,
-}
+var Lower = v1.Lower
-var Split = &Builtin{
- Name: "split",
- Description: "Split returns an array containing elements of the input string split on a delimiter.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S).Description("string that is split"),
- types.Named("delimiter", types.S).Description("delimiter used for splitting"),
- ),
- types.Named("ys", types.NewArray(nil, types.S)).Description("split parts"),
- ),
- Categories: stringsCat,
-}
+var Upper = v1.Upper
-var Replace = &Builtin{
- Name: "replace",
- Description: "Replace replaces all instances of a sub-string.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S).Description("string being processed"),
- types.Named("old", types.S).Description("substring to replace"),
- types.Named("new", types.S).Description("string to replace `old` with"),
- ),
- types.Named("y", types.S).Description("string with replaced substrings"),
- ),
- Categories: stringsCat,
-}
+var Split = v1.Split
-var ReplaceN = &Builtin{
- Name: "strings.replace_n",
- Description: `Replaces a string from a list of old, new string pairs.
-Replacements are performed in the order they appear in the target string, without overlapping matches.
-The old string comparisons are done in argument order.`,
- Decl: types.NewFunction(
- types.Args(
- types.Named("patterns", types.NewObject(
- nil,
- types.NewDynamicProperty(
- types.S,
- types.S)),
- ).Description("replacement pairs"),
- types.Named("value", types.S).Description("string to replace substring matches in"),
- ),
- types.Named("output", types.S),
- ),
-}
+var Replace = v1.Replace
-var RegexReplace = &Builtin{
- Name: "regex.replace",
- Description: `Find and replaces the text using the regular expression pattern.`,
- Decl: types.NewFunction(
- types.Args(
- types.Named("s", types.S).Description("string being processed"),
- types.Named("pattern", types.S).Description("regex pattern to be applied"),
- types.Named("value", types.S).Description("regex value"),
- ),
- types.Named("output", types.S),
- ),
-}
+var ReplaceN = v1.ReplaceN
-var Trim = &Builtin{
- Name: "trim",
- Description: "Returns `value` with all leading or trailing instances of the `cutset` characters removed.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("value", types.S).Description("string to trim"),
- types.Named("cutset", types.S).Description("string of characters that are cut off"),
- ),
- types.Named("output", types.S).Description("string trimmed of `cutset` characters"),
- ),
- Categories: stringsCat,
-}
+var RegexReplace = v1.RegexReplace
-var TrimLeft = &Builtin{
- Name: "trim_left",
- Description: "Returns `value` with all leading instances of the `cutset` characters removed.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("value", types.S).Description("string to trim"),
- types.Named("cutset", types.S).Description("string of characters that are cut off on the left"),
- ),
- types.Named("output", types.S).Description("string left-trimmed of `cutset` characters"),
- ),
- Categories: stringsCat,
-}
+var Trim = v1.Trim
-var TrimPrefix = &Builtin{
- Name: "trim_prefix",
- Description: "Returns `value` without the prefix. If `value` doesn't start with `prefix`, it is returned unchanged.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("value", types.S).Description("string to trim"),
- types.Named("prefix", types.S).Description("prefix to cut off"),
- ),
- types.Named("output", types.S).Description("string with `prefix` cut off"),
- ),
- Categories: stringsCat,
-}
+var TrimLeft = v1.TrimLeft
-var TrimRight = &Builtin{
- Name: "trim_right",
- Description: "Returns `value` with all trailing instances of the `cutset` characters removed.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("value", types.S).Description("string to trim"),
- types.Named("cutset", types.S).Description("string of characters that are cut off on the right"),
- ),
- types.Named("output", types.S).Description("string right-trimmed of `cutset` characters"),
- ),
- Categories: stringsCat,
-}
+var TrimPrefix = v1.TrimPrefix
-var TrimSuffix = &Builtin{
- Name: "trim_suffix",
- Description: "Returns `value` without the suffix. If `value` doesn't end with `suffix`, it is returned unchanged.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("value", types.S).Description("string to trim"),
- types.Named("suffix", types.S).Description("suffix to cut off"),
- ),
- types.Named("output", types.S).Description("string with `suffix` cut off"),
- ),
- Categories: stringsCat,
-}
+var TrimRight = v1.TrimRight
-var TrimSpace = &Builtin{
- Name: "trim_space",
- Description: "Return the given string with all leading and trailing white space removed.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("value", types.S).Description("string to trim"),
- ),
- types.Named("output", types.S).Description("string leading and trailing white space cut off"),
- ),
- Categories: stringsCat,
-}
+var TrimSuffix = v1.TrimSuffix
-var Sprintf = &Builtin{
- Name: "sprintf",
- Description: "Returns the given string, formatted.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("format", types.S).Description("string with formatting verbs"),
- types.Named("values", types.NewArray(nil, types.A)).Description("arguments to format into formatting verbs"),
- ),
- types.Named("output", types.S).Description("`format` formatted by the values in `values`"),
- ),
- Categories: stringsCat,
-}
+var TrimSpace = v1.TrimSpace
-var StringReverse = &Builtin{
- Name: "strings.reverse",
- Description: "Reverses a given string.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S),
- ),
- types.Named("y", types.S),
- ),
- Categories: stringsCat,
-}
+var Sprintf = v1.Sprintf
-var RenderTemplate = &Builtin{
- Name: "strings.render_template",
- Description: `Renders a templated string with given template variables injected. For a given templated string and key/value mapping, values will be injected into the template where they are referenced by key.
- For examples of templating syntax, see https://pkg.go.dev/text/template`,
- Decl: types.NewFunction(
- types.Args(
- types.Named("value", types.S).Description("a templated string"),
- types.Named("vars", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("a mapping of template variable keys to values"),
- ),
- types.Named("result", types.S).Description("rendered template with template variables injected"),
- ),
- Categories: stringsCat,
-}
+var StringReverse = v1.StringReverse
+
+var RenderTemplate = v1.RenderTemplate
/**
* Numbers
@@ -1354,82 +236,19 @@ var RenderTemplate = &Builtin{
// RandIntn returns a random number 0 - n
// Marked non-deterministic because it relies on RNG internally.
-var RandIntn = &Builtin{
- Name: "rand.intn",
- Description: "Returns a random integer between `0` and `n` (`n` exclusive). If `n` is `0`, then `y` is always `0`. For any given argument pair (`str`, `n`), the output will be consistent throughout a query evaluation.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("str", types.S),
- types.Named("n", types.N),
- ),
- types.Named("y", types.N).Description("random integer in the range `[0, abs(n))`"),
- ),
- Categories: number,
- Nondeterministic: true,
-}
+var RandIntn = v1.RandIntn
-var NumbersRange = &Builtin{
- Name: "numbers.range",
- Description: "Returns an array of numbers in the given (inclusive) range. If `a==b`, then `range == [a]`; if `a > b`, then `range` is in descending order.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("a", types.N),
- types.Named("b", types.N),
- ),
- types.Named("range", types.NewArray(nil, types.N)).Description("the range between `a` and `b`"),
- ),
-}
+var NumbersRange = v1.NumbersRange
-var NumbersRangeStep = &Builtin{
- Name: "numbers.range_step",
- Description: `Returns an array of numbers in the given (inclusive) range incremented by a positive step.
- If "a==b", then "range == [a]"; if "a > b", then "range" is in descending order.
- If the provided "step" is less then 1, an error will be thrown.
- If "b" is not in the range of the provided "step", "b" won't be included in the result.
- `,
- Decl: types.NewFunction(
- types.Args(
- types.Named("a", types.N),
- types.Named("b", types.N),
- types.Named("step", types.N),
- ),
- types.Named("range", types.NewArray(nil, types.N)).Description("the range between `a` and `b` in `step` increments"),
- ),
-}
+var NumbersRangeStep = v1.NumbersRangeStep
/**
* Units
*/
-var UnitsParse = &Builtin{
- Name: "units.parse",
- Description: `Converts strings like "10G", "5K", "4M", "1500m" and the like into a number.
-This number can be a non-integer, such as 1.5, 0.22, etc. Supports standard metric decimal and
-binary SI units (e.g., K, Ki, M, Mi, G, Gi etc.) m, K, M, G, T, P, and E are treated as decimal
-units and Ki, Mi, Gi, Ti, Pi, and Ei are treated as binary units.
-
-Note that 'm' and 'M' are case-sensitive, to allow distinguishing between "milli" and "mega" units respectively. Other units are case-insensitive.`,
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S).Description("the unit to parse"),
- ),
- types.Named("y", types.N).Description("the parsed number"),
- ),
-}
+var UnitsParse = v1.UnitsParse
-var UnitsParseBytes = &Builtin{
- Name: "units.parse_bytes",
- Description: `Converts strings like "10GB", "5K", "4mb" into an integer number of bytes.
-Supports standard byte units (e.g., KB, KiB, etc.) KB, MB, GB, and TB are treated as decimal
-units and KiB, MiB, GiB, and TiB are treated as binary units. The bytes symbol (b/B) in the
-unit is optional and omitting it wil give the same result (e.g. Mi and MiB).`,
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S).Description("the byte unit to parse"),
- ),
- types.Named("y", types.N).Description("the parsed number"),
- ),
-}
+var UnitsParseBytes = v1.UnitsParseBytes
//
/**
@@ -1438,1372 +257,241 @@ unit is optional and omitting it wil give the same result (e.g. Mi and MiB).`,
// UUIDRFC4122 returns a version 4 UUID string.
// Marked non-deterministic because it relies on RNG internally.
-var UUIDRFC4122 = &Builtin{
- Name: "uuid.rfc4122",
- Description: "Returns a new UUIDv4.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("k", types.S),
- ),
- types.Named("output", types.S).Description("a version 4 UUID; for any given `k`, the output will be consistent throughout a query evaluation"),
- ),
- Nondeterministic: true,
-}
+var UUIDRFC4122 = v1.UUIDRFC4122
-var UUIDParse = &Builtin{
- Name: "uuid.parse",
- Description: "Parses the string value as an UUID and returns an object with the well-defined fields of the UUID if valid.",
- Categories: nil,
- Decl: types.NewFunction(
- types.Args(
- types.Named("uuid", types.S),
- ),
- types.Named("result", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("Properties of UUID if valid (version, variant, etc). Undefined otherwise."),
- ),
- Relation: false,
-}
+var UUIDParse = v1.UUIDParse
/**
* JSON
*/
-var objectCat = category("object")
-
-var JSONFilter = &Builtin{
- Name: "json.filter",
- Description: "Filters the object. " +
- "For example: `json.filter({\"a\": {\"b\": \"x\", \"c\": \"y\"}}, [\"a/b\"])` will result in `{\"a\": {\"b\": \"x\"}}`). " +
- "Paths are not filtered in-order and are deduplicated before being evaluated.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("object", types.NewObject(
- nil,
- types.NewDynamicProperty(types.A, types.A),
- )),
- types.Named("paths", types.NewAny(
- types.NewArray(
- nil,
- types.NewAny(
- types.S,
- types.NewArray(
- nil,
- types.A,
- ),
- ),
- ),
- types.NewSet(
- types.NewAny(
- types.S,
- types.NewArray(
- nil,
- types.A,
- ),
- ),
- ),
- )).Description("JSON string paths"),
- ),
- types.Named("filtered", types.A).Description("remaining data from `object` with only keys specified in `paths`"),
- ),
- Categories: objectCat,
-}
+var JSONFilter = v1.JSONFilter
-var JSONRemove = &Builtin{
- Name: "json.remove",
- Description: "Removes paths from an object. " +
- "For example: `json.remove({\"a\": {\"b\": \"x\", \"c\": \"y\"}}, [\"a/b\"])` will result in `{\"a\": {\"c\": \"y\"}}`. " +
- "Paths are not removed in-order and are deduplicated before being evaluated.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("object", types.NewObject(
- nil,
- types.NewDynamicProperty(types.A, types.A),
- )),
- types.Named("paths", types.NewAny(
- types.NewArray(
- nil,
- types.NewAny(
- types.S,
- types.NewArray(
- nil,
- types.A,
- ),
- ),
- ),
- types.NewSet(
- types.NewAny(
- types.S,
- types.NewArray(
- nil,
- types.A,
- ),
- ),
- ),
- )).Description("JSON string paths"),
- ),
- types.Named("output", types.A).Description("result of removing all keys specified in `paths`"),
- ),
- Categories: objectCat,
-}
+var JSONRemove = v1.JSONRemove
-var JSONPatch = &Builtin{
- Name: "json.patch",
- Description: "Patches an object according to RFC6902. " +
- "For example: `json.patch({\"a\": {\"foo\": 1}}, [{\"op\": \"add\", \"path\": \"/a/bar\", \"value\": 2}])` results in `{\"a\": {\"foo\": 1, \"bar\": 2}`. " +
- "The patches are applied atomically: if any of them fails, the result will be undefined. " +
- "Additionally works on sets, where a value contained in the set is considered to be its path.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("object", types.A), // TODO(sr): types.A?
- types.Named("patches", types.NewArray(
- nil,
- types.NewObject(
- []*types.StaticProperty{
- {Key: "op", Value: types.S},
- {Key: "path", Value: types.A},
- },
- types.NewDynamicProperty(types.A, types.A),
- ),
- )),
- ),
- types.Named("output", types.A).Description("result obtained after consecutively applying all patch operations in `patches`"),
- ),
- Categories: objectCat,
-}
+var JSONPatch = v1.JSONPatch
-var ObjectSubset = &Builtin{
- Name: "object.subset",
- Description: "Determines if an object `sub` is a subset of another object `super`." +
- "Object `sub` is a subset of object `super` if and only if every key in `sub` is also in `super`, " +
- "**and** for all keys which `sub` and `super` share, they have the same value. " +
- "This function works with objects, sets, arrays and a set of array and set." +
- "If both arguments are objects, then the operation is recursive, e.g. " +
- "`{\"c\": {\"x\": {10, 15, 20}}` is a subset of `{\"a\": \"b\", \"c\": {\"x\": {10, 15, 20, 25}, \"y\": \"z\"}`. " +
- "If both arguments are sets, then this function checks if every element of `sub` is a member of `super`, " +
- "but does not attempt to recurse. If both arguments are arrays, " +
- "then this function checks if `sub` appears contiguously in order within `super`, " +
- "and also does not attempt to recurse. If `super` is array and `sub` is set, " +
- "then this function checks if `super` contains every element of `sub` with no consideration of ordering, " +
- "and also does not attempt to recurse.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("super", types.NewAny(types.NewObject(
- nil,
- types.NewDynamicProperty(types.A, types.A),
- ),
- types.NewSet(types.A),
- types.NewArray(nil, types.A),
- )).Description("object to test if sub is a subset of"),
- types.Named("sub", types.NewAny(types.NewObject(
- nil,
- types.NewDynamicProperty(types.A, types.A),
- ),
- types.NewSet(types.A),
- types.NewArray(nil, types.A),
- )).Description("object to test if super is a superset of"),
- ),
- types.Named("result", types.A).Description("`true` if `sub` is a subset of `super`"),
- ),
-}
+var ObjectSubset = v1.ObjectSubset
-var ObjectUnion = &Builtin{
- Name: "object.union",
- Description: "Creates a new object of the asymmetric union of two objects. " +
- "For example: `object.union({\"a\": 1, \"b\": 2, \"c\": {\"d\": 3}}, {\"a\": 7, \"c\": {\"d\": 4, \"e\": 5}})` will result in `{\"a\": 7, \"b\": 2, \"c\": {\"d\": 4, \"e\": 5}}`.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("a", types.NewObject(
- nil,
- types.NewDynamicProperty(types.A, types.A),
- )),
- types.Named("b", types.NewObject(
- nil,
- types.NewDynamicProperty(types.A, types.A),
- )),
- ),
- types.Named("output", types.A).Description("a new object which is the result of an asymmetric recursive union of two objects where conflicts are resolved by choosing the key from the right-hand object `b`"),
- ), // TODO(sr): types.A? ^^^^^^^ (also below)
-}
+var ObjectUnion = v1.ObjectUnion
-var ObjectUnionN = &Builtin{
- Name: "object.union_n",
- Description: "Creates a new object that is the asymmetric union of all objects merged from left to right. " +
- "For example: `object.union_n([{\"a\": 1}, {\"b\": 2}, {\"a\": 3}])` will result in `{\"b\": 2, \"a\": 3}`.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("objects", types.NewArray(
- nil,
- types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
- )),
- ),
- types.Named("output", types.A).Description("asymmetric recursive union of all objects in `objects`, merged from left to right, where conflicts are resolved by choosing the key from the right-hand object"),
- ),
-}
+var ObjectUnionN = v1.ObjectUnionN
-var ObjectRemove = &Builtin{
- Name: "object.remove",
- Description: "Removes specified keys from an object.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("object", types.NewObject(
- nil,
- types.NewDynamicProperty(types.A, types.A),
- )).Description("object to remove keys from"),
- types.Named("keys", types.NewAny(
- types.NewArray(nil, types.A),
- types.NewSet(types.A),
- types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
- )).Description("keys to remove from x"),
- ),
- types.Named("output", types.A).Description("result of removing the specified `keys` from `object`"),
- ),
-}
+var ObjectRemove = v1.ObjectRemove
-var ObjectFilter = &Builtin{
- Name: "object.filter",
- Description: "Filters the object by keeping only specified keys. " +
- "For example: `object.filter({\"a\": {\"b\": \"x\", \"c\": \"y\"}, \"d\": \"z\"}, [\"a\"])` will result in `{\"a\": {\"b\": \"x\", \"c\": \"y\"}}`).",
- Decl: types.NewFunction(
- types.Args(
- types.Named("object", types.NewObject(
- nil,
- types.NewDynamicProperty(types.A, types.A),
- )).Description("object to filter keys"),
- types.Named("keys", types.NewAny(
- types.NewArray(nil, types.A),
- types.NewSet(types.A),
- types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
- )),
- ),
- types.Named("filtered", types.A).Description("remaining data from `object` with only keys specified in `keys`"),
- ),
-}
+var ObjectFilter = v1.ObjectFilter
-var ObjectGet = &Builtin{
- Name: "object.get",
- Description: "Returns value of an object's key if present, otherwise a default. " +
- "If the supplied `key` is an `array`, then `object.get` will search through a nested object or array using each key in turn. " +
- "For example: `object.get({\"a\": [{ \"b\": true }]}, [\"a\", 0, \"b\"], false)` results in `true`.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("object", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("object to get `key` from"),
- types.Named("key", types.A).Description("key to lookup in `object`"),
- types.Named("default", types.A).Description("default to use when lookup fails"),
- ),
- types.Named("value", types.A).Description("`object[key]` if present, otherwise `default`"),
- ),
-}
+var ObjectGet = v1.ObjectGet
-var ObjectKeys = &Builtin{
- Name: "object.keys",
- Description: "Returns a set of an object's keys. " +
- "For example: `object.keys({\"a\": 1, \"b\": true, \"c\": \"d\")` results in `{\"a\", \"b\", \"c\"}`.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("object", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("object to get keys from"),
- ),
- types.Named("value", types.NewSet(types.A)).Description("set of `object`'s keys"),
- ),
-}
+var ObjectKeys = v1.ObjectKeys
/*
* Encoding
*/
-var encoding = category("encoding")
-
-var JSONMarshal = &Builtin{
- Name: "json.marshal",
- Description: "Serializes the input term to JSON.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.A).Description("the term to serialize"),
- ),
- types.Named("y", types.S).Description("the JSON string representation of `x`"),
- ),
- Categories: encoding,
-}
-var JSONMarshalWithOptions = &Builtin{
- Name: "json.marshal_with_options",
- Description: "Serializes the input term JSON, with additional formatting options via the `opts` parameter. " +
- "`opts` accepts keys `pretty` (enable multi-line/formatted JSON), `prefix` (string to prefix lines with, default empty string) and `indent` (string to indent with, default `\\t`).",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.A).Description("the term to serialize"),
- types.Named("opts", types.NewObject(
- []*types.StaticProperty{
- types.NewStaticProperty("pretty", types.B),
- types.NewStaticProperty("indent", types.S),
- types.NewStaticProperty("prefix", types.S),
- },
- types.NewDynamicProperty(types.S, types.A),
- )).Description("encoding options"),
- ),
- types.Named("y", types.S).Description("the JSON string representation of `x`, with configured prefix/indent string(s) as appropriate"),
- ),
- Categories: encoding,
-}
+var JSONMarshal = v1.JSONMarshal
-var JSONUnmarshal = &Builtin{
- Name: "json.unmarshal",
- Description: "Deserializes the input string.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S).Description("a JSON string"),
- ),
- types.Named("y", types.A).Description("the term deserialized from `x`"),
- ),
- Categories: encoding,
-}
+var JSONMarshalWithOptions = v1.JSONMarshalWithOptions
-var JSONIsValid = &Builtin{
- Name: "json.is_valid",
- Description: "Verifies the input string is a valid JSON document.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S).Description("a JSON string"),
- ),
- types.Named("result", types.B).Description("`true` if `x` is valid JSON, `false` otherwise"),
- ),
- Categories: encoding,
-}
+var JSONUnmarshal = v1.JSONUnmarshal
-var Base64Encode = &Builtin{
- Name: "base64.encode",
- Description: "Serializes the input string into base64 encoding.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S),
- ),
- types.Named("y", types.S).Description("base64 serialization of `x`"),
- ),
- Categories: encoding,
-}
+var JSONIsValid = v1.JSONIsValid
-var Base64Decode = &Builtin{
- Name: "base64.decode",
- Description: "Deserializes the base64 encoded input string.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S),
- ),
- types.Named("y", types.S).Description("base64 deserialization of `x`"),
- ),
- Categories: encoding,
-}
+var Base64Encode = v1.Base64Encode
-var Base64IsValid = &Builtin{
- Name: "base64.is_valid",
- Description: "Verifies the input string is base64 encoded.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S),
- ),
- types.Named("result", types.B).Description("`true` if `x` is valid base64 encoded value, `false` otherwise"),
- ),
- Categories: encoding,
-}
+var Base64Decode = v1.Base64Decode
-var Base64UrlEncode = &Builtin{
- Name: "base64url.encode",
- Description: "Serializes the input string into base64url encoding.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S),
- ),
- types.Named("y", types.S).Description("base64url serialization of `x`"),
- ),
- Categories: encoding,
-}
+var Base64IsValid = v1.Base64IsValid
-var Base64UrlEncodeNoPad = &Builtin{
- Name: "base64url.encode_no_pad",
- Description: "Serializes the input string into base64url encoding without padding.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S),
- ),
- types.Named("y", types.S).Description("base64url serialization of `x`"),
- ),
- Categories: encoding,
-}
+var Base64UrlEncode = v1.Base64UrlEncode
-var Base64UrlDecode = &Builtin{
- Name: "base64url.decode",
- Description: "Deserializes the base64url encoded input string.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S),
- ),
- types.Named("y", types.S).Description("base64url deserialization of `x`"),
- ),
- Categories: encoding,
-}
+var Base64UrlEncodeNoPad = v1.Base64UrlEncodeNoPad
-var URLQueryDecode = &Builtin{
- Name: "urlquery.decode",
- Description: "Decodes a URL-encoded input string.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S),
- ),
- types.Named("y", types.S).Description("URL-encoding deserialization of `x`"),
- ),
- Categories: encoding,
-}
+var Base64UrlDecode = v1.Base64UrlDecode
-var URLQueryEncode = &Builtin{
- Name: "urlquery.encode",
- Description: "Encodes the input string into a URL-encoded string.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S),
- ),
- types.Named("y", types.S).Description("URL-encoding serialization of `x`"),
- ),
- Categories: encoding,
-}
+var URLQueryDecode = v1.URLQueryDecode
-var URLQueryEncodeObject = &Builtin{
- Name: "urlquery.encode_object",
- Description: "Encodes the given object into a URL encoded query string.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("object", types.NewObject(
- nil,
- types.NewDynamicProperty(
- types.S,
- types.NewAny(
- types.S,
- types.NewArray(nil, types.S),
- types.NewSet(types.S)))))),
- types.Named("y", types.S).Description("the URL-encoded serialization of `object`"),
- ),
- Categories: encoding,
-}
+var URLQueryEncode = v1.URLQueryEncode
-var URLQueryDecodeObject = &Builtin{
- Name: "urlquery.decode_object",
- Description: "Decodes the given URL query string into an object.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S).Description("the query string"),
- ),
- types.Named("object", types.NewObject(nil, types.NewDynamicProperty(
- types.S,
- types.NewArray(nil, types.S)))).Description("the resulting object"),
- ),
- Categories: encoding,
-}
+var URLQueryEncodeObject = v1.URLQueryEncodeObject
-var YAMLMarshal = &Builtin{
- Name: "yaml.marshal",
- Description: "Serializes the input term to YAML.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.A).Description("the term to serialize"),
- ),
- types.Named("y", types.S).Description("the YAML string representation of `x`"),
- ),
- Categories: encoding,
-}
+var URLQueryDecodeObject = v1.URLQueryDecodeObject
-var YAMLUnmarshal = &Builtin{
- Name: "yaml.unmarshal",
- Description: "Deserializes the input string.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S).Description("a YAML string"),
- ),
- types.Named("y", types.A).Description("the term deserialized from `x`"),
- ),
- Categories: encoding,
-}
+var YAMLMarshal = v1.YAMLMarshal
+
+var YAMLUnmarshal = v1.YAMLUnmarshal
// YAMLIsValid verifies the input string is a valid YAML document.
-var YAMLIsValid = &Builtin{
- Name: "yaml.is_valid",
- Description: "Verifies the input string is a valid YAML document.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S).Description("a YAML string"),
- ),
- types.Named("result", types.B).Description("`true` if `x` is valid YAML, `false` otherwise"),
- ),
- Categories: encoding,
-}
+var YAMLIsValid = v1.YAMLIsValid
-var HexEncode = &Builtin{
- Name: "hex.encode",
- Description: "Serializes the input string using hex-encoding.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S),
- ),
- types.Named("y", types.S).Description("serialization of `x` using hex-encoding"),
- ),
- Categories: encoding,
-}
+var HexEncode = v1.HexEncode
-var HexDecode = &Builtin{
- Name: "hex.decode",
- Description: "Deserializes the hex-encoded input string.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S).Description("a hex-encoded string"),
- ),
- types.Named("y", types.S).Description("deserialized from `x`"),
- ),
- Categories: encoding,
-}
+var HexDecode = v1.HexDecode
/**
* Tokens
*/
-var tokensCat = category("tokens")
-
-var JWTDecode = &Builtin{
- Name: "io.jwt.decode",
- Description: "Decodes a JSON Web Token and outputs it as an object.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("jwt", types.S).Description("JWT token to decode"),
- ),
- types.Named("output", types.NewArray([]types.Type{
- types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
- types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
- types.S,
- }, nil)).Description("`[header, payload, sig]`, where `header` and `payload` are objects; `sig` is the hexadecimal representation of the signature on the token."),
- ),
- Categories: tokensCat,
-}
-var JWTVerifyRS256 = &Builtin{
- Name: "io.jwt.verify_rs256",
- Description: "Verifies if a RS256 JWT signature is valid.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
- types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"),
- ),
- types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
- ),
- Categories: tokensCat,
-}
+var JWTDecode = v1.JWTDecode
-var JWTVerifyRS384 = &Builtin{
- Name: "io.jwt.verify_rs384",
- Description: "Verifies if a RS384 JWT signature is valid.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
- types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"),
- ),
- types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
- ),
- Categories: tokensCat,
-}
+var JWTVerifyRS256 = v1.JWTVerifyRS256
-var JWTVerifyRS512 = &Builtin{
- Name: "io.jwt.verify_rs512",
- Description: "Verifies if a RS512 JWT signature is valid.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
- types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"),
- ),
- types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
- ),
- Categories: tokensCat,
-}
+var JWTVerifyRS384 = v1.JWTVerifyRS384
-var JWTVerifyPS256 = &Builtin{
- Name: "io.jwt.verify_ps256",
- Description: "Verifies if a PS256 JWT signature is valid.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
- types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"),
- ),
- types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
- ),
- Categories: tokensCat,
-}
+var JWTVerifyRS512 = v1.JWTVerifyRS512
-var JWTVerifyPS384 = &Builtin{
- Name: "io.jwt.verify_ps384",
- Description: "Verifies if a PS384 JWT signature is valid.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
- types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"),
- ),
- types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
- ),
- Categories: tokensCat,
-}
+var JWTVerifyPS256 = v1.JWTVerifyPS256
-var JWTVerifyPS512 = &Builtin{
- Name: "io.jwt.verify_ps512",
- Description: "Verifies if a PS512 JWT signature is valid.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
- types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"),
- ),
- types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
- ),
- Categories: tokensCat,
-}
+var JWTVerifyPS384 = v1.JWTVerifyPS384
-var JWTVerifyES256 = &Builtin{
- Name: "io.jwt.verify_es256",
- Description: "Verifies if a ES256 JWT signature is valid.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
- types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"),
- ),
- types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
- ),
- Categories: tokensCat,
-}
+var JWTVerifyPS512 = v1.JWTVerifyPS512
-var JWTVerifyES384 = &Builtin{
- Name: "io.jwt.verify_es384",
- Description: "Verifies if a ES384 JWT signature is valid.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
- types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"),
- ),
- types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
- ),
- Categories: tokensCat,
-}
+var JWTVerifyES256 = v1.JWTVerifyES256
-var JWTVerifyES512 = &Builtin{
- Name: "io.jwt.verify_es512",
- Description: "Verifies if a ES512 JWT signature is valid.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
- types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"),
- ),
- types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
- ),
- Categories: tokensCat,
-}
+var JWTVerifyES384 = v1.JWTVerifyES384
-var JWTVerifyHS256 = &Builtin{
- Name: "io.jwt.verify_hs256",
- Description: "Verifies if a HS256 (secret) JWT signature is valid.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
- types.Named("secret", types.S).Description("plain text secret used to verify the signature"),
- ),
- types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
- ),
- Categories: tokensCat,
-}
+var JWTVerifyES512 = v1.JWTVerifyES512
-var JWTVerifyHS384 = &Builtin{
- Name: "io.jwt.verify_hs384",
- Description: "Verifies if a HS384 (secret) JWT signature is valid.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
- types.Named("secret", types.S).Description("plain text secret used to verify the signature"),
- ),
- types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
- ),
- Categories: tokensCat,
-}
+var JWTVerifyHS256 = v1.JWTVerifyHS256
-var JWTVerifyHS512 = &Builtin{
- Name: "io.jwt.verify_hs512",
- Description: "Verifies if a HS512 (secret) JWT signature is valid.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
- types.Named("secret", types.S).Description("plain text secret used to verify the signature"),
- ),
- types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
- ),
- Categories: tokensCat,
-}
+var JWTVerifyHS384 = v1.JWTVerifyHS384
-// Marked non-deterministic because it relies on time internally.
-var JWTDecodeVerify = &Builtin{
- Name: "io.jwt.decode_verify",
- Description: `Verifies a JWT signature under parameterized constraints and decodes the claims if it is valid.
-Supports the following algorithms: HS256, HS384, HS512, RS256, RS384, RS512, ES256, ES384, ES512, PS256, PS384 and PS512.`,
- Decl: types.NewFunction(
- types.Args(
- types.Named("jwt", types.S).Description("JWT token whose signature is to be verified and whose claims are to be checked"),
- types.Named("constraints", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("claim verification constraints"),
- ),
- types.Named("output", types.NewArray([]types.Type{
- types.B,
- types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
- types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
- }, nil)).Description("`[valid, header, payload]`: if the input token is verified and meets the requirements of `constraints` then `valid` is `true`; `header` and `payload` are objects containing the JOSE header and the JWT claim set; otherwise, `valid` is `false`, `header` and `payload` are `{}`"),
- ),
- Categories: tokensCat,
- Nondeterministic: true,
-}
+var JWTVerifyHS512 = v1.JWTVerifyHS512
-var tokenSign = category("tokensign")
+// Marked non-deterministic because it relies on time internally.
+var JWTDecodeVerify = v1.JWTDecodeVerify
// Marked non-deterministic because it relies on RNG internally.
-var JWTEncodeSignRaw = &Builtin{
- Name: "io.jwt.encode_sign_raw",
- Description: "Encodes and optionally signs a JSON Web Token.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("headers", types.S).Description("JWS Protected Header"),
- types.Named("payload", types.S).Description("JWS Payload"),
- types.Named("key", types.S).Description("JSON Web Key (RFC7517)"),
- ),
- types.Named("output", types.S).Description("signed JWT"),
- ),
- Categories: tokenSign,
- Nondeterministic: true,
-}
+var JWTEncodeSignRaw = v1.JWTEncodeSignRaw
// Marked non-deterministic because it relies on RNG internally.
-var JWTEncodeSign = &Builtin{
- Name: "io.jwt.encode_sign",
- Description: "Encodes and optionally signs a JSON Web Token. Inputs are taken as objects, not encoded strings (see `io.jwt.encode_sign_raw`).",
- Decl: types.NewFunction(
- types.Args(
- types.Named("headers", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWS Protected Header"),
- types.Named("payload", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWS Payload"),
- types.Named("key", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JSON Web Key (RFC7517)"),
- ),
- types.Named("output", types.S).Description("signed JWT"),
- ),
- Categories: tokenSign,
- Nondeterministic: true,
-}
+var JWTEncodeSign = v1.JWTEncodeSign
/**
* Time
*/
// Marked non-deterministic because it relies on time directly.
-var NowNanos = &Builtin{
- Name: "time.now_ns",
- Description: "Returns the current time since epoch in nanoseconds.",
- Decl: types.NewFunction(
- nil,
- types.Named("now", types.N).Description("nanoseconds since epoch"),
- ),
- Nondeterministic: true,
-}
+var NowNanos = v1.NowNanos
-var ParseNanos = &Builtin{
- Name: "time.parse_ns",
- Description: "Returns the time in nanoseconds parsed from the string in the given format. `undefined` if the result would be outside the valid time range that can fit within an `int64`.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("layout", types.S).Description("format used for parsing, see the [Go `time` package documentation](https://golang.org/pkg/time/#Parse) for more details"),
- types.Named("value", types.S).Description("input to parse according to `layout`"),
- ),
- types.Named("ns", types.N).Description("`value` in nanoseconds since epoch"),
- ),
-}
+var ParseNanos = v1.ParseNanos
-var ParseRFC3339Nanos = &Builtin{
- Name: "time.parse_rfc3339_ns",
- Description: "Returns the time in nanoseconds parsed from the string in RFC3339 format. `undefined` if the result would be outside the valid time range that can fit within an `int64`.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("value", types.S),
- ),
- types.Named("ns", types.N).Description("`value` in nanoseconds since epoch"),
- ),
-}
+var ParseRFC3339Nanos = v1.ParseRFC3339Nanos
-var ParseDurationNanos = &Builtin{
- Name: "time.parse_duration_ns",
- Description: "Returns the duration in nanoseconds represented by a string.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("duration", types.S).Description("a duration like \"3m\"; see the [Go `time` package documentation](https://golang.org/pkg/time/#ParseDuration) for more details"),
- ),
- types.Named("ns", types.N).Description("the `duration` in nanoseconds"),
- ),
-}
+var ParseDurationNanos = v1.ParseDurationNanos
-var Format = &Builtin{
- Name: "time.format",
- Description: "Returns the formatted timestamp for the nanoseconds since epoch.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.NewAny(
- types.N,
- types.NewArray([]types.Type{types.N, types.S}, nil),
- types.NewArray([]types.Type{types.N, types.S, types.S}, nil),
- )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string; or a three-element array of ns, timezone string and a layout string or golang defined formatting constant (see golang supported time formats)"),
- ),
- types.Named("formatted timestamp", types.S).Description("the formatted timestamp represented for the nanoseconds since the epoch in the supplied timezone (or UTC)"),
- ),
-}
+var Format = v1.Format
-var Date = &Builtin{
- Name: "time.date",
- Description: "Returns the `[year, month, day]` for the nanoseconds since epoch.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.NewAny(
- types.N,
- types.NewArray([]types.Type{types.N, types.S}, nil),
- )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string"),
- ),
- types.Named("date", types.NewArray([]types.Type{types.N, types.N, types.N}, nil)).Description("an array of `year`, `month` (1-12), and `day` (1-31)"),
- ),
-}
+var Date = v1.Date
-var Clock = &Builtin{
- Name: "time.clock",
- Description: "Returns the `[hour, minute, second]` of the day for the nanoseconds since epoch.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.NewAny(
- types.N,
- types.NewArray([]types.Type{types.N, types.S}, nil),
- )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string"),
- ),
- types.Named("output", types.NewArray([]types.Type{types.N, types.N, types.N}, nil)).
- Description("the `hour`, `minute` (0-59), and `second` (0-59) representing the time of day for the nanoseconds since epoch in the supplied timezone (or UTC)"),
- ),
-}
+var Clock = v1.Clock
-var Weekday = &Builtin{
- Name: "time.weekday",
- Description: "Returns the day of the week (Monday, Tuesday, ...) for the nanoseconds since epoch.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.NewAny(
- types.N,
- types.NewArray([]types.Type{types.N, types.S}, nil),
- )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string"),
- ),
- types.Named("day", types.S).Description("the weekday represented by `ns` nanoseconds since the epoch in the supplied timezone (or UTC)"),
- ),
-}
+var Weekday = v1.Weekday
-var AddDate = &Builtin{
- Name: "time.add_date",
- Description: "Returns the nanoseconds since epoch after adding years, months and days to nanoseconds. Month & day values outside their usual ranges after the operation and will be normalized - for example, October 32 would become November 1. `undefined` if the result would be outside the valid time range that can fit within an `int64`.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("ns", types.N).Description("nanoseconds since the epoch"),
- types.Named("years", types.N),
- types.Named("months", types.N),
- types.Named("days", types.N),
- ),
- types.Named("output", types.N).Description("nanoseconds since the epoch representing the input time, with years, months and days added"),
- ),
-}
+var AddDate = v1.AddDate
-var Diff = &Builtin{
- Name: "time.diff",
- Description: "Returns the difference between two unix timestamps in nanoseconds (with optional timezone strings).",
- Decl: types.NewFunction(
- types.Args(
- types.Named("ns1", types.NewAny(
- types.N,
- types.NewArray([]types.Type{types.N, types.S}, nil),
- )),
- types.Named("ns2", types.NewAny(
- types.N,
- types.NewArray([]types.Type{types.N, types.S}, nil),
- )),
- ),
- types.Named("output", types.NewArray([]types.Type{types.N, types.N, types.N, types.N, types.N, types.N}, nil)).Description("difference between `ns1` and `ns2` (in their supplied timezones, if supplied, or UTC) as array of numbers: `[years, months, days, hours, minutes, seconds]`"),
- ),
-}
+var Diff = v1.Diff
/**
* Crypto.
*/
-var CryptoX509ParseCertificates = &Builtin{
- Name: "crypto.x509.parse_certificates",
- Description: `Returns zero or more certificates from the given encoded string containing
-DER certificate data.
-
-If the input is empty, the function will return null. The input string should be a list of one or more
-concatenated PEM blocks. The whole input of concatenated PEM blocks can optionally be Base64 encoded.`,
- Decl: types.NewFunction(
- types.Args(
- types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing one or more certificates or a PEM string of one or more certificates"),
- ),
- types.Named("output", types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)))).Description("parsed X.509 certificates represented as objects"),
- ),
-}
+var CryptoX509ParseCertificates = v1.CryptoX509ParseCertificates
-var CryptoX509ParseAndVerifyCertificates = &Builtin{
- Name: "crypto.x509.parse_and_verify_certificates",
- Description: `Returns one or more certificates from the given string containing PEM
-or base64 encoded DER certificates after verifying the supplied certificates form a complete
-certificate chain back to a trusted root.
-
-The first certificate is treated as the root and the last is treated as the leaf,
-with all others being treated as intermediates.`,
- Decl: types.NewFunction(
- types.Args(
- types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing two or more certificates where the first is a root CA, the last is a leaf certificate, and all others are intermediate CAs"),
- ),
- types.Named("output", types.NewArray([]types.Type{
- types.B,
- types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))),
- }, nil)).Description("array of `[valid, certs]`: if the input certificate chain could be verified then `valid` is `true` and `certs` is an array of X.509 certificates represented as objects; if the input certificate chain could not be verified then `valid` is `false` and `certs` is `[]`"),
- ),
-}
+var CryptoX509ParseAndVerifyCertificates = v1.CryptoX509ParseAndVerifyCertificates
-var CryptoX509ParseAndVerifyCertificatesWithOptions = &Builtin{
- Name: "crypto.x509.parse_and_verify_certificates_with_options",
- Description: `Returns one or more certificates from the given string containing PEM
-or base64 encoded DER certificates after verifying the supplied certificates form a complete
-certificate chain back to a trusted root. A config option passed as the second argument can
-be used to configure the validation options used.
-
-The first certificate is treated as the root and the last is treated as the leaf,
-with all others being treated as intermediates.`,
-
- Decl: types.NewFunction(
- types.Args(
- types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing two or more certificates where the first is a root CA, the last is a leaf certificate, and all others are intermediate CAs"),
- types.Named("options", types.NewObject(
- nil,
- types.NewDynamicProperty(types.S, types.A),
- )).Description("object containing extra configs to verify the validity of certificates. `options` object supports four fields which maps to same fields in [x509.VerifyOptions struct](https://pkg.go.dev/crypto/x509#VerifyOptions). `DNSName`, `CurrentTime`: Nanoseconds since the Unix Epoch as a number, `MaxConstraintComparisons` and `KeyUsages`. `KeyUsages` is list and can have possible values as in: `\"KeyUsageAny\"`, `\"KeyUsageServerAuth\"`, `\"KeyUsageClientAuth\"`, `\"KeyUsageCodeSigning\"`, `\"KeyUsageEmailProtection\"`, `\"KeyUsageIPSECEndSystem\"`, `\"KeyUsageIPSECTunnel\"`, `\"KeyUsageIPSECUser\"`, `\"KeyUsageTimeStamping\"`, `\"KeyUsageOCSPSigning\"`, `\"KeyUsageMicrosoftServerGatedCrypto\"`, `\"KeyUsageNetscapeServerGatedCrypto\"`, `\"KeyUsageMicrosoftCommercialCodeSigning\"`, `\"KeyUsageMicrosoftKernelCodeSigning\"` "),
- ),
- types.Named("output", types.NewArray([]types.Type{
- types.B,
- types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))),
- }, nil)).Description("array of `[valid, certs]`: if the input certificate chain could be verified then `valid` is `true` and `certs` is an array of X.509 certificates represented as objects; if the input certificate chain could not be verified then `valid` is `false` and `certs` is `[]`"),
- ),
-}
+var CryptoX509ParseAndVerifyCertificatesWithOptions = v1.CryptoX509ParseAndVerifyCertificatesWithOptions
-var CryptoX509ParseCertificateRequest = &Builtin{
- Name: "crypto.x509.parse_certificate_request",
- Description: "Returns a PKCS #10 certificate signing request from the given PEM-encoded PKCS#10 certificate signing request.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("csr", types.S).Description("base64 string containing either a PEM encoded or DER CSR or a string containing a PEM CSR"),
- ),
- types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("X.509 CSR represented as an object"),
- ),
-}
+var CryptoX509ParseCertificateRequest = v1.CryptoX509ParseCertificateRequest
-var CryptoX509ParseKeyPair = &Builtin{
- Name: "crypto.x509.parse_keypair",
- Description: "Returns a valid key pair",
- Decl: types.NewFunction(
- types.Args(
- types.Named("cert", types.S).Description("string containing PEM or base64 encoded DER certificates"),
- types.Named("pem", types.S).Description("string containing PEM or base64 encoded DER keys"),
- ),
- types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("if key pair is valid, returns the tls.certificate(https://pkg.go.dev/crypto/tls#Certificate) as an object. If the key pair is invalid, nil and an error are returned."),
- ),
-}
-var CryptoX509ParseRSAPrivateKey = &Builtin{
- Name: "crypto.x509.parse_rsa_private_key",
- Description: "Returns a JWK for signing a JWT from the given PEM-encoded RSA private key.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("pem", types.S).Description("base64 string containing a PEM encoded RSA private key"),
- ),
- types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWK as an object"),
- ),
-}
+var CryptoX509ParseKeyPair = v1.CryptoX509ParseKeyPair
+var CryptoX509ParseRSAPrivateKey = v1.CryptoX509ParseRSAPrivateKey
-var CryptoParsePrivateKeys = &Builtin{
- Name: "crypto.parse_private_keys",
- Description: `Returns zero or more private keys from the given encoded string containing DER certificate data.
-
-If the input is empty, the function will return null. The input string should be a list of one or more concatenated PEM blocks. The whole input of concatenated PEM blocks can optionally be Base64 encoded.`,
- Decl: types.NewFunction(
- types.Args(
- types.Named("keys", types.S).Description("PEM encoded data containing one or more private keys as concatenated blocks. Optionally Base64 encoded."),
- ),
- types.Named("output", types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)))).Description("parsed private keys represented as objects"),
- ),
-}
+var CryptoParsePrivateKeys = v1.CryptoParsePrivateKeys
-var CryptoMd5 = &Builtin{
- Name: "crypto.md5",
- Description: "Returns a string representing the input string hashed with the MD5 function",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S),
- ),
- types.Named("y", types.S).Description("MD5-hash of `x`"),
- ),
-}
+var CryptoMd5 = v1.CryptoMd5
-var CryptoSha1 = &Builtin{
- Name: "crypto.sha1",
- Description: "Returns a string representing the input string hashed with the SHA1 function",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S),
- ),
- types.Named("y", types.S).Description("SHA1-hash of `x`"),
- ),
-}
+var CryptoSha1 = v1.CryptoSha1
-var CryptoSha256 = &Builtin{
- Name: "crypto.sha256",
- Description: "Returns a string representing the input string hashed with the SHA256 function",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S),
- ),
- types.Named("y", types.S).Description("SHA256-hash of `x`"),
- ),
-}
+var CryptoSha256 = v1.CryptoSha256
-var CryptoHmacMd5 = &Builtin{
- Name: "crypto.hmac.md5",
- Description: "Returns a string representing the MD5 HMAC of the input message using the input key.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S).Description("input string"),
- types.Named("key", types.S).Description("key to use"),
- ),
- types.Named("y", types.S).Description("MD5-HMAC of `x`"),
- ),
-}
+var CryptoHmacMd5 = v1.CryptoHmacMd5
-var CryptoHmacSha1 = &Builtin{
- Name: "crypto.hmac.sha1",
- Description: "Returns a string representing the SHA1 HMAC of the input message using the input key.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S).Description("input string"),
- types.Named("key", types.S).Description("key to use"),
- ),
- types.Named("y", types.S).Description("SHA1-HMAC of `x`"),
- ),
-}
+var CryptoHmacSha1 = v1.CryptoHmacSha1
-var CryptoHmacSha256 = &Builtin{
- Name: "crypto.hmac.sha256",
- Description: "Returns a string representing the SHA256 HMAC of the input message using the input key.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S).Description("input string"),
- types.Named("key", types.S).Description("key to use"),
- ),
- types.Named("y", types.S).Description("SHA256-HMAC of `x`"),
- ),
-}
+var CryptoHmacSha256 = v1.CryptoHmacSha256
-var CryptoHmacSha512 = &Builtin{
- Name: "crypto.hmac.sha512",
- Description: "Returns a string representing the SHA512 HMAC of the input message using the input key.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.S).Description("input string"),
- types.Named("key", types.S).Description("key to use"),
- ),
- types.Named("y", types.S).Description("SHA512-HMAC of `x`"),
- ),
-}
+var CryptoHmacSha512 = v1.CryptoHmacSha512
-var CryptoHmacEqual = &Builtin{
- Name: "crypto.hmac.equal",
- Description: "Returns a boolean representing the result of comparing two MACs for equality without leaking timing information.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("mac1", types.S).Description("mac1 to compare"),
- types.Named("mac2", types.S).Description("mac2 to compare"),
- ),
- types.Named("result", types.B).Description("`true` if the MACs are equals, `false` otherwise"),
- ),
-}
+var CryptoHmacEqual = v1.CryptoHmacEqual
/**
* Graphs.
*/
-var graphs = category("graph")
-
-var WalkBuiltin = &Builtin{
- Name: "walk",
- Relation: true,
- Description: "Generates `[path, value]` tuples for all nested documents of `x` (recursively). Queries can use `walk` to traverse documents nested under `x`.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.A),
- ),
- types.Named("output", types.NewArray(
- []types.Type{
- types.NewArray(nil, types.A),
- types.A,
- },
- nil,
- )).Description("pairs of `path` and `value`: `path` is an array representing the pointer to `value` in `x`. If `path` is assigned a wildcard (`_`), the `walk` function will skip path creation entirely for faster evaluation."),
- ),
- Categories: graphs,
-}
-var ReachableBuiltin = &Builtin{
- Name: "graph.reachable",
- Description: "Computes the set of reachable nodes in the graph from a set of starting nodes.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("graph", types.NewObject(
- nil,
- types.NewDynamicProperty(
- types.A,
- types.NewAny(
- types.NewSet(types.A),
- types.NewArray(nil, types.A)),
- )),
- ).Description("object containing a set or array of neighboring vertices"),
- types.Named("initial", types.NewAny(types.NewSet(types.A), types.NewArray(nil, types.A))).Description("set or array of root vertices"),
- ),
- types.Named("output", types.NewSet(types.A)).Description("set of vertices reachable from the `initial` vertices in the directed `graph`"),
- ),
-}
+var WalkBuiltin = v1.WalkBuiltin
-var ReachablePathsBuiltin = &Builtin{
- Name: "graph.reachable_paths",
- Description: "Computes the set of reachable paths in the graph from a set of starting nodes.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("graph", types.NewObject(
- nil,
- types.NewDynamicProperty(
- types.A,
- types.NewAny(
- types.NewSet(types.A),
- types.NewArray(nil, types.A)),
- )),
- ).Description("object containing a set or array of root vertices"),
- types.Named("initial", types.NewAny(types.NewSet(types.A), types.NewArray(nil, types.A))).Description("initial paths"), // TODO(sr): copied. is that correct?
- ),
- types.Named("output", types.NewSet(types.NewArray(nil, types.A))).Description("paths reachable from the `initial` vertices in the directed `graph`"),
- ),
-}
+var ReachableBuiltin = v1.ReachableBuiltin
+
+var ReachablePathsBuiltin = v1.ReachablePathsBuiltin
/**
* Type
*/
-var typesCat = category("types")
-
-var IsNumber = &Builtin{
- Name: "is_number",
- Description: "Returns `true` if the input value is a number.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.A),
- ),
- types.Named("result", types.B).Description("`true` if `x` is a number, `false` otherwise."),
- ),
- Categories: typesCat,
-}
-var IsString = &Builtin{
- Name: "is_string",
- Description: "Returns `true` if the input value is a string.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.A),
- ),
- types.Named("result", types.B).Description("`true` if `x` is a string, `false` otherwise."),
- ),
- Categories: typesCat,
-}
+var IsNumber = v1.IsNumber
-var IsBoolean = &Builtin{
- Name: "is_boolean",
- Description: "Returns `true` if the input value is a boolean.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.A),
- ),
- types.Named("result", types.B).Description("`true` if `x` is an boolean, `false` otherwise."),
- ),
- Categories: typesCat,
-}
+var IsString = v1.IsString
-var IsArray = &Builtin{
- Name: "is_array",
- Description: "Returns `true` if the input value is an array.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.A),
- ),
- types.Named("result", types.B).Description("`true` if `x` is an array, `false` otherwise."),
- ),
- Categories: typesCat,
-}
+var IsBoolean = v1.IsBoolean
-var IsSet = &Builtin{
- Name: "is_set",
- Description: "Returns `true` if the input value is a set.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.A),
- ),
- types.Named("result", types.B).Description("`true` if `x` is a set, `false` otherwise."),
- ),
- Categories: typesCat,
-}
+var IsArray = v1.IsArray
-var IsObject = &Builtin{
- Name: "is_object",
- Description: "Returns true if the input value is an object",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.A),
- ),
- types.Named("result", types.B).Description("`true` if `x` is an object, `false` otherwise."),
- ),
- Categories: typesCat,
-}
+var IsSet = v1.IsSet
-var IsNull = &Builtin{
- Name: "is_null",
- Description: "Returns `true` if the input value is null.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.A),
- ),
- types.Named("result", types.B).Description("`true` if `x` is null, `false` otherwise."),
- ),
- Categories: typesCat,
-}
+var IsObject = v1.IsObject
+
+var IsNull = v1.IsNull
/**
* Type Name
*/
// TypeNameBuiltin returns the type of the input.
-var TypeNameBuiltin = &Builtin{
- Name: "type_name",
- Description: "Returns the type of its input value.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("x", types.A),
- ),
- types.Named("type", types.S).Description(`one of "null", "boolean", "number", "string", "array", "object", "set"`),
- ),
- Categories: typesCat,
-}
+var TypeNameBuiltin = v1.TypeNameBuiltin
/**
* HTTP Request
*/
// Marked non-deterministic because HTTP request results can be non-deterministic.
-var HTTPSend = &Builtin{
- Name: "http.send",
- Description: "Returns a HTTP response to the given HTTP request.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("request", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))),
- ),
- types.Named("response", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))),
- ),
- Nondeterministic: true,
-}
+var HTTPSend = v1.HTTPSend
/**
* GraphQL
*/
// GraphQLParse returns a pair of AST objects from parsing/validation.
-var GraphQLParse = &Builtin{
- Name: "graphql.parse",
- Description: "Returns AST objects for a given GraphQL query and schema after validating the query against the schema. Returns undefined if errors were encountered during parsing or validation. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))),
- types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))),
- ),
- types.Named("output", types.NewArray([]types.Type{
- types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
- types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
- }, nil)).Description("`output` is of the form `[query_ast, schema_ast]`. If the GraphQL query is valid given the provided schema, then `query_ast` and `schema_ast` are objects describing the ASTs for the query and schema."),
- ),
-}
+var GraphQLParse = v1.GraphQLParse
// GraphQLParseAndVerify returns a boolean and a pair of AST object from parsing/validation.
-var GraphQLParseAndVerify = &Builtin{
- Name: "graphql.parse_and_verify",
- Description: "Returns a boolean indicating success or failure alongside the parsed ASTs for a given GraphQL query and schema after validating the query against the schema. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))),
- types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))),
- ),
- types.Named("output", types.NewArray([]types.Type{
- types.B,
- types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
- types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
- }, nil)).Description(" `output` is of the form `[valid, query_ast, schema_ast]`. If the query is valid given the provided schema, then `valid` is `true`, and `query_ast` and `schema_ast` are objects describing the ASTs for the GraphQL query and schema. Otherwise, `valid` is `false` and `query_ast` and `schema_ast` are `{}`."),
- ),
-}
+var GraphQLParseAndVerify = v1.GraphQLParseAndVerify
// GraphQLParseQuery parses the input GraphQL query and returns a JSON
// representation of its AST.
-var GraphQLParseQuery = &Builtin{
- Name: "graphql.parse_query",
- Description: "Returns an AST object for a GraphQL query.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("query", types.S),
- ),
- types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("AST object for the GraphQL query."),
- ),
-}
+var GraphQLParseQuery = v1.GraphQLParseQuery
// GraphQLParseSchema parses the input GraphQL schema and returns a JSON
// representation of its AST.
-var GraphQLParseSchema = &Builtin{
- Name: "graphql.parse_schema",
- Description: "Returns an AST object for a GraphQL schema.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("schema", types.S),
- ),
- types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("AST object for the GraphQL schema."),
- ),
-}
+var GraphQLParseSchema = v1.GraphQLParseSchema
// GraphQLIsValid returns true if a GraphQL query is valid with a given
// schema, and returns false for all other inputs.
-var GraphQLIsValid = &Builtin{
- Name: "graphql.is_valid",
- Description: "Checks that a GraphQL query is valid against a given schema. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))),
- types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))),
- ),
- types.Named("output", types.B).Description("`true` if the query is valid under the given schema. `false` otherwise."),
- ),
-}
+var GraphQLIsValid = v1.GraphQLIsValid
// GraphQLSchemaIsValid returns true if the input is valid GraphQL schema,
// and returns false for all other inputs.
-var GraphQLSchemaIsValid = &Builtin{
- Name: "graphql.schema_is_valid",
- Description: "Checks that the input is a valid GraphQL schema. The schema can be either a GraphQL string or an AST object from the other GraphQL builtin functions.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))),
- ),
- types.Named("output", types.B).Description("`true` if the schema is a valid GraphQL schema. `false` otherwise."),
- ),
-}
+var GraphQLSchemaIsValid = v1.GraphQLSchemaIsValid
/**
* JSON Schema
@@ -2811,313 +499,76 @@ var GraphQLSchemaIsValid = &Builtin{
// JSONSchemaVerify returns empty string if the input is valid JSON schema
// and returns error string for all other inputs.
-var JSONSchemaVerify = &Builtin{
- Name: "json.verify_schema",
- Description: "Checks that the input is a valid JSON schema object. The schema can be either a JSON string or an JSON object.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))).
- Description("the schema to verify"),
- ),
- types.Named("output", types.NewArray([]types.Type{
- types.B,
- types.NewAny(types.S, types.Null{}),
- }, nil)).
- Description("`output` is of the form `[valid, error]`. If the schema is valid, then `valid` is `true`, and `error` is `null`. Otherwise, `valid` is `false` and `error` is a string describing the error."),
- ),
- Categories: objectCat,
-}
+var JSONSchemaVerify = v1.JSONSchemaVerify
// JSONMatchSchema returns empty array if the document matches the JSON schema,
// and returns non-empty array with error objects otherwise.
-var JSONMatchSchema = &Builtin{
- Name: "json.match_schema",
- Description: "Checks that the document matches the JSON schema.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("document", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))).
- Description("document to verify by schema"),
- types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))).
- Description("schema to verify document by"),
- ),
- types.Named("output", types.NewArray([]types.Type{
- types.B,
- types.NewArray(
- nil, types.NewObject(
- []*types.StaticProperty{
- {Key: "error", Value: types.S},
- {Key: "type", Value: types.S},
- {Key: "field", Value: types.S},
- {Key: "desc", Value: types.S},
- },
- nil,
- ),
- ),
- }, nil)).
- Description("`output` is of the form `[match, errors]`. If the document is valid given the schema, then `match` is `true`, and `errors` is an empty array. Otherwise, `match` is `false` and `errors` is an array of objects describing the error(s)."),
- ),
- Categories: objectCat,
-}
+var JSONMatchSchema = v1.JSONMatchSchema
/**
* Cloud Provider Helper Functions
*/
-var providersAWSCat = category("providers.aws")
-
-var ProvidersAWSSignReqObj = &Builtin{
- Name: "providers.aws.sign_req",
- Description: "Signs an HTTP request object for Amazon Web Services. Currently implements [AWS Signature Version 4 request signing](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) by the `Authorization` header method.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("request", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))),
- types.Named("aws_config", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))),
- types.Named("time_ns", types.N),
- ),
- types.Named("signed_request", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))),
- ),
- Categories: providersAWSCat,
-}
+
+var ProvidersAWSSignReqObj = v1.ProvidersAWSSignReqObj
/**
* Rego
*/
-var RegoParseModule = &Builtin{
- Name: "rego.parse_module",
- Description: "Parses the input Rego string and returns an object representation of the AST.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("filename", types.S).Description("file name to attach to AST nodes' locations"),
- types.Named("rego", types.S).Description("Rego module"),
- ),
- types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), // TODO(tsandall): import AST schema
- ),
-}
+var RegoParseModule = v1.RegoParseModule
-var RegoMetadataChain = &Builtin{
- Name: "rego.metadata.chain",
- Description: `Returns the chain of metadata for the active rule.
-Ordered starting at the active rule, going outward to the most distant node in its package ancestry.
-A chain entry is a JSON document with two members: "path", an array representing the path of the node; and "annotations", a JSON document containing the annotations declared for the node.
-The first entry in the chain always points to the active rule, even if it has no declared annotations (in which case the "annotations" member is not present).`,
- Decl: types.NewFunction(
- types.Args(),
- types.Named("chain", types.NewArray(nil, types.A)).Description("each array entry represents a node in the path ancestry (chain) of the active rule that also has declared annotations"),
- ),
-}
+var RegoMetadataChain = v1.RegoMetadataChain
// RegoMetadataRule returns the metadata for the active rule
-var RegoMetadataRule = &Builtin{
- Name: "rego.metadata.rule",
- Description: "Returns annotations declared for the active rule and using the _rule_ scope.",
- Decl: types.NewFunction(
- types.Args(),
- types.Named("output", types.A).Description("\"rule\" scope annotations for this rule; empty object if no annotations exist"),
- ),
-}
+var RegoMetadataRule = v1.RegoMetadataRule
/**
* OPA
*/
// Marked non-deterministic because of unpredictable config/environment-dependent results.
-var OPARuntime = &Builtin{
- Name: "opa.runtime",
- Description: "Returns an object that describes the runtime environment where OPA is deployed.",
- Decl: types.NewFunction(
- nil,
- types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).
- Description("includes a `config` key if OPA was started with a configuration file; an `env` key containing the environment variables that the OPA process was started with; includes `version` and `commit` keys containing the version and build commit of OPA."),
- ),
- Nondeterministic: true,
-}
+var OPARuntime = v1.OPARuntime
/**
* Trace
*/
-var tracing = category("tracing")
-
-var Trace = &Builtin{
- Name: "trace",
- Description: "Emits `note` as a `Note` event in the query explanation. Query explanations show the exact expressions evaluated by OPA during policy execution. For example, `trace(\"Hello There!\")` includes `Note \"Hello There!\"` in the query explanation. To include variables in the message, use `sprintf`. For example, `person := \"Bob\"; trace(sprintf(\"Hello There! %v\", [person]))` will emit `Note \"Hello There! Bob\"` inside of the explanation.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("note", types.S).Description("the note to include"),
- ),
- types.Named("result", types.B).Description("always `true`"),
- ),
- Categories: tracing,
-}
+
+var Trace = v1.Trace
/**
* Glob
*/
-var GlobMatch = &Builtin{
- Name: "glob.match",
- Description: "Parses and matches strings against the glob notation. Not to be confused with `regex.globs_match`.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("pattern", types.S),
- types.Named("delimiters", types.NewAny(
- types.NewArray(nil, types.S),
- types.NewNull(),
- )).Description("glob pattern delimiters, e.g. `[\".\", \":\"]`, defaults to `[\".\"]` if unset. If `delimiters` is `null`, glob match without delimiter."),
- types.Named("match", types.S),
- ),
- types.Named("result", types.B).Description("true if `match` can be found in `pattern` which is separated by `delimiters`"),
- ),
-}
+var GlobMatch = v1.GlobMatch
-var GlobQuoteMeta = &Builtin{
- Name: "glob.quote_meta",
- Description: "Returns a string which represents a version of the pattern where all asterisks have been escaped.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("pattern", types.S),
- ),
- types.Named("output", types.S).Description("the escaped string of `pattern`"),
- ),
- // TODO(sr): example for this was: Calling ``glob.quote_meta("*.github.com", output)`` returns ``\\*.github.com`` as ``output``.
-}
+var GlobQuoteMeta = v1.GlobQuoteMeta
/**
* Networking
*/
-var NetCIDRIntersects = &Builtin{
- Name: "net.cidr_intersects",
- Description: "Checks if a CIDR intersects with another CIDR (e.g. `192.168.0.0/16` overlaps with `192.168.1.0/24`). Supports both IPv4 and IPv6 notations.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("cidr1", types.S),
- types.Named("cidr2", types.S),
- ),
- types.Named("result", types.B),
- ),
-}
+var NetCIDRIntersects = v1.NetCIDRIntersects
-var NetCIDRExpand = &Builtin{
- Name: "net.cidr_expand",
- Description: "Expands CIDR to set of hosts (e.g., `net.cidr_expand(\"192.168.0.0/30\")` generates 4 hosts: `{\"192.168.0.0\", \"192.168.0.1\", \"192.168.0.2\", \"192.168.0.3\"}`).",
- Decl: types.NewFunction(
- types.Args(
- types.Named("cidr", types.S),
- ),
- types.Named("hosts", types.NewSet(types.S)).Description("set of IP addresses the CIDR `cidr` expands to"),
- ),
-}
+var NetCIDRExpand = v1.NetCIDRExpand
-var NetCIDRContains = &Builtin{
- Name: "net.cidr_contains",
- Description: "Checks if a CIDR or IP is contained within another CIDR. `output` is `true` if `cidr_or_ip` (e.g. `127.0.0.64/26` or `127.0.0.1`) is contained within `cidr` (e.g. `127.0.0.1/24`) and `false` otherwise. Supports both IPv4 and IPv6 notations.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("cidr", types.S),
- types.Named("cidr_or_ip", types.S),
- ),
- types.Named("result", types.B),
- ),
-}
+var NetCIDRContains = v1.NetCIDRContains
-var NetCIDRContainsMatches = &Builtin{
- Name: "net.cidr_contains_matches",
- Description: "Checks if collections of cidrs or ips are contained within another collection of cidrs and returns matches. " +
- "This function is similar to `net.cidr_contains` except it allows callers to pass collections of CIDRs or IPs as arguments and returns the matches (as opposed to a boolean result indicating a match between two CIDRs/IPs).",
- Decl: types.NewFunction(
- types.Args(
- types.Named("cidrs", netCidrContainsMatchesOperandType),
- types.Named("cidrs_or_ips", netCidrContainsMatchesOperandType),
- ),
- types.Named("output", types.NewSet(types.NewArray([]types.Type{types.A, types.A}, nil))).Description("tuples identifying matches where `cidrs_or_ips` are contained within `cidrs`"),
- ),
-}
+var NetCIDRContainsMatches = v1.NetCIDRContainsMatches
-var NetCIDRMerge = &Builtin{
- Name: "net.cidr_merge",
- Description: "Merges IP addresses and subnets into the smallest possible list of CIDRs (e.g., `net.cidr_merge([\"192.0.128.0/24\", \"192.0.129.0/24\"])` generates `{\"192.0.128.0/23\"}`." +
- `This function merges adjacent subnets where possible, those contained within others and also removes any duplicates.
-Supports both IPv4 and IPv6 notations. IPv6 inputs need a prefix length (e.g. "/128").`,
- Decl: types.NewFunction(
- types.Args(
- types.Named("addrs", types.NewAny(
- types.NewArray(nil, types.NewAny(types.S)),
- types.NewSet(types.S),
- )).Description("CIDRs or IP addresses"),
- ),
- types.Named("output", types.NewSet(types.S)).Description("smallest possible set of CIDRs obtained after merging the provided list of IP addresses and subnets in `addrs`"),
- ),
-}
+var NetCIDRMerge = v1.NetCIDRMerge
-var NetCIDRIsValid = &Builtin{
- Name: "net.cidr_is_valid",
- Description: "Parses an IPv4/IPv6 CIDR and returns a boolean indicating if the provided CIDR is valid.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("cidr", types.S),
- ),
- types.Named("result", types.B),
- ),
-}
-
-var netCidrContainsMatchesOperandType = types.NewAny(
- types.S,
- types.NewArray(nil, types.NewAny(
- types.S,
- types.NewArray(nil, types.A),
- )),
- types.NewSet(types.NewAny(
- types.S,
- types.NewArray(nil, types.A),
- )),
- types.NewObject(nil, types.NewDynamicProperty(
- types.S,
- types.NewAny(
- types.S,
- types.NewArray(nil, types.A),
- ),
- )),
-)
+var NetCIDRIsValid = v1.NetCIDRIsValid
// Marked non-deterministic because DNS resolution results can be non-deterministic.
-var NetLookupIPAddr = &Builtin{
- Name: "net.lookup_ip_addr",
- Description: "Returns the set of IP addresses (both v4 and v6) that the passed-in `name` resolves to using the standard name resolution mechanisms available.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("name", types.S).Description("domain name to resolve"),
- ),
- types.Named("addrs", types.NewSet(types.S)).Description("IP addresses (v4 and v6) that `name` resolves to"),
- ),
- Nondeterministic: true,
-}
+var NetLookupIPAddr = v1.NetLookupIPAddr
/**
* Semantic Versions
*/
-var SemVerIsValid = &Builtin{
- Name: "semver.is_valid",
- Description: "Validates that the input is a valid SemVer string.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("vsn", types.A),
- ),
- types.Named("result", types.B).Description("`true` if `vsn` is a valid SemVer; `false` otherwise"),
- ),
-}
+var SemVerIsValid = v1.SemVerIsValid
-var SemVerCompare = &Builtin{
- Name: "semver.compare",
- Description: "Compares valid SemVer formatted version strings.",
- Decl: types.NewFunction(
- types.Args(
- types.Named("a", types.S),
- types.Named("b", types.S),
- ),
- types.Named("result", types.N).Description("`-1` if `a < b`; `1` if `a > b`; `0` if `a == b`"),
- ),
-}
+var SemVerCompare = v1.SemVerCompare
/**
* Printing
@@ -3128,248 +579,56 @@ var SemVerCompare = &Builtin{
// operands may be of any type. Furthermore, unlike other built-in functions,
// undefined operands DO NOT cause the print() function to fail during
// evaluation.
-var Print = &Builtin{
- Name: "print",
- Decl: types.NewVariadicFunction(nil, types.A, nil),
-}
+var Print = v1.Print
// InternalPrint represents the internal implementation of the print() function.
// The compiler rewrites print() calls to refer to the internal implementation.
-var InternalPrint = &Builtin{
- Name: "internal.print",
- Decl: types.NewFunction([]types.Type{types.NewArray(nil, types.NewSet(types.A))}, nil),
-}
+var InternalPrint = v1.InternalPrint
/**
* Deprecated built-ins.
*/
// SetDiff has been replaced by the minus built-in.
-var SetDiff = &Builtin{
- Name: "set_diff",
- Decl: types.NewFunction(
- types.Args(
- types.NewSet(types.A),
- types.NewSet(types.A),
- ),
- types.NewSet(types.A),
- ),
- deprecated: true,
-}
+var SetDiff = v1.SetDiff
// NetCIDROverlap has been replaced by the `net.cidr_contains` built-in.
-var NetCIDROverlap = &Builtin{
- Name: "net.cidr_overlap",
- Decl: types.NewFunction(
- types.Args(
- types.S,
- types.S,
- ),
- types.B,
- ),
- deprecated: true,
-}
+var NetCIDROverlap = v1.NetCIDROverlap
// CastArray checks the underlying type of the input. If it is array or set, an array
// containing the values is returned. If it is not an array, an error is thrown.
-var CastArray = &Builtin{
- Name: "cast_array",
- Decl: types.NewFunction(
- types.Args(types.A),
- types.NewArray(nil, types.A),
- ),
- deprecated: true,
-}
+var CastArray = v1.CastArray
// CastSet checks the underlying type of the input.
// If it is a set, the set is returned.
// If it is an array, the array is returned in set form (all duplicates removed)
// If neither, an error is thrown
-var CastSet = &Builtin{
- Name: "cast_set",
- Decl: types.NewFunction(
- types.Args(types.A),
- types.NewSet(types.A),
- ),
- deprecated: true,
-}
+var CastSet = v1.CastSet
// CastString returns input if it is a string; if not returns error.
// For formatting variables, see sprintf
-var CastString = &Builtin{
- Name: "cast_string",
- Decl: types.NewFunction(
- types.Args(types.A),
- types.S,
- ),
- deprecated: true,
-}
+var CastString = v1.CastString
// CastBoolean returns input if it is a boolean; if not returns error.
-var CastBoolean = &Builtin{
- Name: "cast_boolean",
- Decl: types.NewFunction(
- types.Args(types.A),
- types.B,
- ),
- deprecated: true,
-}
+var CastBoolean = v1.CastBoolean
// CastNull returns null if input is null; if not returns error.
-var CastNull = &Builtin{
- Name: "cast_null",
- Decl: types.NewFunction(
- types.Args(types.A),
- types.NewNull(),
- ),
- deprecated: true,
-}
+var CastNull = v1.CastNull
// CastObject returns the given object if it is null; throws an error otherwise
-var CastObject = &Builtin{
- Name: "cast_object",
- Decl: types.NewFunction(
- types.Args(types.A),
- types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
- ),
- deprecated: true,
-}
+var CastObject = v1.CastObject
// RegexMatchDeprecated declares `re_match` which has been deprecated. Use `regex.match` instead.
-var RegexMatchDeprecated = &Builtin{
- Name: "re_match",
- Decl: types.NewFunction(
- types.Args(
- types.S,
- types.S,
- ),
- types.B,
- ),
- deprecated: true,
-}
+var RegexMatchDeprecated = v1.RegexMatchDeprecated
// All takes a list and returns true if all of the items
// are true. A collection of length 0 returns true.
-var All = &Builtin{
- Name: "all",
- Decl: types.NewFunction(
- types.Args(
- types.NewAny(
- types.NewSet(types.A),
- types.NewArray(nil, types.A),
- ),
- ),
- types.B,
- ),
- deprecated: true,
-}
+var All = v1.All
// Any takes a collection and returns true if any of the items
// is true. A collection of length 0 returns false.
-var Any = &Builtin{
- Name: "any",
- Decl: types.NewFunction(
- types.Args(
- types.NewAny(
- types.NewSet(types.A),
- types.NewArray(nil, types.A),
- ),
- ),
- types.B,
- ),
- deprecated: true,
-}
+var Any = v1.Any
// Builtin represents a built-in function supported by OPA. Every built-in
// function is uniquely identified by a name.
-type Builtin struct {
- Name string `json:"name"` // Unique name of built-in function, e.g., (arg1,arg2,...,argN)
- Description string `json:"description,omitempty"` // Description of what the built-in function does.
-
- // Categories of the built-in function. Omitted for namespaced
- // built-ins, i.e. "array.concat" is taken to be of the "array" category.
- // "minus" for example, is part of two categories: numbers and sets. (NOTE(sr): aspirational)
- Categories []string `json:"categories,omitempty"`
-
- Decl *types.Function `json:"decl"` // Built-in function type declaration.
- Infix string `json:"infix,omitempty"` // Unique name of infix operator. Default should be unset.
- Relation bool `json:"relation,omitempty"` // Indicates if the built-in acts as a relation.
- deprecated bool // Indicates if the built-in has been deprecated.
- Nondeterministic bool `json:"nondeterministic,omitempty"` // Indicates if the built-in returns non-deterministic results.
-}
-
-// category is a helper for specifying a Builtin's Categories
-func category(cs ...string) []string {
- return cs
-}
-
-// Minimal returns a shallow copy of b with the descriptions and categories and
-// named arguments stripped out.
-func (b *Builtin) Minimal() *Builtin {
- cpy := *b
- fargs := b.Decl.FuncArgs()
- if fargs.Variadic != nil {
- cpy.Decl = types.NewVariadicFunction(fargs.Args, fargs.Variadic, b.Decl.Result())
- } else {
- cpy.Decl = types.NewFunction(fargs.Args, b.Decl.Result())
- }
- cpy.Categories = nil
- cpy.Description = ""
- return &cpy
-}
-
-// IsDeprecated returns true if the Builtin function is deprecated and will be removed in a future release.
-func (b *Builtin) IsDeprecated() bool {
- return b.deprecated
-}
-
-// IsDeterministic returns true if the Builtin function returns non-deterministic results.
-func (b *Builtin) IsNondeterministic() bool {
- return b.Nondeterministic
-}
-
-// Expr creates a new expression for the built-in with the given operands.
-func (b *Builtin) Expr(operands ...*Term) *Expr {
- ts := make([]*Term, len(operands)+1)
- ts[0] = NewTerm(b.Ref())
- for i := range operands {
- ts[i+1] = operands[i]
- }
- return &Expr{
- Terms: ts,
- }
-}
-
-// Call creates a new term for the built-in with the given operands.
-func (b *Builtin) Call(operands ...*Term) *Term {
- call := make(Call, len(operands)+1)
- call[0] = NewTerm(b.Ref())
- for i := range operands {
- call[i+1] = operands[i]
- }
- return NewTerm(call)
-}
-
-// Ref returns a Ref that refers to the built-in function.
-func (b *Builtin) Ref() Ref {
- parts := strings.Split(b.Name, ".")
- ref := make(Ref, len(parts))
- ref[0] = VarTerm(parts[0])
- for i := 1; i < len(parts); i++ {
- ref[i] = StringTerm(parts[i])
- }
- return ref
-}
-
-// IsTargetPos returns true if a variable in the i-th position will be bound by
-// evaluating the call expression.
-func (b *Builtin) IsTargetPos(i int) bool {
- return len(b.Decl.FuncArgs().Args) == i
-}
-
-func init() {
- BuiltinMap = map[string]*Builtin{}
- for _, b := range DefaultBuiltins {
- RegisterBuiltin(b)
- }
-}
+type Builtin = v1.Builtin
diff --git a/vendor/github.com/open-policy-agent/opa/ast/capabilities.go b/vendor/github.com/open-policy-agent/opa/ast/capabilities.go
index 3b95d79e57..bc7278a885 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/capabilities.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/capabilities.go
@@ -5,228 +5,54 @@
package ast
import (
- "bytes"
- _ "embed"
- "encoding/json"
- "fmt"
"io"
- "os"
- "sort"
- "strings"
- caps "github.com/open-policy-agent/opa/capabilities"
- "github.com/open-policy-agent/opa/internal/semver"
- "github.com/open-policy-agent/opa/internal/wasm/sdk/opa/capabilities"
- "github.com/open-policy-agent/opa/util"
+ v1 "github.com/open-policy-agent/opa/v1/ast"
)
// VersonIndex contains an index from built-in function name, language feature,
// and future rego keyword to version number. During the build, this is used to
// create an index of the minimum version required for the built-in/feature/kw.
-type VersionIndex struct {
- Builtins map[string]semver.Version `json:"builtins"`
- Features map[string]semver.Version `json:"features"`
- Keywords map[string]semver.Version `json:"keywords"`
-}
-
-// NOTE(tsandall): this file is generated by internal/cmd/genversionindex/main.go
-// and run as part of go:generate. We generate the version index as part of the
-// build process because it's relatively expensive to build (it takes ~500ms on
-// my machine) and never changes.
-//
-//go:embed version_index.json
-var versionIndexBs []byte
-
-var minVersionIndex = func() VersionIndex {
- var vi VersionIndex
- err := json.Unmarshal(versionIndexBs, &vi)
- if err != nil {
- panic(err)
- }
- return vi
-}()
+type VersionIndex = v1.VersionIndex
// In the compiler, we used this to check that we're OK working with ref heads.
// If this isn't present, we'll fail. This is to ensure that older versions of
// OPA can work with policies that we're compiling -- if they don't know ref
// heads, they wouldn't be able to parse them.
-const FeatureRefHeadStringPrefixes = "rule_head_ref_string_prefixes"
-const FeatureRefHeads = "rule_head_refs"
-const FeatureRegoV1Import = "rego_v1_import"
+const FeatureRefHeadStringPrefixes = v1.FeatureRefHeadStringPrefixes
+const FeatureRefHeads = v1.FeatureRefHeads
+const FeatureRegoV1 = v1.FeatureRegoV1
+const FeatureRegoV1Import = v1.FeatureRegoV1Import
// Capabilities defines a structure containing data that describes the capabilities
// or features supported by a particular version of OPA.
-type Capabilities struct {
- Builtins []*Builtin `json:"builtins,omitempty"`
- FutureKeywords []string `json:"future_keywords,omitempty"`
- WasmABIVersions []WasmABIVersion `json:"wasm_abi_versions,omitempty"`
-
- // Features is a bit of a mixed bag for checking that an older version of OPA
- // is able to do what needs to be done.
- // TODO(sr): find better words ^^
- Features []string `json:"features,omitempty"`
-
- // allow_net is an array of hostnames or IP addresses, that an OPA instance is
- // allowed to connect to.
- // If omitted, ANY host can be connected to. If empty, NO host can be connected to.
- // As of now, this only controls fetching remote refs for using JSON Schemas in
- // the type checker.
- // TODO(sr): support ports to further restrict connection peers
- // TODO(sr): support restricting `http.send` using the same mechanism (see https://github.com/open-policy-agent/opa/issues/3665)
- AllowNet []string `json:"allow_net,omitempty"`
-}
+type Capabilities = v1.Capabilities
// WasmABIVersion captures the Wasm ABI version. Its `Minor` version is indicating
// backwards-compatible changes.
-type WasmABIVersion struct {
- Version int `json:"version"`
- Minor int `json:"minor_version"`
-}
+type WasmABIVersion = v1.WasmABIVersion
// CapabilitiesForThisVersion returns the capabilities of this version of OPA.
func CapabilitiesForThisVersion() *Capabilities {
- f := &Capabilities{}
-
- for _, vers := range capabilities.ABIVersions() {
- f.WasmABIVersions = append(f.WasmABIVersions, WasmABIVersion{Version: vers[0], Minor: vers[1]})
- }
-
- f.Builtins = make([]*Builtin, len(Builtins))
- copy(f.Builtins, Builtins)
- sort.Slice(f.Builtins, func(i, j int) bool {
- return f.Builtins[i].Name < f.Builtins[j].Name
- })
-
- for kw := range futureKeywords {
- f.FutureKeywords = append(f.FutureKeywords, kw)
- }
- sort.Strings(f.FutureKeywords)
-
- f.Features = []string{
- FeatureRefHeadStringPrefixes,
- FeatureRefHeads,
- FeatureRegoV1Import,
- }
-
- return f
+ return v1.CapabilitiesForThisVersion(v1.CapabilitiesRegoVersion(DefaultRegoVersion))
}
// LoadCapabilitiesJSON loads a JSON serialized capabilities structure from the reader r.
func LoadCapabilitiesJSON(r io.Reader) (*Capabilities, error) {
- d := util.NewJSONDecoder(r)
- var c Capabilities
- return &c, d.Decode(&c)
+ return v1.LoadCapabilitiesJSON(r)
}
// LoadCapabilitiesVersion loads a JSON serialized capabilities structure from the specific version.
func LoadCapabilitiesVersion(version string) (*Capabilities, error) {
- cvs, err := LoadCapabilitiesVersions()
- if err != nil {
- return nil, err
- }
-
- for _, cv := range cvs {
- if cv == version {
- cont, err := caps.FS.ReadFile(cv + ".json")
- if err != nil {
- return nil, err
- }
-
- return LoadCapabilitiesJSON(bytes.NewReader(cont))
- }
-
- }
- return nil, fmt.Errorf("no capabilities version found %v", version)
+ return v1.LoadCapabilitiesVersion(version)
}
// LoadCapabilitiesFile loads a JSON serialized capabilities structure from a file.
func LoadCapabilitiesFile(file string) (*Capabilities, error) {
- fd, err := os.Open(file)
- if err != nil {
- return nil, err
- }
- defer fd.Close()
- return LoadCapabilitiesJSON(fd)
+ return v1.LoadCapabilitiesFile(file)
}
// LoadCapabilitiesVersions loads all capabilities versions
func LoadCapabilitiesVersions() ([]string, error) {
- ents, err := caps.FS.ReadDir(".")
- if err != nil {
- return nil, err
- }
-
- capabilitiesVersions := make([]string, 0, len(ents))
- for _, ent := range ents {
- capabilitiesVersions = append(capabilitiesVersions, strings.Replace(ent.Name(), ".json", "", 1))
- }
- return capabilitiesVersions, nil
-}
-
-// MinimumCompatibleVersion returns the minimum compatible OPA version based on
-// the built-ins, features, and keywords in c.
-func (c *Capabilities) MinimumCompatibleVersion() (string, bool) {
-
- var maxVersion semver.Version
-
- // this is the oldest OPA release that includes capabilities
- if err := maxVersion.Set("0.17.0"); err != nil {
- panic("unreachable")
- }
-
- for _, bi := range c.Builtins {
- v, ok := minVersionIndex.Builtins[bi.Name]
- if !ok {
- return "", false
- }
- if v.Compare(maxVersion) > 0 {
- maxVersion = v
- }
- }
-
- for _, kw := range c.FutureKeywords {
- v, ok := minVersionIndex.Keywords[kw]
- if !ok {
- return "", false
- }
- if v.Compare(maxVersion) > 0 {
- maxVersion = v
- }
- }
-
- for _, feat := range c.Features {
- v, ok := minVersionIndex.Features[feat]
- if !ok {
- return "", false
- }
- if v.Compare(maxVersion) > 0 {
- maxVersion = v
- }
- }
-
- return maxVersion.String(), true
-}
-
-func (c *Capabilities) ContainsFeature(feature string) bool {
- for _, f := range c.Features {
- if f == feature {
- return true
- }
- }
- return false
-}
-
-// addBuiltinSorted inserts a built-in into c in sorted order. An existing built-in with the same name
-// will be overwritten.
-func (c *Capabilities) addBuiltinSorted(bi *Builtin) {
- i := sort.Search(len(c.Builtins), func(x int) bool {
- return c.Builtins[x].Name >= bi.Name
- })
- if i < len(c.Builtins) && bi.Name == c.Builtins[i].Name {
- c.Builtins[i] = bi
- return
- }
- c.Builtins = append(c.Builtins, nil)
- copy(c.Builtins[i+1:], c.Builtins[i:])
- c.Builtins[i] = bi
+ return v1.LoadCapabilitiesVersions()
}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/check.go b/vendor/github.com/open-policy-agent/opa/ast/check.go
index 03d31123cf..4cf00436df 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/check.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/check.go
@@ -5,1317 +5,18 @@
package ast
import (
- "fmt"
- "sort"
- "strings"
-
- "github.com/open-policy-agent/opa/types"
- "github.com/open-policy-agent/opa/util"
+ v1 "github.com/open-policy-agent/opa/v1/ast"
)
-type varRewriter func(Ref) Ref
-
-// exprChecker defines the interface for executing type checking on a single
-// expression. The exprChecker must update the provided TypeEnv with inferred
-// types of vars.
-type exprChecker func(*TypeEnv, *Expr) *Error
-
-// typeChecker implements type checking on queries and rules. Errors are
-// accumulated on the typeChecker so that a single run can report multiple
-// issues.
-type typeChecker struct {
- builtins map[string]*Builtin
- required *Capabilities
- errs Errors
- exprCheckers map[string]exprChecker
- varRewriter varRewriter
- ss *SchemaSet
- allowNet []string
- input types.Type
- allowUndefinedFuncs bool
- schemaTypes map[string]types.Type
-}
-
-// newTypeChecker returns a new typeChecker object that has no errors.
-func newTypeChecker() *typeChecker {
- return &typeChecker{
- builtins: make(map[string]*Builtin),
- schemaTypes: make(map[string]types.Type),
- exprCheckers: map[string]exprChecker{
- "eq": checkExprEq,
- },
- }
-}
-
-func (tc *typeChecker) newEnv(exist *TypeEnv) *TypeEnv {
- if exist != nil {
- return exist.wrap()
- }
- env := newTypeEnv(tc.copy)
- if tc.input != nil {
- env.tree.Put(InputRootRef, tc.input)
- }
- return env
-}
-
-func (tc *typeChecker) copy() *typeChecker {
- return newTypeChecker().
- WithVarRewriter(tc.varRewriter).
- WithSchemaSet(tc.ss).
- WithAllowNet(tc.allowNet).
- WithInputType(tc.input).
- WithAllowUndefinedFunctionCalls(tc.allowUndefinedFuncs).
- WithBuiltins(tc.builtins).
- WithRequiredCapabilities(tc.required)
-}
-
-func (tc *typeChecker) WithRequiredCapabilities(c *Capabilities) *typeChecker {
- tc.required = c
- return tc
-}
-
-func (tc *typeChecker) WithBuiltins(builtins map[string]*Builtin) *typeChecker {
- tc.builtins = builtins
- return tc
-}
-
-func (tc *typeChecker) WithSchemaSet(ss *SchemaSet) *typeChecker {
- tc.ss = ss
- return tc
-}
-
-func (tc *typeChecker) WithAllowNet(hosts []string) *typeChecker {
- tc.allowNet = hosts
- return tc
-}
-
-func (tc *typeChecker) WithVarRewriter(f varRewriter) *typeChecker {
- tc.varRewriter = f
- return tc
-}
-
-func (tc *typeChecker) WithInputType(tpe types.Type) *typeChecker {
- tc.input = tpe
- return tc
-}
-
-// WithAllowUndefinedFunctionCalls sets the type checker to allow references to undefined functions.
-// Additionally, the 'CheckUndefinedFuncs' and 'CheckSafetyRuleBodies' compiler stages are skipped.
-func (tc *typeChecker) WithAllowUndefinedFunctionCalls(allow bool) *typeChecker {
- tc.allowUndefinedFuncs = allow
- return tc
-}
-
-// Env returns a type environment for the specified built-ins with any other
-// global types configured on the checker. In practice, this is the default
-// environment that other statements will be checked against.
-func (tc *typeChecker) Env(builtins map[string]*Builtin) *TypeEnv {
- env := tc.newEnv(nil)
- for _, bi := range builtins {
- env.tree.Put(bi.Ref(), bi.Decl)
- }
- return env
-}
-
-// CheckBody runs type checking on the body and returns a TypeEnv if no errors
-// are found. The resulting TypeEnv wraps the provided one. The resulting
-// TypeEnv will be able to resolve types of vars contained in the body.
-func (tc *typeChecker) CheckBody(env *TypeEnv, body Body) (*TypeEnv, Errors) {
-
- errors := []*Error{}
- env = tc.newEnv(env)
-
- WalkExprs(body, func(expr *Expr) bool {
-
- closureErrs := tc.checkClosures(env, expr)
- for _, err := range closureErrs {
- errors = append(errors, err)
- }
-
- hasClosureErrors := len(closureErrs) > 0
-
- vis := newRefChecker(env, tc.varRewriter)
- NewGenericVisitor(vis.Visit).Walk(expr)
- for _, err := range vis.errs {
- errors = append(errors, err)
- }
-
- hasRefErrors := len(vis.errs) > 0
-
- if err := tc.checkExpr(env, expr); err != nil {
- // Suppress this error if a more actionable one has occurred. In
- // this case, if an error occurred in a ref or closure contained in
- // this expression, and the error is due to a nil type, then it's
- // likely to be the result of the more specific error.
- skip := (hasClosureErrors || hasRefErrors) && causedByNilType(err)
- if !skip {
- errors = append(errors, err)
- }
- }
- return true
- })
-
- tc.err(errors)
- return env, errors
-}
-
-// CheckTypes runs type checking on the rules returns a TypeEnv if no errors
-// are found. The resulting TypeEnv wraps the provided one. The resulting
-// TypeEnv will be able to resolve types of refs that refer to rules.
-func (tc *typeChecker) CheckTypes(env *TypeEnv, sorted []util.T, as *AnnotationSet) (*TypeEnv, Errors) {
- env = tc.newEnv(env)
- for _, s := range sorted {
- tc.checkRule(env, as, s.(*Rule))
- }
- tc.errs.Sort()
- return env, tc.errs
-}
-
-func (tc *typeChecker) checkClosures(env *TypeEnv, expr *Expr) Errors {
- var result Errors
- WalkClosures(expr, func(x interface{}) bool {
- switch x := x.(type) {
- case *ArrayComprehension:
- _, errs := tc.copy().CheckBody(env, x.Body)
- if len(errs) > 0 {
- result = errs
- return true
- }
- case *SetComprehension:
- _, errs := tc.copy().CheckBody(env, x.Body)
- if len(errs) > 0 {
- result = errs
- return true
- }
- case *ObjectComprehension:
- _, errs := tc.copy().CheckBody(env, x.Body)
- if len(errs) > 0 {
- result = errs
- return true
- }
- }
- return false
- })
- return result
-}
-
-func (tc *typeChecker) getSchemaType(schemaAnnot *SchemaAnnotation, rule *Rule) (types.Type, *Error) {
- if refType, exists := tc.schemaTypes[schemaAnnot.Schema.String()]; exists {
- return refType, nil
- }
-
- refType, err := processAnnotation(tc.ss, schemaAnnot, rule, tc.allowNet)
- if err != nil {
- return nil, err
- }
-
- if refType == nil {
- return nil, nil
- }
-
- tc.schemaTypes[schemaAnnot.Schema.String()] = refType
- return refType, nil
-
-}
-
-func (tc *typeChecker) checkRule(env *TypeEnv, as *AnnotationSet, rule *Rule) {
-
- env = env.wrap()
-
- schemaAnnots := getRuleAnnotation(as, rule)
- for _, schemaAnnot := range schemaAnnots {
- refType, err := tc.getSchemaType(schemaAnnot, rule)
- if err != nil {
- tc.err([]*Error{err})
- continue
- }
-
- ref := schemaAnnot.Path
- // if we do not have a ref or a reftype, we should not evaluate this rule.
- if ref == nil || refType == nil {
- continue
- }
-
- prefixRef, t := getPrefix(env, ref)
- if t == nil || len(prefixRef) == len(ref) {
- env.tree.Put(ref, refType)
- } else {
- newType, err := override(ref[len(prefixRef):], t, refType, rule)
- if err != nil {
- tc.err([]*Error{err})
- continue
- }
- env.tree.Put(prefixRef, newType)
- }
- }
-
- cpy, err := tc.CheckBody(env, rule.Body)
- env = env.next
- path := rule.Ref()
-
- if len(err) > 0 {
- // if the rule/function contains an error, add it to the type env so
- // that expressions that refer to this rule/function do not encounter
- // type errors.
- env.tree.Put(path, types.A)
- return
- }
-
- var tpe types.Type
-
- if len(rule.Head.Args) > 0 {
- // If args are not referred to in body, infer as any.
- WalkVars(rule.Head.Args, func(v Var) bool {
- if cpy.Get(v) == nil {
- cpy.tree.PutOne(v, types.A)
- }
- return false
- })
-
- // Construct function type.
- args := make([]types.Type, len(rule.Head.Args))
- for i := 0; i < len(rule.Head.Args); i++ {
- args[i] = cpy.Get(rule.Head.Args[i])
- }
-
- f := types.NewFunction(args, cpy.Get(rule.Head.Value))
-
- tpe = f
- } else {
- switch rule.Head.RuleKind() {
- case SingleValue:
- typeV := cpy.Get(rule.Head.Value)
- if !path.IsGround() {
- // e.g. store object[string: whatever] at data.p.q.r, not data.p.q.r[x] or data.p.q.r[x].y[z]
- objPath := path.DynamicSuffix()
- path = path.GroundPrefix()
-
- var err error
- tpe, err = nestedObject(cpy, objPath, typeV)
- if err != nil {
- tc.err([]*Error{NewError(TypeErr, rule.Head.Location, err.Error())})
- tpe = nil
- }
- } else {
- if typeV != nil {
- tpe = typeV
- }
- }
- case MultiValue:
- typeK := cpy.Get(rule.Head.Key)
- if typeK != nil {
- tpe = types.NewSet(typeK)
- }
- }
- }
-
- if tpe != nil {
- env.tree.Insert(path, tpe, env)
- }
-}
-
-// nestedObject creates a nested structure of object types, where each term on path corresponds to a level in the
-// nesting. Each term in the path only contributes to the dynamic portion of its corresponding object.
-func nestedObject(env *TypeEnv, path Ref, tpe types.Type) (types.Type, error) {
- if len(path) == 0 {
- return tpe, nil
- }
-
- k := path[0]
- typeV, err := nestedObject(env, path[1:], tpe)
- if err != nil {
- return nil, err
- }
- if typeV == nil {
- return nil, nil
- }
-
- var dynamicProperty *types.DynamicProperty
- typeK := env.Get(k)
- if typeK == nil {
- return nil, nil
- }
- dynamicProperty = types.NewDynamicProperty(typeK, typeV)
-
- return types.NewObject(nil, dynamicProperty), nil
-}
-
-func (tc *typeChecker) checkExpr(env *TypeEnv, expr *Expr) *Error {
- if err := tc.checkExprWith(env, expr, 0); err != nil {
- return err
- }
- if !expr.IsCall() {
- return nil
- }
-
- operator := expr.Operator().String()
-
- // If the type checker wasn't provided with a required capabilities
- // structure then just skip. In some cases, type checking might be run
- // without the need to record what builtins are required.
- if tc.required != nil {
- if bi, ok := tc.builtins[operator]; ok {
- tc.required.addBuiltinSorted(bi)
- }
- }
-
- checker := tc.exprCheckers[operator]
- if checker != nil {
- return checker(env, expr)
- }
-
- return tc.checkExprBuiltin(env, expr)
-}
-
-func (tc *typeChecker) checkExprBuiltin(env *TypeEnv, expr *Expr) *Error {
-
- args := expr.Operands()
- pre := getArgTypes(env, args)
-
- // NOTE(tsandall): undefined functions will have been caught earlier in the
- // compiler. We check for undefined functions before the safety check so
- // that references to non-existent functions result in undefined function
- // errors as opposed to unsafe var errors.
- //
- // We cannot run type checking before the safety check because part of the
- // type checker relies on reordering (in particular for references to local
- // vars).
- name := expr.Operator()
- tpe := env.Get(name)
-
- if tpe == nil {
- if tc.allowUndefinedFuncs {
- return nil
- }
- return NewError(TypeErr, expr.Location, "undefined function %v", name)
- }
-
- // check if the expression refers to a function that contains an error
- _, ok := tpe.(types.Any)
- if ok {
- return nil
- }
-
- ftpe, ok := tpe.(*types.Function)
- if !ok {
- return NewError(TypeErr, expr.Location, "undefined function %v", name)
- }
-
- fargs := ftpe.FuncArgs()
- namedFargs := ftpe.NamedFuncArgs()
-
- if ftpe.Result() != nil {
- fargs.Args = append(fargs.Args, ftpe.Result())
- namedFargs.Args = append(namedFargs.Args, ftpe.NamedResult())
- }
-
- if len(args) > len(fargs.Args) && fargs.Variadic == nil {
- return newArgError(expr.Location, name, "too many arguments", pre, namedFargs)
- }
-
- if len(args) < len(ftpe.FuncArgs().Args) {
- return newArgError(expr.Location, name, "too few arguments", pre, namedFargs)
- }
-
- for i := range args {
- if !unify1(env, args[i], fargs.Arg(i), false) {
- post := make([]types.Type, len(args))
- for i := range args {
- post[i] = env.Get(args[i])
- }
- return newArgError(expr.Location, name, "invalid argument(s)", post, namedFargs)
- }
- }
-
- return nil
-}
-
-func checkExprEq(env *TypeEnv, expr *Expr) *Error {
-
- pre := getArgTypes(env, expr.Operands())
- exp := Equality.Decl.FuncArgs()
-
- if len(pre) < len(exp.Args) {
- return newArgError(expr.Location, expr.Operator(), "too few arguments", pre, exp)
- }
-
- if len(exp.Args) < len(pre) {
- return newArgError(expr.Location, expr.Operator(), "too many arguments", pre, exp)
- }
-
- a, b := expr.Operand(0), expr.Operand(1)
- typeA, typeB := env.Get(a), env.Get(b)
-
- if !unify2(env, a, typeA, b, typeB) {
- err := NewError(TypeErr, expr.Location, "match error")
- err.Details = &UnificationErrDetail{
- Left: typeA,
- Right: typeB,
- }
- return err
- }
-
- return nil
-}
-
-func (tc *typeChecker) checkExprWith(env *TypeEnv, expr *Expr, i int) *Error {
- if i == len(expr.With) {
- return nil
- }
-
- target, value := expr.With[i].Target, expr.With[i].Value
- targetType, valueType := env.Get(target), env.Get(value)
-
- if t, ok := targetType.(*types.Function); ok { // built-in function replacement
- switch v := valueType.(type) {
- case *types.Function: // ...by function
- if !unifies(targetType, valueType) {
- return newArgError(expr.With[i].Loc(), target.Value.(Ref), "arity mismatch", v.FuncArgs().Args, t.NamedFuncArgs())
- }
- default: // ... by value, nothing to check
- }
- }
-
- return tc.checkExprWith(env, expr, i+1)
-}
-
-func unify2(env *TypeEnv, a *Term, typeA types.Type, b *Term, typeB types.Type) bool {
-
- nilA := types.Nil(typeA)
- nilB := types.Nil(typeB)
-
- if nilA && !nilB {
- return unify1(env, a, typeB, false)
- } else if nilB && !nilA {
- return unify1(env, b, typeA, false)
- } else if !nilA && !nilB {
- return unifies(typeA, typeB)
- }
-
- switch a.Value.(type) {
- case *Array:
- return unify2Array(env, a, b)
- case *object:
- return unify2Object(env, a, b)
- case Var:
- switch b.Value.(type) {
- case Var:
- return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false)
- case *Array:
- return unify2Array(env, b, a)
- case *object:
- return unify2Object(env, b, a)
- }
- }
-
- return false
-}
-
-func unify2Array(env *TypeEnv, a *Term, b *Term) bool {
- arr := a.Value.(*Array)
- switch bv := b.Value.(type) {
- case *Array:
- if arr.Len() == bv.Len() {
- for i := 0; i < arr.Len(); i++ {
- if !unify2(env, arr.Elem(i), env.Get(arr.Elem(i)), bv.Elem(i), env.Get(bv.Elem(i))) {
- return false
- }
- }
- return true
- }
- case Var:
- return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false)
- }
- return false
-}
-
-func unify2Object(env *TypeEnv, a *Term, b *Term) bool {
- obj := a.Value.(Object)
- switch bv := b.Value.(type) {
- case *object:
- cv := obj.Intersect(bv)
- if obj.Len() == bv.Len() && bv.Len() == len(cv) {
- for i := range cv {
- if !unify2(env, cv[i][1], env.Get(cv[i][1]), cv[i][2], env.Get(cv[i][2])) {
- return false
- }
- }
- return true
- }
- case Var:
- return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false)
- }
- return false
-}
-
-func unify1(env *TypeEnv, term *Term, tpe types.Type, union bool) bool {
- switch v := term.Value.(type) {
- case *Array:
- switch tpe := tpe.(type) {
- case *types.Array:
- return unify1Array(env, v, tpe, union)
- case types.Any:
- if types.Compare(tpe, types.A) == 0 {
- for i := 0; i < v.Len(); i++ {
- unify1(env, v.Elem(i), types.A, true)
- }
- return true
- }
- unifies := false
- for i := range tpe {
- unifies = unify1(env, term, tpe[i], true) || unifies
- }
- return unifies
- }
- return false
- case *object:
- switch tpe := tpe.(type) {
- case *types.Object:
- return unify1Object(env, v, tpe, union)
- case types.Any:
- if types.Compare(tpe, types.A) == 0 {
- v.Foreach(func(key, value *Term) {
- unify1(env, key, types.A, true)
- unify1(env, value, types.A, true)
- })
- return true
- }
- unifies := false
- for i := range tpe {
- unifies = unify1(env, term, tpe[i], true) || unifies
- }
- return unifies
- }
- return false
- case Set:
- switch tpe := tpe.(type) {
- case *types.Set:
- return unify1Set(env, v, tpe, union)
- case types.Any:
- if types.Compare(tpe, types.A) == 0 {
- v.Foreach(func(elem *Term) {
- unify1(env, elem, types.A, true)
- })
- return true
- }
- unifies := false
- for i := range tpe {
- unifies = unify1(env, term, tpe[i], true) || unifies
- }
- return unifies
- }
- return false
- case Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension:
- return unifies(env.Get(v), tpe)
- case Var:
- if !union {
- if exist := env.Get(v); exist != nil {
- return unifies(exist, tpe)
- }
- env.tree.PutOne(term.Value, tpe)
- } else {
- env.tree.PutOne(term.Value, types.Or(env.Get(v), tpe))
- }
- return true
- default:
- if !IsConstant(v) {
- panic("unreachable")
- }
- return unifies(env.Get(term), tpe)
- }
-}
-
-func unify1Array(env *TypeEnv, val *Array, tpe *types.Array, union bool) bool {
- if val.Len() != tpe.Len() && tpe.Dynamic() == nil {
- return false
- }
- for i := 0; i < val.Len(); i++ {
- if !unify1(env, val.Elem(i), tpe.Select(i), union) {
- return false
- }
- }
- return true
-}
-
-func unify1Object(env *TypeEnv, val Object, tpe *types.Object, union bool) bool {
- if val.Len() != len(tpe.Keys()) && tpe.DynamicValue() == nil {
- return false
- }
- stop := val.Until(func(k, v *Term) bool {
- if IsConstant(k.Value) {
- if child := selectConstant(tpe, k); child != nil {
- if !unify1(env, v, child, union) {
- return true
- }
- } else {
- return true
- }
- } else {
- // Inferring type of value under dynamic key would involve unioning
- // with all property values of tpe whose keys unify. For now, type
- // these values as Any. We can investigate stricter inference in
- // the future.
- unify1(env, v, types.A, union)
- }
- return false
- })
- return !stop
-}
-
-func unify1Set(env *TypeEnv, val Set, tpe *types.Set, union bool) bool {
- of := types.Values(tpe)
- return !val.Until(func(elem *Term) bool {
- return !unify1(env, elem, of, union)
- })
-}
-
-func (tc *typeChecker) err(errors []*Error) {
- tc.errs = append(tc.errs, errors...)
-}
-
-type refChecker struct {
- env *TypeEnv
- errs Errors
- varRewriter varRewriter
-}
-
-func rewriteVarsNop(node Ref) Ref {
- return node
-}
-
-func newRefChecker(env *TypeEnv, f varRewriter) *refChecker {
-
- if f == nil {
- f = rewriteVarsNop
- }
-
- return &refChecker{
- env: env,
- errs: nil,
- varRewriter: f,
- }
-}
-
-func (rc *refChecker) Visit(x interface{}) bool {
- switch x := x.(type) {
- case *ArrayComprehension, *ObjectComprehension, *SetComprehension:
- return true
- case *Expr:
- switch terms := x.Terms.(type) {
- case []*Term:
- for i := 1; i < len(terms); i++ {
- NewGenericVisitor(rc.Visit).Walk(terms[i])
- }
- return true
- case *Term:
- NewGenericVisitor(rc.Visit).Walk(terms)
- return true
- }
- case Ref:
- if err := rc.checkApply(rc.env, x); err != nil {
- rc.errs = append(rc.errs, err)
- return true
- }
- if err := rc.checkRef(rc.env, rc.env.tree, x, 0); err != nil {
- rc.errs = append(rc.errs, err)
- }
- }
- return false
-}
-
-func (rc *refChecker) checkApply(curr *TypeEnv, ref Ref) *Error {
- switch tpe := curr.Get(ref).(type) {
- case *types.Function: // NOTE(sr): We don't support first-class functions, except for `with`.
- return newRefErrUnsupported(ref[0].Location, rc.varRewriter(ref), len(ref)-1, tpe)
- }
-
- return nil
-}
-
-func (rc *refChecker) checkRef(curr *TypeEnv, node *typeTreeNode, ref Ref, idx int) *Error {
-
- if idx == len(ref) {
- return nil
- }
-
- head := ref[idx]
-
- // NOTE(sr): as long as package statements are required, this isn't possible:
- // the shortest possible rule ref is data.a.b (b is idx 2), idx 1 and 2 need to
- // be strings or vars.
- if idx == 1 || idx == 2 {
- switch head.Value.(type) {
- case Var, String: // OK
- default:
- have := rc.env.Get(head.Value)
- return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, have, types.S, getOneOfForNode(node))
- }
- }
-
- if v, ok := head.Value.(Var); ok && idx != 0 {
- tpe := types.Keys(rc.env.getRefRecExtent(node))
- if exist := rc.env.Get(v); exist != nil {
- if !unifies(tpe, exist) {
- return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, tpe, getOneOfForNode(node))
- }
- } else {
- rc.env.tree.PutOne(v, tpe)
- }
- }
-
- child := node.Child(head.Value)
- if child == nil {
- // NOTE(sr): idx is reset on purpose: we start over
- switch {
- case curr.next != nil:
- next := curr.next
- return rc.checkRef(next, next.tree, ref, 0)
-
- case RootDocumentNames.Contains(ref[0]):
- if idx != 0 {
- node.Children().Iter(func(_, child util.T) bool {
- _ = rc.checkRef(curr, child.(*typeTreeNode), ref, idx+1) // ignore error
- return false
- })
- return nil
- }
- return rc.checkRefLeaf(types.A, ref, 1)
-
- default:
- return rc.checkRefLeaf(types.A, ref, 0)
- }
- }
-
- if child.Leaf() {
- return rc.checkRefLeaf(child.Value(), ref, idx+1)
- }
-
- return rc.checkRef(curr, child, ref, idx+1)
-}
-
-func (rc *refChecker) checkRefLeaf(tpe types.Type, ref Ref, idx int) *Error {
-
- if idx == len(ref) {
- return nil
- }
-
- head := ref[idx]
-
- keys := types.Keys(tpe)
- if keys == nil {
- return newRefErrUnsupported(ref[0].Location, rc.varRewriter(ref), idx-1, tpe)
- }
-
- switch value := head.Value.(type) {
-
- case Var:
- if exist := rc.env.Get(value); exist != nil {
- if !unifies(exist, keys) {
- return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe))
- }
- } else {
- rc.env.tree.PutOne(value, types.Keys(tpe))
- }
-
- case Ref:
- if exist := rc.env.Get(value); exist != nil {
- if !unifies(exist, keys) {
- return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe))
- }
- }
-
- case *Array, Object, Set:
- if !unify1(rc.env, head, keys, false) {
- return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, rc.env.Get(head), keys, nil)
- }
-
- default:
- child := selectConstant(tpe, head)
- if child == nil {
- return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, nil, types.Keys(tpe), getOneOfForType(tpe))
- }
- return rc.checkRefLeaf(child, ref, idx+1)
- }
-
- return rc.checkRefLeaf(types.Values(tpe), ref, idx+1)
-}
-
-func unifies(a, b types.Type) bool {
-
- if a == nil || b == nil {
- return false
- }
-
- anyA, ok1 := a.(types.Any)
- if ok1 {
- if unifiesAny(anyA, b) {
- return true
- }
- }
-
- anyB, ok2 := b.(types.Any)
- if ok2 {
- if unifiesAny(anyB, a) {
- return true
- }
- }
-
- if ok1 || ok2 {
- return false
- }
-
- switch a := a.(type) {
- case types.Null:
- _, ok := b.(types.Null)
- return ok
- case types.Boolean:
- _, ok := b.(types.Boolean)
- return ok
- case types.Number:
- _, ok := b.(types.Number)
- return ok
- case types.String:
- _, ok := b.(types.String)
- return ok
- case *types.Array:
- b, ok := b.(*types.Array)
- if !ok {
- return false
- }
- return unifiesArrays(a, b)
- case *types.Object:
- b, ok := b.(*types.Object)
- if !ok {
- return false
- }
- return unifiesObjects(a, b)
- case *types.Set:
- b, ok := b.(*types.Set)
- if !ok {
- return false
- }
- return unifies(types.Values(a), types.Values(b))
- case *types.Function:
- // NOTE(sr): variadic functions can only be internal ones, and we've forbidden
- // their replacement via `with`; so we disregard variadic here
- if types.Arity(a) == types.Arity(b) {
- b := b.(*types.Function)
- for i := range a.FuncArgs().Args {
- if !unifies(a.FuncArgs().Arg(i), b.FuncArgs().Arg(i)) {
- return false
- }
- }
- return true
- }
- return false
- default:
- panic("unreachable")
- }
-}
-
-func unifiesAny(a types.Any, b types.Type) bool {
- if _, ok := b.(*types.Function); ok {
- return false
- }
- for i := range a {
- if unifies(a[i], b) {
- return true
- }
- }
- return len(a) == 0
-}
-
-func unifiesArrays(a, b *types.Array) bool {
-
- if !unifiesArraysStatic(a, b) {
- return false
- }
-
- if !unifiesArraysStatic(b, a) {
- return false
- }
-
- return a.Dynamic() == nil || b.Dynamic() == nil || unifies(a.Dynamic(), b.Dynamic())
-}
-
-func unifiesArraysStatic(a, b *types.Array) bool {
- if a.Len() != 0 {
- for i := 0; i < a.Len(); i++ {
- if !unifies(a.Select(i), b.Select(i)) {
- return false
- }
- }
- }
- return true
-}
-
-func unifiesObjects(a, b *types.Object) bool {
- if !unifiesObjectsStatic(a, b) {
- return false
- }
-
- if !unifiesObjectsStatic(b, a) {
- return false
- }
-
- return a.DynamicValue() == nil || b.DynamicValue() == nil || unifies(a.DynamicValue(), b.DynamicValue())
-}
-
-func unifiesObjectsStatic(a, b *types.Object) bool {
- for _, k := range a.Keys() {
- if !unifies(a.Select(k), b.Select(k)) {
- return false
- }
- }
- return true
-}
-
-// typeErrorCause defines an interface to determine the reason for a type
-// error. The type error details implement this interface so that type checking
-// can report more actionable errors.
-type typeErrorCause interface {
- nilType() bool
-}
-
-func causedByNilType(err *Error) bool {
- cause, ok := err.Details.(typeErrorCause)
- if !ok {
- return false
- }
- return cause.nilType()
-}
-
-// ArgErrDetail represents a generic argument error.
-type ArgErrDetail struct {
- Have []types.Type `json:"have"`
- Want types.FuncArgs `json:"want"`
-}
-
-// Lines returns the string representation of the detail.
-func (d *ArgErrDetail) Lines() []string {
- lines := make([]string, 2)
- lines[0] = "have: " + formatArgs(d.Have)
- lines[1] = "want: " + fmt.Sprint(d.Want)
- return lines
-}
-
-func (d *ArgErrDetail) nilType() bool {
- for i := range d.Have {
- if types.Nil(d.Have[i]) {
- return true
- }
- }
- return false
-}
-
// UnificationErrDetail describes a type mismatch error when two values are
// unified (e.g., x = [1,2,y]).
-type UnificationErrDetail struct {
- Left types.Type `json:"a"`
- Right types.Type `json:"b"`
-}
-
-func (a *UnificationErrDetail) nilType() bool {
- return types.Nil(a.Left) || types.Nil(a.Right)
-}
-
-// Lines returns the string representation of the detail.
-func (a *UnificationErrDetail) Lines() []string {
- lines := make([]string, 2)
- lines[0] = fmt.Sprint("left : ", types.Sprint(a.Left))
- lines[1] = fmt.Sprint("right : ", types.Sprint(a.Right))
- return lines
-}
+type UnificationErrDetail = v1.UnificationErrDetail
// RefErrUnsupportedDetail describes an undefined reference error where the
// referenced value does not support dereferencing (e.g., scalars).
-type RefErrUnsupportedDetail struct {
- Ref Ref `json:"ref"` // invalid ref
- Pos int `json:"pos"` // invalid element
- Have types.Type `json:"have"` // referenced type
-}
-
-// Lines returns the string representation of the detail.
-func (r *RefErrUnsupportedDetail) Lines() []string {
- lines := []string{
- r.Ref.String(),
- strings.Repeat("^", len(r.Ref[:r.Pos+1].String())),
- fmt.Sprintf("have: %v", r.Have),
- }
- return lines
-}
+type RefErrUnsupportedDetail = v1.RefErrUnsupportedDetail
// RefErrInvalidDetail describes an undefined reference error where the referenced
// value does not support the reference operand (e.g., missing object key,
// invalid key type, etc.)
-type RefErrInvalidDetail struct {
- Ref Ref `json:"ref"` // invalid ref
- Pos int `json:"pos"` // invalid element
- Have types.Type `json:"have,omitempty"` // type of invalid element (for var/ref elements)
- Want types.Type `json:"want"` // allowed type (for non-object values)
- OneOf []Value `json:"oneOf"` // allowed values (e.g., for object keys)
-}
-
-// Lines returns the string representation of the detail.
-func (r *RefErrInvalidDetail) Lines() []string {
- lines := []string{r.Ref.String()}
- offset := len(r.Ref[:r.Pos].String()) + 1
- pad := strings.Repeat(" ", offset)
- lines = append(lines, fmt.Sprintf("%s^", pad))
- if r.Have != nil {
- lines = append(lines, fmt.Sprintf("%shave (type): %v", pad, r.Have))
- } else {
- lines = append(lines, fmt.Sprintf("%shave: %v", pad, r.Ref[r.Pos]))
- }
- if len(r.OneOf) > 0 {
- lines = append(lines, fmt.Sprintf("%swant (one of): %v", pad, r.OneOf))
- } else {
- lines = append(lines, fmt.Sprintf("%swant (type): %v", pad, r.Want))
- }
- return lines
-}
-
-func formatArgs(args []types.Type) string {
- buf := make([]string, len(args))
- for i := range args {
- buf[i] = types.Sprint(args[i])
- }
- return "(" + strings.Join(buf, ", ") + ")"
-}
-
-func newRefErrInvalid(loc *Location, ref Ref, idx int, have, want types.Type, oneOf []Value) *Error {
- err := newRefError(loc, ref)
- err.Details = &RefErrInvalidDetail{
- Ref: ref,
- Pos: idx,
- Have: have,
- Want: want,
- OneOf: oneOf,
- }
- return err
-}
-
-func newRefErrUnsupported(loc *Location, ref Ref, idx int, have types.Type) *Error {
- err := newRefError(loc, ref)
- err.Details = &RefErrUnsupportedDetail{
- Ref: ref,
- Pos: idx,
- Have: have,
- }
- return err
-}
-
-func newRefError(loc *Location, ref Ref) *Error {
- return NewError(TypeErr, loc, "undefined ref: %v", ref)
-}
-
-func newArgError(loc *Location, builtinName Ref, msg string, have []types.Type, want types.FuncArgs) *Error {
- err := NewError(TypeErr, loc, "%v: %v", builtinName, msg)
- err.Details = &ArgErrDetail{
- Have: have,
- Want: want,
- }
- return err
-}
-
-func getOneOfForNode(node *typeTreeNode) (result []Value) {
- node.Children().Iter(func(k, _ util.T) bool {
- result = append(result, k.(Value))
- return false
- })
-
- sortValueSlice(result)
- return result
-}
-
-func getOneOfForType(tpe types.Type) (result []Value) {
- switch tpe := tpe.(type) {
- case *types.Object:
- for _, k := range tpe.Keys() {
- v, err := InterfaceToValue(k)
- if err != nil {
- panic(err)
- }
- result = append(result, v)
- }
-
- case types.Any:
- for _, object := range tpe {
- objRes := getOneOfForType(object)
- result = append(result, objRes...)
- }
- }
-
- result = removeDuplicate(result)
- sortValueSlice(result)
- return result
-}
-
-func sortValueSlice(sl []Value) {
- sort.Slice(sl, func(i, j int) bool {
- return sl[i].Compare(sl[j]) < 0
- })
-}
-
-func removeDuplicate(list []Value) []Value {
- seen := make(map[Value]bool)
- var newResult []Value
- for _, item := range list {
- if !seen[item] {
- newResult = append(newResult, item)
- seen[item] = true
- }
- }
- return newResult
-}
-
-func getArgTypes(env *TypeEnv, args []*Term) []types.Type {
- pre := make([]types.Type, len(args))
- for i := range args {
- pre[i] = env.Get(args[i])
- }
- return pre
-}
-
-// getPrefix returns the shortest prefix of ref that exists in env
-func getPrefix(env *TypeEnv, ref Ref) (Ref, types.Type) {
- if len(ref) == 1 {
- t := env.Get(ref)
- if t != nil {
- return ref, t
- }
- }
- for i := 1; i < len(ref); i++ {
- t := env.Get(ref[:i])
- if t != nil {
- return ref[:i], t
- }
- }
- return nil, nil
-}
-
-// override takes a type t and returns a type obtained from t where the path represented by ref within it has type o (overriding the original type of that path)
-func override(ref Ref, t types.Type, o types.Type, rule *Rule) (types.Type, *Error) {
- var newStaticProps []*types.StaticProperty
- obj, ok := t.(*types.Object)
- if !ok {
- newType, err := getObjectType(ref, o, rule, types.NewDynamicProperty(types.A, types.A))
- if err != nil {
- return nil, err
- }
- return newType, nil
- }
- found := false
- if ok {
- staticProps := obj.StaticProperties()
- for _, prop := range staticProps {
- valueCopy := prop.Value
- key, err := InterfaceToValue(prop.Key)
- if err != nil {
- return nil, NewError(TypeErr, rule.Location, "unexpected error in override: %s", err.Error())
- }
- if len(ref) > 0 && ref[0].Value.Compare(key) == 0 {
- found = true
- if len(ref) == 1 {
- valueCopy = o
- } else {
- newVal, err := override(ref[1:], valueCopy, o, rule)
- if err != nil {
- return nil, err
- }
- valueCopy = newVal
- }
- }
- newStaticProps = append(newStaticProps, types.NewStaticProperty(prop.Key, valueCopy))
- }
- }
-
- // ref[0] is not a top-level key in staticProps, so it must be added
- if !found {
- newType, err := getObjectType(ref, o, rule, obj.DynamicProperties())
- if err != nil {
- return nil, err
- }
- newStaticProps = append(newStaticProps, newType.StaticProperties()...)
- }
- return types.NewObject(newStaticProps, obj.DynamicProperties()), nil
-}
-
-func getKeys(ref Ref, rule *Rule) ([]interface{}, *Error) {
- keys := []interface{}{}
- for _, refElem := range ref {
- key, err := JSON(refElem.Value)
- if err != nil {
- return nil, NewError(TypeErr, rule.Location, "error getting key from value: %s", err.Error())
- }
- keys = append(keys, key)
- }
- return keys, nil
-}
-
-func getObjectTypeRec(keys []interface{}, o types.Type, d *types.DynamicProperty) *types.Object {
- if len(keys) == 1 {
- staticProps := []*types.StaticProperty{types.NewStaticProperty(keys[0], o)}
- return types.NewObject(staticProps, d)
- }
-
- staticProps := []*types.StaticProperty{types.NewStaticProperty(keys[0], getObjectTypeRec(keys[1:], o, d))}
- return types.NewObject(staticProps, d)
-}
-
-func getObjectType(ref Ref, o types.Type, rule *Rule, d *types.DynamicProperty) (*types.Object, *Error) {
- keys, err := getKeys(ref, rule)
- if err != nil {
- return nil, err
- }
- return getObjectTypeRec(keys, o, d), nil
-}
-
-func getRuleAnnotation(as *AnnotationSet, rule *Rule) (result []*SchemaAnnotation) {
-
- for _, x := range as.GetSubpackagesScope(rule.Module.Package.Path) {
- result = append(result, x.Schemas...)
- }
-
- if x := as.GetPackageScope(rule.Module.Package); x != nil {
- result = append(result, x.Schemas...)
- }
-
- if x := as.GetDocumentScope(rule.Ref().GroundPrefix()); x != nil {
- result = append(result, x.Schemas...)
- }
-
- for _, x := range as.GetRuleScope(rule) {
- result = append(result, x.Schemas...)
- }
-
- return result
-}
-
-func processAnnotation(ss *SchemaSet, annot *SchemaAnnotation, rule *Rule, allowNet []string) (types.Type, *Error) {
-
- var schema interface{}
-
- if annot.Schema != nil {
- if ss == nil {
- return nil, nil
- }
- schema = ss.Get(annot.Schema)
- if schema == nil {
- return nil, NewError(TypeErr, rule.Location, "undefined schema: %v", annot.Schema)
- }
- } else if annot.Definition != nil {
- schema = *annot.Definition
- }
-
- tpe, err := loadSchema(schema, allowNet)
- if err != nil {
- return nil, NewError(TypeErr, rule.Location, err.Error())
- }
-
- return tpe, nil
-}
-
-func errAnnotationRedeclared(a *Annotations, other *Location) *Error {
- return NewError(TypeErr, a.Location, "%v annotation redeclared: %v", a.Scope, other)
-}
+type RefErrInvalidDetail = v1.RefErrInvalidDetail
diff --git a/vendor/github.com/open-policy-agent/opa/ast/compare.go b/vendor/github.com/open-policy-agent/opa/ast/compare.go
index 3bb6f2a75d..5e617e992f 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/compare.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/compare.go
@@ -5,9 +5,7 @@
package ast
import (
- "encoding/json"
- "fmt"
- "math/big"
+ v1 "github.com/open-policy-agent/opa/v1/ast"
)
// Compare returns an integer indicating whether two AST values are less than,
@@ -36,361 +34,6 @@ import (
// Sets are considered equal if and only if the symmetric difference of a and b
// is empty.
// Other comparisons are consistent but not defined.
-func Compare(a, b interface{}) int {
-
- if t, ok := a.(*Term); ok {
- if t == nil {
- a = nil
- } else {
- a = t.Value
- }
- }
-
- if t, ok := b.(*Term); ok {
- if t == nil {
- b = nil
- } else {
- b = t.Value
- }
- }
-
- if a == nil {
- if b == nil {
- return 0
- }
- return -1
- }
- if b == nil {
- return 1
- }
-
- sortA := sortOrder(a)
- sortB := sortOrder(b)
-
- if sortA < sortB {
- return -1
- } else if sortB < sortA {
- return 1
- }
-
- switch a := a.(type) {
- case Null:
- return 0
- case Boolean:
- b := b.(Boolean)
- if a.Equal(b) {
- return 0
- }
- if !a {
- return -1
- }
- return 1
- case Number:
- if ai, err := json.Number(a).Int64(); err == nil {
- if bi, err := json.Number(b.(Number)).Int64(); err == nil {
- if ai == bi {
- return 0
- }
- if ai < bi {
- return -1
- }
- return 1
- }
- }
-
- // We use big.Rat for comparing big numbers.
- // It replaces big.Float due to following reason:
- // big.Float comes with a default precision of 64, and setting a
- // larger precision results in more memory being allocated
- // (regardless of the actual number we are parsing with SetString).
- //
- // Note: If we're so close to zero that big.Float says we are zero, do
- // *not* big.Rat).SetString on the original string it'll potentially
- // take very long.
- var bigA, bigB *big.Rat
- fa, ok := new(big.Float).SetString(string(a))
- if !ok {
- panic("illegal value")
- }
- if fa.IsInt() {
- if i, _ := fa.Int64(); i == 0 {
- bigA = new(big.Rat).SetInt64(0)
- }
- }
- if bigA == nil {
- bigA, ok = new(big.Rat).SetString(string(a))
- if !ok {
- panic("illegal value")
- }
- }
-
- fb, ok := new(big.Float).SetString(string(b.(Number)))
- if !ok {
- panic("illegal value")
- }
- if fb.IsInt() {
- if i, _ := fb.Int64(); i == 0 {
- bigB = new(big.Rat).SetInt64(0)
- }
- }
- if bigB == nil {
- bigB, ok = new(big.Rat).SetString(string(b.(Number)))
- if !ok {
- panic("illegal value")
- }
- }
-
- return bigA.Cmp(bigB)
- case String:
- b := b.(String)
- if a.Equal(b) {
- return 0
- }
- if a < b {
- return -1
- }
- return 1
- case Var:
- b := b.(Var)
- if a.Equal(b) {
- return 0
- }
- if a < b {
- return -1
- }
- return 1
- case Ref:
- b := b.(Ref)
- return termSliceCompare(a, b)
- case *Array:
- b := b.(*Array)
- return termSliceCompare(a.elems, b.elems)
- case *lazyObj:
- return Compare(a.force(), b)
- case *object:
- if x, ok := b.(*lazyObj); ok {
- b = x.force()
- }
- b := b.(*object)
- return a.Compare(b)
- case Set:
- b := b.(Set)
- return a.Compare(b)
- case *ArrayComprehension:
- b := b.(*ArrayComprehension)
- if cmp := Compare(a.Term, b.Term); cmp != 0 {
- return cmp
- }
- return Compare(a.Body, b.Body)
- case *ObjectComprehension:
- b := b.(*ObjectComprehension)
- if cmp := Compare(a.Key, b.Key); cmp != 0 {
- return cmp
- }
- if cmp := Compare(a.Value, b.Value); cmp != 0 {
- return cmp
- }
- return Compare(a.Body, b.Body)
- case *SetComprehension:
- b := b.(*SetComprehension)
- if cmp := Compare(a.Term, b.Term); cmp != 0 {
- return cmp
- }
- return Compare(a.Body, b.Body)
- case Call:
- b := b.(Call)
- return termSliceCompare(a, b)
- case *Expr:
- b := b.(*Expr)
- return a.Compare(b)
- case *SomeDecl:
- b := b.(*SomeDecl)
- return a.Compare(b)
- case *Every:
- b := b.(*Every)
- return a.Compare(b)
- case *With:
- b := b.(*With)
- return a.Compare(b)
- case Body:
- b := b.(Body)
- return a.Compare(b)
- case *Head:
- b := b.(*Head)
- return a.Compare(b)
- case *Rule:
- b := b.(*Rule)
- return a.Compare(b)
- case Args:
- b := b.(Args)
- return termSliceCompare(a, b)
- case *Import:
- b := b.(*Import)
- return a.Compare(b)
- case *Package:
- b := b.(*Package)
- return a.Compare(b)
- case *Annotations:
- b := b.(*Annotations)
- return a.Compare(b)
- case *Module:
- b := b.(*Module)
- return a.Compare(b)
- }
- panic(fmt.Sprintf("illegal value: %T", a))
-}
-
-type termSlice []*Term
-
-func (s termSlice) Less(i, j int) bool { return Compare(s[i].Value, s[j].Value) < 0 }
-func (s termSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x }
-func (s termSlice) Len() int { return len(s) }
-
-func sortOrder(x interface{}) int {
- switch x.(type) {
- case Null:
- return 0
- case Boolean:
- return 1
- case Number:
- return 2
- case String:
- return 3
- case Var:
- return 4
- case Ref:
- return 5
- case *Array:
- return 6
- case Object:
- return 7
- case Set:
- return 8
- case *ArrayComprehension:
- return 9
- case *ObjectComprehension:
- return 10
- case *SetComprehension:
- return 11
- case Call:
- return 12
- case Args:
- return 13
- case *Expr:
- return 100
- case *SomeDecl:
- return 101
- case *Every:
- return 102
- case *With:
- return 110
- case *Head:
- return 120
- case Body:
- return 200
- case *Rule:
- return 1000
- case *Import:
- return 1001
- case *Package:
- return 1002
- case *Annotations:
- return 1003
- case *Module:
- return 10000
- }
- panic(fmt.Sprintf("illegal value: %T", x))
-}
-
-func importsCompare(a, b []*Import) int {
- minLen := len(a)
- if len(b) < minLen {
- minLen = len(b)
- }
- for i := 0; i < minLen; i++ {
- if cmp := a[i].Compare(b[i]); cmp != 0 {
- return cmp
- }
- }
- if len(a) < len(b) {
- return -1
- }
- if len(b) < len(a) {
- return 1
- }
- return 0
-}
-
-func annotationsCompare(a, b []*Annotations) int {
- minLen := len(a)
- if len(b) < minLen {
- minLen = len(b)
- }
- for i := 0; i < minLen; i++ {
- if cmp := a[i].Compare(b[i]); cmp != 0 {
- return cmp
- }
- }
- if len(a) < len(b) {
- return -1
- }
- if len(b) < len(a) {
- return 1
- }
- return 0
-}
-
-func rulesCompare(a, b []*Rule) int {
- minLen := len(a)
- if len(b) < minLen {
- minLen = len(b)
- }
- for i := 0; i < minLen; i++ {
- if cmp := a[i].Compare(b[i]); cmp != 0 {
- return cmp
- }
- }
- if len(a) < len(b) {
- return -1
- }
- if len(b) < len(a) {
- return 1
- }
- return 0
-}
-
-func termSliceCompare(a, b []*Term) int {
- minLen := len(a)
- if len(b) < minLen {
- minLen = len(b)
- }
- for i := 0; i < minLen; i++ {
- if cmp := Compare(a[i], b[i]); cmp != 0 {
- return cmp
- }
- }
- if len(a) < len(b) {
- return -1
- } else if len(b) < len(a) {
- return 1
- }
- return 0
-}
-
-func withSliceCompare(a, b []*With) int {
- minLen := len(a)
- if len(b) < minLen {
- minLen = len(b)
- }
- for i := 0; i < minLen; i++ {
- if cmp := Compare(a[i], b[i]); cmp != 0 {
- return cmp
- }
- }
- if len(a) < len(b) {
- return -1
- } else if len(b) < len(a) {
- return 1
- }
- return 0
+func Compare(a, b any) int {
+ return v1.Compare(a, b)
}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/compile.go b/vendor/github.com/open-policy-agent/opa/ast/compile.go
index 9025f862b2..5a3daa910a 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/compile.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/compile.go
@@ -5,5882 +5,123 @@
package ast
import (
- "errors"
- "fmt"
- "io"
- "sort"
- "strconv"
- "strings"
-
- "github.com/open-policy-agent/opa/ast/location"
- "github.com/open-policy-agent/opa/internal/debug"
- "github.com/open-policy-agent/opa/internal/gojsonschema"
- "github.com/open-policy-agent/opa/metrics"
- "github.com/open-policy-agent/opa/types"
- "github.com/open-policy-agent/opa/util"
+ v1 "github.com/open-policy-agent/opa/v1/ast"
)
// CompileErrorLimitDefault is the default number errors a compiler will allow before
// exiting.
const CompileErrorLimitDefault = 10
-var errLimitReached = NewError(CompileErr, nil, "error limit reached")
-
// Compiler contains the state of a compilation process.
-type Compiler struct {
-
- // Errors contains errors that occurred during the compilation process.
- // If there are one or more errors, the compilation process is considered
- // "failed".
- Errors Errors
-
- // Modules contains the compiled modules. The compiled modules are the
- // output of the compilation process. If the compilation process failed,
- // there is no guarantee about the state of the modules.
- Modules map[string]*Module
-
- // ModuleTree organizes the modules into a tree where each node is keyed by
- // an element in the module's package path. E.g., given modules containing
- // the following package directives: "a", "a.b", "a.c", and "a.b", the
- // resulting module tree would be:
- //
- // root
- // |
- // +--- data (no modules)
- // |
- // +--- a (1 module)
- // |
- // +--- b (2 modules)
- // |
- // +--- c (1 module)
- //
- ModuleTree *ModuleTreeNode
-
- // RuleTree organizes rules into a tree where each node is keyed by an
- // element in the rule's path. The rule path is the concatenation of the
- // containing package and the stringified rule name. E.g., given the
- // following module:
- //
- // package ex
- // p[1] { true }
- // p[2] { true }
- // q = true
- // a.b.c = 3
- //
- // root
- // |
- // +--- data (no rules)
- // |
- // +--- ex (no rules)
- // |
- // +--- p (2 rules)
- // |
- // +--- q (1 rule)
- // |
- // +--- a
- // |
- // +--- b
- // |
- // +--- c (1 rule)
- //
- // Another example with general refs containing vars at arbitrary locations:
- //
- // package ex
- // a.b[x].d { x := "c" } # R1
- // a.b.c[x] { x := "d" } # R2
- // a.b[x][y] { x := "c"; y := "d" } # R3
- // p := true # R4
- //
- // root
- // |
- // +--- data (no rules)
- // |
- // +--- ex (no rules)
- // |
- // +--- a
- // | |
- // | +--- b (R1, R3)
- // | |
- // | +--- c (R2)
- // |
- // +--- p (R4)
- RuleTree *TreeNode
-
- // Graph contains dependencies between rules. An edge (u,v) is added to the
- // graph if rule 'u' refers to the virtual document defined by 'v'.
- Graph *Graph
-
- // TypeEnv holds type information for values inferred by the compiler.
- TypeEnv *TypeEnv
-
- // RewrittenVars is a mapping of variables that have been rewritten
- // with the key being the generated name and value being the original.
- RewrittenVars map[Var]Var
-
- // Capabliities required by the modules that were compiled.
- Required *Capabilities
-
- localvargen *localVarGenerator
- moduleLoader ModuleLoader
- ruleIndices *util.HashMap
- stages []stage
- maxErrs int
- sorted []string // list of sorted module names
- pathExists func([]string) (bool, error)
- after map[string][]CompilerStageDefinition
- metrics metrics.Metrics
- capabilities *Capabilities // user-supplied capabilities
- imports map[string][]*Import // saved imports from stripping
- builtins map[string]*Builtin // universe of built-in functions
- customBuiltins map[string]*Builtin // user-supplied custom built-in functions (deprecated: use capabilities)
- unsafeBuiltinsMap map[string]struct{} // user-supplied set of unsafe built-ins functions to block (deprecated: use capabilities)
- deprecatedBuiltinsMap map[string]struct{} // set of deprecated, but not removed, built-in functions
- enablePrintStatements bool // indicates if print statements should be elided (default)
- comprehensionIndices map[*Term]*ComprehensionIndex // comprehension key index
- initialized bool // indicates if init() has been called
- debug debug.Debug // emits debug information produced during compilation
- schemaSet *SchemaSet // user-supplied schemas for input and data documents
- inputType types.Type // global input type retrieved from schema set
- annotationSet *AnnotationSet // hierarchical set of annotations
- strict bool // enforce strict compilation checks
- keepModules bool // whether to keep the unprocessed, parse modules (below)
- parsedModules map[string]*Module // parsed, but otherwise unprocessed modules, kept track of when keepModules is true
- useTypeCheckAnnotations bool // whether to provide annotated information (schemas) to the type checker
- allowUndefinedFuncCalls bool // don't error on calls to unknown functions.
- evalMode CompilerEvalMode //
- rewriteTestRulesForTracing bool // rewrite test rules to capture dynamic values for tracing.
-}
+type Compiler = v1.Compiler
// CompilerStage defines the interface for stages in the compiler.
-type CompilerStage func(*Compiler) *Error
+type CompilerStage = v1.CompilerStage
// CompilerEvalMode allows toggling certain stages that are only
// needed for certain modes, Concretely, only "topdown" mode will
// have the compiler build comprehension and rule indices.
-type CompilerEvalMode int
+type CompilerEvalMode = v1.CompilerEvalMode
const (
// EvalModeTopdown (default) instructs the compiler to build rule
// and comprehension indices used by topdown evaluation.
- EvalModeTopdown CompilerEvalMode = iota
+ EvalModeTopdown = v1.EvalModeTopdown
// EvalModeIR makes the compiler skip the stages for comprehension
// and rule indices.
- EvalModeIR
+ EvalModeIR = v1.EvalModeIR
)
// CompilerStageDefinition defines a compiler stage
-type CompilerStageDefinition struct {
- Name string
- MetricName string
- Stage CompilerStage
-}
+type CompilerStageDefinition = v1.CompilerStageDefinition
// RulesOptions defines the options for retrieving rules by Ref from the
// compiler.
-type RulesOptions struct {
- // IncludeHiddenModules determines if the result contains hidden modules,
- // currently only the "system" namespace, i.e. "data.system.*".
- IncludeHiddenModules bool
-}
+type RulesOptions = v1.RulesOptions
// QueryContext contains contextual information for running an ad-hoc query.
//
// Ad-hoc queries can be run in the context of a package and imports may be
// included to provide concise access to data.
-type QueryContext struct {
- Package *Package
- Imports []*Import
-}
+type QueryContext = v1.QueryContext
// NewQueryContext returns a new QueryContext object.
func NewQueryContext() *QueryContext {
- return &QueryContext{}
-}
-
-// WithPackage sets the pkg on qc.
-func (qc *QueryContext) WithPackage(pkg *Package) *QueryContext {
- if qc == nil {
- qc = NewQueryContext()
- }
- qc.Package = pkg
- return qc
-}
-
-// WithImports sets the imports on qc.
-func (qc *QueryContext) WithImports(imports []*Import) *QueryContext {
- if qc == nil {
- qc = NewQueryContext()
- }
- qc.Imports = imports
- return qc
-}
-
-// Copy returns a deep copy of qc.
-func (qc *QueryContext) Copy() *QueryContext {
- if qc == nil {
- return nil
- }
- cpy := *qc
- if cpy.Package != nil {
- cpy.Package = qc.Package.Copy()
- }
- cpy.Imports = make([]*Import, len(qc.Imports))
- for i := range qc.Imports {
- cpy.Imports[i] = qc.Imports[i].Copy()
- }
- return &cpy
+ return v1.NewQueryContext()
}
// QueryCompiler defines the interface for compiling ad-hoc queries.
-type QueryCompiler interface {
-
- // Compile should be called to compile ad-hoc queries. The return value is
- // the compiled version of the query.
- Compile(q Body) (Body, error)
-
- // TypeEnv returns the type environment built after running type checking
- // on the query.
- TypeEnv() *TypeEnv
-
- // WithContext sets the QueryContext on the QueryCompiler. Subsequent calls
- // to Compile will take the QueryContext into account.
- WithContext(qctx *QueryContext) QueryCompiler
-
- // WithEnablePrintStatements enables print statements in queries compiled
- // with the QueryCompiler.
- WithEnablePrintStatements(yes bool) QueryCompiler
-
- // WithUnsafeBuiltins sets the built-in functions to treat as unsafe and not
- // allow inside of queries. By default the query compiler inherits the
- // compiler's unsafe built-in functions. This function allows callers to
- // override that set. If an empty (non-nil) map is provided, all built-ins
- // are allowed.
- WithUnsafeBuiltins(unsafe map[string]struct{}) QueryCompiler
-
- // WithStageAfter registers a stage to run during query compilation after
- // the named stage.
- WithStageAfter(after string, stage QueryCompilerStageDefinition) QueryCompiler
-
- // RewrittenVars maps generated vars in the compiled query to vars from the
- // parsed query. For example, given the query "input := 1" the rewritten
- // query would be "__local0__ = 1". The mapping would then be {__local0__: input}.
- RewrittenVars() map[Var]Var
-
- // ComprehensionIndex returns an index data structure for the given comprehension
- // term. If no index is found, returns nil.
- ComprehensionIndex(term *Term) *ComprehensionIndex
-
- // WithStrict enables strict mode for the query compiler.
- WithStrict(strict bool) QueryCompiler
-}
+type QueryCompiler = v1.QueryCompiler
// QueryCompilerStage defines the interface for stages in the query compiler.
-type QueryCompilerStage func(QueryCompiler, Body) (Body, error)
+type QueryCompilerStage = v1.QueryCompilerStage
// QueryCompilerStageDefinition defines a QueryCompiler stage
-type QueryCompilerStageDefinition struct {
- Name string
- MetricName string
- Stage QueryCompilerStage
-}
-
-type stage struct {
- name string
- metricName string
- f func()
-}
+type QueryCompilerStageDefinition = v1.QueryCompilerStageDefinition
// NewCompiler returns a new empty compiler.
func NewCompiler() *Compiler {
-
- c := &Compiler{
- Modules: map[string]*Module{},
- RewrittenVars: map[Var]Var{},
- Required: &Capabilities{},
- ruleIndices: util.NewHashMap(func(a, b util.T) bool {
- r1, r2 := a.(Ref), b.(Ref)
- return r1.Equal(r2)
- }, func(x util.T) int {
- return x.(Ref).Hash()
- }),
- maxErrs: CompileErrorLimitDefault,
- after: map[string][]CompilerStageDefinition{},
- unsafeBuiltinsMap: map[string]struct{}{},
- deprecatedBuiltinsMap: map[string]struct{}{},
- comprehensionIndices: map[*Term]*ComprehensionIndex{},
- debug: debug.Discard(),
- }
-
- c.ModuleTree = NewModuleTree(nil)
- c.RuleTree = NewRuleTree(c.ModuleTree)
-
- c.stages = []stage{
- // Reference resolution should run first as it may be used to lazily
- // load additional modules. If any stages run before resolution, they
- // need to be re-run after resolution.
- {"ResolveRefs", "compile_stage_resolve_refs", c.resolveAllRefs},
- // The local variable generator must be initialized after references are
- // resolved and the dynamic module loader has run but before subsequent
- // stages that need to generate variables.
- {"InitLocalVarGen", "compile_stage_init_local_var_gen", c.initLocalVarGen},
- {"RewriteRuleHeadRefs", "compile_stage_rewrite_rule_head_refs", c.rewriteRuleHeadRefs},
- {"CheckKeywordOverrides", "compile_stage_check_keyword_overrides", c.checkKeywordOverrides},
- {"CheckDuplicateImports", "compile_stage_check_duplicate_imports", c.checkDuplicateImports},
- {"RemoveImports", "compile_stage_remove_imports", c.removeImports},
- {"SetModuleTree", "compile_stage_set_module_tree", c.setModuleTree},
- {"SetRuleTree", "compile_stage_set_rule_tree", c.setRuleTree}, // depends on RewriteRuleHeadRefs
- {"RewriteLocalVars", "compile_stage_rewrite_local_vars", c.rewriteLocalVars},
- {"CheckVoidCalls", "compile_stage_check_void_calls", c.checkVoidCalls},
- {"RewritePrintCalls", "compile_stage_rewrite_print_calls", c.rewritePrintCalls},
- {"RewriteExprTerms", "compile_stage_rewrite_expr_terms", c.rewriteExprTerms},
- {"ParseMetadataBlocks", "compile_stage_parse_metadata_blocks", c.parseMetadataBlocks},
- {"SetAnnotationSet", "compile_stage_set_annotationset", c.setAnnotationSet},
- {"RewriteRegoMetadataCalls", "compile_stage_rewrite_rego_metadata_calls", c.rewriteRegoMetadataCalls},
- {"SetGraph", "compile_stage_set_graph", c.setGraph},
- {"RewriteComprehensionTerms", "compile_stage_rewrite_comprehension_terms", c.rewriteComprehensionTerms},
- {"RewriteRefsInHead", "compile_stage_rewrite_refs_in_head", c.rewriteRefsInHead},
- {"RewriteWithValues", "compile_stage_rewrite_with_values", c.rewriteWithModifiers},
- {"CheckRuleConflicts", "compile_stage_check_rule_conflicts", c.checkRuleConflicts},
- {"CheckUndefinedFuncs", "compile_stage_check_undefined_funcs", c.checkUndefinedFuncs},
- {"CheckSafetyRuleHeads", "compile_stage_check_safety_rule_heads", c.checkSafetyRuleHeads},
- {"CheckSafetyRuleBodies", "compile_stage_check_safety_rule_bodies", c.checkSafetyRuleBodies},
- {"RewriteEquals", "compile_stage_rewrite_equals", c.rewriteEquals},
- {"RewriteDynamicTerms", "compile_stage_rewrite_dynamic_terms", c.rewriteDynamicTerms},
- {"RewriteTestRulesForTracing", "compile_stage_rewrite_test_rules_for_tracing", c.rewriteTestRuleEqualities}, // must run after RewriteDynamicTerms
- {"CheckRecursion", "compile_stage_check_recursion", c.checkRecursion},
- {"CheckTypes", "compile_stage_check_types", c.checkTypes}, // must be run after CheckRecursion
- {"CheckUnsafeBuiltins", "compile_state_check_unsafe_builtins", c.checkUnsafeBuiltins},
- {"CheckDeprecatedBuiltins", "compile_state_check_deprecated_builtins", c.checkDeprecatedBuiltins},
- {"BuildRuleIndices", "compile_stage_rebuild_indices", c.buildRuleIndices},
- {"BuildComprehensionIndices", "compile_stage_rebuild_comprehension_indices", c.buildComprehensionIndices},
- {"BuildRequiredCapabilities", "compile_stage_build_required_capabilities", c.buildRequiredCapabilities},
- }
-
- return c
-}
-
-// SetErrorLimit sets the number of errors the compiler can encounter before it
-// quits. Zero or a negative number indicates no limit.
-func (c *Compiler) SetErrorLimit(limit int) *Compiler {
- c.maxErrs = limit
- return c
-}
-
-// WithEnablePrintStatements enables print statements inside of modules compiled
-// by the compiler. If print statements are not enabled, calls to print() are
-// erased at compile-time.
-func (c *Compiler) WithEnablePrintStatements(yes bool) *Compiler {
- c.enablePrintStatements = yes
- return c
-}
-
-// WithPathConflictsCheck enables base-virtual document conflict
-// detection. The compiler will check that rules don't overlap with
-// paths that exist as determined by the provided callable.
-func (c *Compiler) WithPathConflictsCheck(fn func([]string) (bool, error)) *Compiler {
- c.pathExists = fn
- return c
-}
-
-// WithStageAfter registers a stage to run during compilation after
-// the named stage.
-func (c *Compiler) WithStageAfter(after string, stage CompilerStageDefinition) *Compiler {
- c.after[after] = append(c.after[after], stage)
- return c
-}
-
-// WithMetrics will set a metrics.Metrics and be used for profiling
-// the Compiler instance.
-func (c *Compiler) WithMetrics(metrics metrics.Metrics) *Compiler {
- c.metrics = metrics
- return c
-}
-
-// WithCapabilities sets capabilities to enable during compilation. Capabilities allow the caller
-// to specify the set of built-in functions available to the policy. In the future, capabilities
-// may be able to restrict access to other language features. Capabilities allow callers to check
-// if policies are compatible with a particular version of OPA. If policies are a compiled for a
-// specific version of OPA, there is no guarantee that _this_ version of OPA can evaluate them
-// successfully.
-func (c *Compiler) WithCapabilities(capabilities *Capabilities) *Compiler {
- c.capabilities = capabilities
- return c
-}
-
-// Capabilities returns the capabilities enabled during compilation.
-func (c *Compiler) Capabilities() *Capabilities {
- return c.capabilities
-}
-
-// WithDebug sets where debug messages are written to. Passing `nil` has no
-// effect.
-func (c *Compiler) WithDebug(sink io.Writer) *Compiler {
- if sink != nil {
- c.debug = debug.New(sink)
- }
- return c
-}
-
-// WithBuiltins is deprecated. Use WithCapabilities instead.
-func (c *Compiler) WithBuiltins(builtins map[string]*Builtin) *Compiler {
- c.customBuiltins = make(map[string]*Builtin)
- for k, v := range builtins {
- c.customBuiltins[k] = v
- }
- return c
-}
-
-// WithUnsafeBuiltins is deprecated. Use WithCapabilities instead.
-func (c *Compiler) WithUnsafeBuiltins(unsafeBuiltins map[string]struct{}) *Compiler {
- for name := range unsafeBuiltins {
- c.unsafeBuiltinsMap[name] = struct{}{}
- }
- return c
-}
-
-// WithStrict enables strict mode in the compiler.
-func (c *Compiler) WithStrict(strict bool) *Compiler {
- c.strict = strict
- return c
-}
-
-// WithKeepModules enables retaining unprocessed modules in the compiler.
-// Note that the modules aren't copied on the way in or out -- so when
-// accessing them via ParsedModules(), mutations will occur in the module
-// map that was passed into Compile().`
-func (c *Compiler) WithKeepModules(y bool) *Compiler {
- c.keepModules = y
- return c
-}
-
-// WithUseTypeCheckAnnotations use schema annotations during type checking
-func (c *Compiler) WithUseTypeCheckAnnotations(enabled bool) *Compiler {
- c.useTypeCheckAnnotations = enabled
- return c
-}
-
-func (c *Compiler) WithAllowUndefinedFunctionCalls(allow bool) *Compiler {
- c.allowUndefinedFuncCalls = allow
- return c
-}
-
-// WithEvalMode allows setting the CompilerEvalMode of the compiler
-func (c *Compiler) WithEvalMode(e CompilerEvalMode) *Compiler {
- c.evalMode = e
- return c
-}
-
-// WithRewriteTestRules enables rewriting test rules to capture dynamic values in local variables,
-// so they can be accessed by tracing.
-func (c *Compiler) WithRewriteTestRules(rewrite bool) *Compiler {
- c.rewriteTestRulesForTracing = rewrite
- return c
-}
-
-// ParsedModules returns the parsed, unprocessed modules from the compiler.
-// It is `nil` if keeping modules wasn't enabled via `WithKeepModules(true)`.
-// The map includes all modules loaded via the ModuleLoader, if one was used.
-func (c *Compiler) ParsedModules() map[string]*Module {
- return c.parsedModules
-}
-
-func (c *Compiler) QueryCompiler() QueryCompiler {
- c.init()
- c0 := *c
- return newQueryCompiler(&c0)
-}
-
-// Compile runs the compilation process on the input modules. The compiled
-// version of the modules and associated data structures are stored on the
-// compiler. If the compilation process fails for any reason, the compiler will
-// contain a slice of errors.
-func (c *Compiler) Compile(modules map[string]*Module) {
-
- c.init()
-
- c.Modules = make(map[string]*Module, len(modules))
- c.sorted = make([]string, 0, len(modules))
-
- if c.keepModules {
- c.parsedModules = make(map[string]*Module, len(modules))
- } else {
- c.parsedModules = nil
- }
-
- for k, v := range modules {
- c.Modules[k] = v.Copy()
- c.sorted = append(c.sorted, k)
- if c.parsedModules != nil {
- c.parsedModules[k] = v
- }
- }
-
- sort.Strings(c.sorted)
-
- c.compile()
-}
-
-// WithSchemas sets a schemaSet to the compiler
-func (c *Compiler) WithSchemas(schemas *SchemaSet) *Compiler {
- c.schemaSet = schemas
- return c
-}
-
-// Failed returns true if a compilation error has been encountered.
-func (c *Compiler) Failed() bool {
- return len(c.Errors) > 0
-}
-
-// ComprehensionIndex returns a data structure specifying how to index comprehension
-// results so that callers do not have to recompute the comprehension more than once.
-// If no index is found, returns nil.
-func (c *Compiler) ComprehensionIndex(term *Term) *ComprehensionIndex {
- return c.comprehensionIndices[term]
-}
-
-// GetArity returns the number of args a function referred to by ref takes. If
-// ref refers to built-in function, the built-in declaration is consulted,
-// otherwise, the ref is used to perform a ruleset lookup.
-func (c *Compiler) GetArity(ref Ref) int {
- if bi := c.builtins[ref.String()]; bi != nil {
- return len(bi.Decl.FuncArgs().Args)
- }
- rules := c.GetRulesExact(ref)
- if len(rules) == 0 {
- return -1
- }
- return len(rules[0].Head.Args)
-}
-
-// GetRulesExact returns a slice of rules referred to by the reference.
-//
-// E.g., given the following module:
-//
-// package a.b.c
-//
-// p[k] = v { ... } # rule1
-// p[k1] = v1 { ... } # rule2
-//
-// The following calls yield the rules on the right.
-//
-// GetRulesExact("data.a.b.c.p") => [rule1, rule2]
-// GetRulesExact("data.a.b.c.p.x") => nil
-// GetRulesExact("data.a.b.c") => nil
-func (c *Compiler) GetRulesExact(ref Ref) (rules []*Rule) {
- node := c.RuleTree
-
- for _, x := range ref {
- if node = node.Child(x.Value); node == nil {
- return nil
- }
- }
-
- return extractRules(node.Values)
-}
-
-// GetRulesForVirtualDocument returns a slice of rules that produce the virtual
-// document referred to by the reference.
-//
-// E.g., given the following module:
-//
-// package a.b.c
-//
-// p[k] = v { ... } # rule1
-// p[k1] = v1 { ... } # rule2
-//
-// The following calls yield the rules on the right.
-//
-// GetRulesForVirtualDocument("data.a.b.c.p") => [rule1, rule2]
-// GetRulesForVirtualDocument("data.a.b.c.p.x") => [rule1, rule2]
-// GetRulesForVirtualDocument("data.a.b.c") => nil
-func (c *Compiler) GetRulesForVirtualDocument(ref Ref) (rules []*Rule) {
-
- node := c.RuleTree
-
- for _, x := range ref {
- if node = node.Child(x.Value); node == nil {
- return nil
- }
- if len(node.Values) > 0 {
- return extractRules(node.Values)
- }
- }
-
- return extractRules(node.Values)
-}
-
-// GetRulesWithPrefix returns a slice of rules that share the prefix ref.
-//
-// E.g., given the following module:
-//
-// package a.b.c
-//
-// p[x] = y { ... } # rule1
-// p[k] = v { ... } # rule2
-// q { ... } # rule3
-//
-// The following calls yield the rules on the right.
-//
-// GetRulesWithPrefix("data.a.b.c.p") => [rule1, rule2]
-// GetRulesWithPrefix("data.a.b.c.p.a") => nil
-// GetRulesWithPrefix("data.a.b.c") => [rule1, rule2, rule3]
-func (c *Compiler) GetRulesWithPrefix(ref Ref) (rules []*Rule) {
-
- node := c.RuleTree
-
- for _, x := range ref {
- if node = node.Child(x.Value); node == nil {
- return nil
- }
- }
-
- var acc func(node *TreeNode)
-
- acc = func(node *TreeNode) {
- rules = append(rules, extractRules(node.Values)...)
- for _, child := range node.Children {
- if child.Hide {
- continue
- }
- acc(child)
- }
- }
-
- acc(node)
-
- return rules
-}
-
-func extractRules(s []util.T) []*Rule {
- rules := make([]*Rule, len(s))
- for i := range s {
- rules[i] = s[i].(*Rule)
- }
- return rules
-}
-
-// GetRules returns a slice of rules that are referred to by ref.
-//
-// E.g., given the following module:
-//
-// package a.b.c
-//
-// p[x] = y { q[x] = y; ... } # rule1
-// q[x] = y { ... } # rule2
-//
-// The following calls yield the rules on the right.
-//
-// GetRules("data.a.b.c.p") => [rule1]
-// GetRules("data.a.b.c.p.x") => [rule1]
-// GetRules("data.a.b.c.q") => [rule2]
-// GetRules("data.a.b.c") => [rule1, rule2]
-// GetRules("data.a.b.d") => nil
-func (c *Compiler) GetRules(ref Ref) (rules []*Rule) {
-
- set := map[*Rule]struct{}{}
-
- for _, rule := range c.GetRulesForVirtualDocument(ref) {
- set[rule] = struct{}{}
- }
-
- for _, rule := range c.GetRulesWithPrefix(ref) {
- set[rule] = struct{}{}
- }
-
- for rule := range set {
- rules = append(rules, rule)
- }
-
- return rules
-}
-
-// GetRulesDynamic returns a slice of rules that could be referred to by a ref.
-//
-// Deprecated: use GetRulesDynamicWithOpts
-func (c *Compiler) GetRulesDynamic(ref Ref) []*Rule {
- return c.GetRulesDynamicWithOpts(ref, RulesOptions{})
-}
-
-// GetRulesDynamicWithOpts returns a slice of rules that could be referred to by
-// a ref.
-// When parts of the ref are statically known, we use that information to narrow
-// down which rules the ref could refer to, but in the most general case this
-// will be an over-approximation.
-//
-// E.g., given the following modules:
-//
-// package a.b.c
-//
-// r1 = 1 # rule1
-//
-// and:
-//
-// package a.d.c
-//
-// r2 = 2 # rule2
-//
-// The following calls yield the rules on the right.
-//
-// GetRulesDynamicWithOpts("data.a[x].c[y]", opts) => [rule1, rule2]
-// GetRulesDynamicWithOpts("data.a[x].c.r2", opts) => [rule2]
-// GetRulesDynamicWithOpts("data.a.b[x][y]", opts) => [rule1]
-//
-// Using the RulesOptions parameter, the inclusion of hidden modules can be
-// controlled:
-//
-// With
-//
-// package system.main
-//
-// r3 = 3 # rule3
-//
-// We'd get this result:
-//
-// GetRulesDynamicWithOpts("data[x]", RulesOptions{IncludeHiddenModules: true}) => [rule1, rule2, rule3]
-//
-// Without the options, it would be excluded.
-func (c *Compiler) GetRulesDynamicWithOpts(ref Ref, opts RulesOptions) []*Rule {
- node := c.RuleTree
-
- set := map[*Rule]struct{}{}
- var walk func(node *TreeNode, i int)
- walk = func(node *TreeNode, i int) {
- switch {
- case i >= len(ref):
- // We've reached the end of the reference and want to collect everything
- // under this "prefix".
- node.DepthFirst(func(descendant *TreeNode) bool {
- insertRules(set, descendant.Values)
- if opts.IncludeHiddenModules {
- return false
- }
- return descendant.Hide
- })
-
- case i == 0 || IsConstant(ref[i].Value):
- // The head of the ref is always grounded. In case another part of the
- // ref is also grounded, we can lookup the exact child. If it's not found
- // we can immediately return...
- if child := node.Child(ref[i].Value); child != nil {
- if len(child.Values) > 0 {
- // Add any rules at this position
- insertRules(set, child.Values)
- }
- // There might still be "sub-rules" contributing key-value "overrides" for e.g. partial object rules, continue walking
- walk(child, i+1)
- } else {
- return
- }
-
- default:
- // This part of the ref is a dynamic term. We can't know what it refers
- // to and will just need to try all of the children.
- for _, child := range node.Children {
- if child.Hide && !opts.IncludeHiddenModules {
- continue
- }
- insertRules(set, child.Values)
- walk(child, i+1)
- }
- }
- }
-
- walk(node, 0)
- rules := make([]*Rule, 0, len(set))
- for rule := range set {
- rules = append(rules, rule)
- }
- return rules
-}
-
-// Utility: add all rule values to the set.
-func insertRules(set map[*Rule]struct{}, rules []util.T) {
- for _, rule := range rules {
- set[rule.(*Rule)] = struct{}{}
- }
-}
-
-// RuleIndex returns a RuleIndex built for the rule set referred to by path.
-// The path must refer to the rule set exactly, i.e., given a rule set at path
-// data.a.b.c.p, refs data.a.b.c.p.x and data.a.b.c would not return a
-// RuleIndex built for the rule.
-func (c *Compiler) RuleIndex(path Ref) RuleIndex {
- r, ok := c.ruleIndices.Get(path)
- if !ok {
- return nil
- }
- return r.(RuleIndex)
-}
-
-// PassesTypeCheck determines whether the given body passes type checking
-func (c *Compiler) PassesTypeCheck(body Body) bool {
- checker := newTypeChecker().WithSchemaSet(c.schemaSet).WithInputType(c.inputType)
- env := c.TypeEnv
- _, errs := checker.CheckBody(env, body)
- return len(errs) == 0
-}
-
-// PassesTypeCheckRules determines whether the given rules passes type checking
-func (c *Compiler) PassesTypeCheckRules(rules []*Rule) Errors {
- elems := []util.T{}
-
- for _, rule := range rules {
- elems = append(elems, rule)
- }
-
- // Load the global input schema if one was provided.
- if c.schemaSet != nil {
- if schema := c.schemaSet.Get(SchemaRootRef); schema != nil {
-
- var allowNet []string
- if c.capabilities != nil {
- allowNet = c.capabilities.AllowNet
- }
-
- tpe, err := loadSchema(schema, allowNet)
- if err != nil {
- return Errors{NewError(TypeErr, nil, err.Error())}
- }
- c.inputType = tpe
- }
- }
-
- var as *AnnotationSet
- if c.useTypeCheckAnnotations {
- as = c.annotationSet
- }
-
- checker := newTypeChecker().WithSchemaSet(c.schemaSet).WithInputType(c.inputType)
-
- if c.TypeEnv == nil {
- if c.capabilities == nil {
- c.capabilities = CapabilitiesForThisVersion()
- }
-
- c.builtins = make(map[string]*Builtin, len(c.capabilities.Builtins)+len(c.customBuiltins))
-
- for _, bi := range c.capabilities.Builtins {
- c.builtins[bi.Name] = bi
- }
-
- for name, bi := range c.customBuiltins {
- c.builtins[name] = bi
- }
-
- c.TypeEnv = checker.Env(c.builtins)
- }
-
- _, errs := checker.CheckTypes(c.TypeEnv, elems, as)
- return errs
+ return v1.NewCompiler().WithDefaultRegoVersion(DefaultRegoVersion)
}
// ModuleLoader defines the interface that callers can implement to enable lazy
// loading of modules during compilation.
-type ModuleLoader func(resolved map[string]*Module) (parsed map[string]*Module, err error)
-
-// WithModuleLoader sets f as the ModuleLoader on the compiler.
-//
-// The compiler will invoke the ModuleLoader after resolving all references in
-// the current set of input modules. The ModuleLoader can return a new
-// collection of parsed modules that are to be included in the compilation
-// process. This process will repeat until the ModuleLoader returns an empty
-// collection or an error. If an error is returned, compilation will stop
-// immediately.
-func (c *Compiler) WithModuleLoader(f ModuleLoader) *Compiler {
- c.moduleLoader = f
- return c
-}
-
-func (c *Compiler) counterAdd(name string, n uint64) {
- if c.metrics == nil {
- return
- }
- c.metrics.Counter(name).Add(n)
-}
-
-func (c *Compiler) buildRuleIndices() {
-
- c.RuleTree.DepthFirst(func(node *TreeNode) bool {
- if len(node.Values) == 0 {
- return false
- }
- rules := extractRules(node.Values)
- hasNonGroundRef := false
- for _, r := range rules {
- hasNonGroundRef = !r.Head.Ref().IsGround()
- }
- if hasNonGroundRef {
- // Collect children to ensure that all rules within the extent of a rule with a general ref
- // are found on the same index. E.g. the following rules should be indexed under data.a.b.c:
- //
- // package a
- // b.c[x].e := 1 { x := input.x }
- // b.c.d := 2
- // b.c.d2.e[x] := 3 { x := input.x }
- for _, child := range node.Children {
- child.DepthFirst(func(c *TreeNode) bool {
- rules = append(rules, extractRules(c.Values)...)
- return false
- })
- }
- }
-
- index := newBaseDocEqIndex(func(ref Ref) bool {
- return isVirtual(c.RuleTree, ref.GroundPrefix())
- })
- if index.Build(rules) {
- c.ruleIndices.Put(rules[0].Ref().GroundPrefix(), index)
- }
- return hasNonGroundRef // currently, we don't allow those branches to go deeper
- })
-
-}
-
-func (c *Compiler) buildComprehensionIndices() {
- for _, name := range c.sorted {
- WalkRules(c.Modules[name], func(r *Rule) bool {
- candidates := r.Head.Args.Vars()
- candidates.Update(ReservedVars)
- n := buildComprehensionIndices(c.debug, c.GetArity, candidates, c.RewrittenVars, r.Body, c.comprehensionIndices)
- c.counterAdd(compileStageComprehensionIndexBuild, n)
- return false
- })
- }
-}
+type ModuleLoader = v1.ModuleLoader
-// buildRequiredCapabilities updates the required capabilities on the compiler
-// to include any keyword and feature dependencies present in the modules. The
-// built-in function dependencies will have already been added by the type
-// checker.
-func (c *Compiler) buildRequiredCapabilities() {
-
- features := map[string]struct{}{}
-
- // extract required keywords from modules
- keywords := map[string]struct{}{}
- futureKeywordsPrefix := Ref{FutureRootDocument, StringTerm("keywords")}
- for _, name := range c.sorted {
- for _, imp := range c.imports[name] {
- path := imp.Path.Value.(Ref)
- switch {
- case path.Equal(RegoV1CompatibleRef):
- features[FeatureRegoV1Import] = struct{}{}
- case path.HasPrefix(futureKeywordsPrefix):
- if len(path) == 2 {
- for kw := range futureKeywords {
- keywords[kw] = struct{}{}
- }
- } else {
- keywords[string(path[2].Value.(String))] = struct{}{}
- }
- }
- }
- }
-
- c.Required.FutureKeywords = stringMapToSortedSlice(keywords)
-
- // extract required features from modules
-
- for _, name := range c.sorted {
- for _, rule := range c.Modules[name].Rules {
- refLen := len(rule.Head.Reference)
- if refLen >= 3 {
- if refLen > len(rule.Head.Reference.ConstantPrefix()) {
- features[FeatureRefHeads] = struct{}{}
- } else {
- features[FeatureRefHeadStringPrefixes] = struct{}{}
- }
- }
- }
- }
-
- c.Required.Features = stringMapToSortedSlice(features)
-
- for i, bi := range c.Required.Builtins {
- c.Required.Builtins[i] = bi.Minimal()
- }
-}
-
-func stringMapToSortedSlice(xs map[string]struct{}) []string {
- if len(xs) == 0 {
- return nil
- }
- s := make([]string, 0, len(xs))
- for k := range xs {
- s = append(s, k)
- }
- sort.Strings(s)
- return s
-}
-
-// checkRecursion ensures that there are no recursive definitions, i.e., there are
-// no cycles in the Graph.
-func (c *Compiler) checkRecursion() {
- eq := func(a, b util.T) bool {
- return a.(*Rule) == b.(*Rule)
- }
-
- c.RuleTree.DepthFirst(func(node *TreeNode) bool {
- for _, rule := range node.Values {
- for node := rule.(*Rule); node != nil; node = node.Else {
- c.checkSelfPath(node.Loc(), eq, node, node)
- }
- }
- return false
- })
-}
-
-func (c *Compiler) checkSelfPath(loc *Location, eq func(a, b util.T) bool, a, b util.T) {
- tr := NewGraphTraversal(c.Graph)
- if p := util.DFSPath(tr, eq, a, b); len(p) > 0 {
- n := make([]string, 0, len(p))
- for _, x := range p {
- n = append(n, astNodeToString(x))
- }
- c.err(NewError(RecursionErr, loc, "rule %v is recursive: %v", astNodeToString(a), strings.Join(n, " -> ")))
- }
-}
-
-func astNodeToString(x interface{}) string {
- return x.(*Rule).Ref().String()
-}
-
-// checkRuleConflicts ensures that rules definitions are not in conflict.
-func (c *Compiler) checkRuleConflicts() {
- rw := rewriteVarsInRef(c.RewrittenVars)
-
- c.RuleTree.DepthFirst(func(node *TreeNode) bool {
- if len(node.Values) == 0 {
- return false // go deeper
- }
-
- kinds := make(map[RuleKind]struct{}, len(node.Values))
- defaultRules := 0
- completeRules := 0
- partialRules := 0
- arities := make(map[int]struct{}, len(node.Values))
- name := ""
- var conflicts []Ref
-
- for _, rule := range node.Values {
- r := rule.(*Rule)
- ref := r.Ref()
- name = rw(ref.Copy()).String() // varRewriter operates in-place
- kinds[r.Head.RuleKind()] = struct{}{}
- arities[len(r.Head.Args)] = struct{}{}
- if r.Default {
- defaultRules++
- }
-
- // Single-value rules may not have any other rules in their extent.
- // Rules with vars in their ref are allowed to have rules inside their extent.
- // Only the ground portion (terms before the first var term) of a rule's ref is considered when determining
- // whether it's inside the extent of another (c.RuleTree is organized this way already).
- // These pairs are invalid:
- //
- // data.p.q.r { true } # data.p.q is { "r": true }
- // data.p.q.r.s { true }
- //
- // data.p.q.r { true }
- // data.p.q.r[s].t { s = input.key }
- //
- // But this is allowed:
- //
- // data.p.q.r { true }
- // data.p.q[r].s.t { r = input.key }
- //
- // data.p[r] := x { r = input.key; x = input.bar }
- // data.p.q[r] := x { r = input.key; x = input.bar }
- //
- // data.p.q[r] { r := input.r }
- // data.p.q.r.s { true }
- //
- // data.p.q[r] = 1 { r := "r" }
- // data.p.q.s = 2
- //
- // data.p[q][r] { q := input.q; r := input.r }
- // data.p.q.r { true }
- //
- // data.p.q[r] { r := input.r }
- // data.p[q].r { q := input.q }
- //
- // data.p.q[r][s] { r := input.r; s := input.s }
- // data.p[q].r.s { q := input.q }
-
- if r.Ref().IsGround() && len(node.Children) > 0 {
- conflicts = node.flattenChildren()
- }
-
- if r.Head.RuleKind() == SingleValue && r.Head.Ref().IsGround() {
- completeRules++
- } else {
- partialRules++
- }
- }
-
- switch {
- case conflicts != nil:
- c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "rule %v conflicts with %v", name, conflicts))
-
- case len(kinds) > 1 || len(arities) > 1 || (completeRules >= 1 && partialRules >= 1):
- c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "conflicting rules %v found", name))
-
- case defaultRules > 1:
- c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "multiple default rules %s found", name))
- }
-
- return false
- })
+// SafetyCheckVisitorParams defines the AST visitor parameters to use for collecting
+// variables during the safety check. This has to be exported because it's relied on
+// by the copy propagation implementation in topdown.
+var SafetyCheckVisitorParams = v1.SafetyCheckVisitorParams
- if c.pathExists != nil {
- for _, err := range CheckPathConflicts(c, c.pathExists) {
- c.err(err)
- }
- }
+// ComprehensionIndex specifies how the comprehension term can be indexed. The keys
+// tell the evaluator what variables to use for indexing. In the future, the index
+// could be expanded with more information that would allow the evaluator to index
+// a larger fragment of comprehensions (e.g., by closing over variables in the outer
+// query.)
+type ComprehensionIndex = v1.ComprehensionIndex
- // NOTE(sr): depthfirst might better use sorted for stable errs?
- c.ModuleTree.DepthFirst(func(node *ModuleTreeNode) bool {
- for _, mod := range node.Modules {
- for _, rule := range mod.Rules {
- ref := rule.Head.Ref().GroundPrefix()
- // Rules with a dynamic portion in their ref are exempted, as a conflict within the dynamic portion
- // can only be detected at eval-time.
- if len(ref) < len(rule.Head.Ref()) {
- continue
- }
+// ModuleTreeNode represents a node in the module tree. The module
+// tree is keyed by the package path.
+type ModuleTreeNode = v1.ModuleTreeNode
- childNode, tail := node.find(ref)
- if childNode != nil && len(tail) == 0 {
- for _, childMod := range childNode.Modules {
- // Avoid recursively checking a module for equality unless we know it's a possible self-match.
- if childMod.Equal(mod) {
- continue // don't self-conflict
- }
- msg := fmt.Sprintf("%v conflicts with rule %v defined at %v", childMod.Package, rule.Head.Ref(), rule.Loc())
- c.err(NewError(TypeErr, mod.Package.Loc(), msg))
- }
- }
- }
- }
- return false
- })
-}
+// TreeNode represents a node in the rule tree. The rule tree is keyed by
+// rule path.
+type TreeNode = v1.TreeNode
-func (c *Compiler) checkUndefinedFuncs() {
- for _, name := range c.sorted {
- m := c.Modules[name]
- for _, err := range checkUndefinedFuncs(c.TypeEnv, m, c.GetArity, c.RewrittenVars) {
- c.err(err)
- }
- }
+// NewRuleTree returns a new TreeNode that represents the root
+// of the rule tree populated with the given rules.
+func NewRuleTree(mtree *ModuleTreeNode) *TreeNode {
+ return v1.NewRuleTree(mtree)
}
-func checkUndefinedFuncs(env *TypeEnv, x interface{}, arity func(Ref) int, rwVars map[Var]Var) Errors {
-
- var errs Errors
-
- WalkExprs(x, func(expr *Expr) bool {
- if !expr.IsCall() {
- return false
- }
- ref := expr.Operator()
- if arity := arity(ref); arity >= 0 {
- operands := len(expr.Operands())
- if expr.Generated { // an output var was added
- if !expr.IsEquality() && operands != arity+1 {
- ref = rewriteVarsInRef(rwVars)(ref)
- errs = append(errs, arityMismatchError(env, ref, expr, arity, operands-1))
- return true
- }
- } else { // either output var or not
- if operands != arity && operands != arity+1 {
- ref = rewriteVarsInRef(rwVars)(ref)
- errs = append(errs, arityMismatchError(env, ref, expr, arity, operands))
- return true
- }
- }
- return false
- }
- ref = rewriteVarsInRef(rwVars)(ref)
- errs = append(errs, NewError(TypeErr, expr.Loc(), "undefined function %v", ref))
- return true
- })
-
- return errs
-}
+// Graph represents the graph of dependencies between rules.
+type Graph = v1.Graph
-func arityMismatchError(env *TypeEnv, f Ref, expr *Expr, exp, act int) *Error {
- if want, ok := env.Get(f).(*types.Function); ok { // generate richer error for built-in functions
- have := make([]types.Type, len(expr.Operands()))
- for i, op := range expr.Operands() {
- have[i] = env.Get(op)
- }
- return newArgError(expr.Loc(), f, "arity mismatch", have, want.NamedFuncArgs())
- }
- if act != 1 {
- return NewError(TypeErr, expr.Loc(), "function %v has arity %d, got %d arguments", f, exp, act)
- }
- return NewError(TypeErr, expr.Loc(), "function %v has arity %d, got %d argument", f, exp, act)
+// NewGraph returns a new Graph based on modules. The list function must return
+// the rules referred to directly by the ref.
+func NewGraph(modules map[string]*Module, list func(Ref) []*Rule) *Graph {
+ return v1.NewGraph(modules, list)
}
-// checkSafetyRuleBodies ensures that variables appearing in negated expressions or non-target
-// positions of built-in expressions will be bound when evaluating the rule from left
-// to right, re-ordering as necessary.
-func (c *Compiler) checkSafetyRuleBodies() {
- for _, name := range c.sorted {
- m := c.Modules[name]
- WalkRules(m, func(r *Rule) bool {
- safe := ReservedVars.Copy()
- safe.Update(r.Head.Args.Vars())
- r.Body = c.checkBodySafety(safe, r.Body)
- return false
- })
- }
-}
+// GraphTraversal is a Traversal that understands the dependency graph
+type GraphTraversal = v1.GraphTraversal
-func (c *Compiler) checkBodySafety(safe VarSet, b Body) Body {
- reordered, unsafe := reorderBodyForSafety(c.builtins, c.GetArity, safe, b)
- if errs := safetyErrorSlice(unsafe, c.RewrittenVars); len(errs) > 0 {
- for _, err := range errs {
- c.err(err)
- }
- return b
- }
- return reordered
+// NewGraphTraversal returns a Traversal for the dependency graph
+func NewGraphTraversal(graph *Graph) *GraphTraversal {
+ return v1.NewGraphTraversal(graph)
}
-// SafetyCheckVisitorParams defines the AST visitor parameters to use for collecting
-// variables during the safety check. This has to be exported because it's relied on
-// by the copy propagation implementation in topdown.
-var SafetyCheckVisitorParams = VarVisitorParams{
- SkipRefCallHead: true,
- SkipClosures: true,
+// OutputVarsFromBody returns all variables which are the "output" for
+// the given body. For safety checks this means that they would be
+// made safe by the body.
+func OutputVarsFromBody(c *Compiler, body Body, safe VarSet) VarSet {
+ return v1.OutputVarsFromBody(c, body, safe)
}
-// checkSafetyRuleHeads ensures that variables appearing in the head of a
-// rule also appear in the body.
-func (c *Compiler) checkSafetyRuleHeads() {
-
- for _, name := range c.sorted {
- m := c.Modules[name]
- WalkRules(m, func(r *Rule) bool {
- safe := r.Body.Vars(SafetyCheckVisitorParams)
- safe.Update(r.Head.Args.Vars())
- unsafe := r.Head.Vars().Diff(safe)
- for v := range unsafe {
- if w, ok := c.RewrittenVars[v]; ok {
- v = w
- }
- if !v.IsGenerated() {
- c.err(NewError(UnsafeVarErr, r.Loc(), "var %v is unsafe", v))
- }
- }
- return false
- })
- }
-}
-
-func compileSchema(goSchema interface{}, allowNet []string) (*gojsonschema.Schema, error) {
- gojsonschema.SetAllowNet(allowNet)
-
- var refLoader gojsonschema.JSONLoader
- sl := gojsonschema.NewSchemaLoader()
-
- if goSchema != nil {
- refLoader = gojsonschema.NewGoLoader(goSchema)
- } else {
- return nil, fmt.Errorf("no schema as input to compile")
- }
- schemasCompiled, err := sl.Compile(refLoader)
- if err != nil {
- return nil, fmt.Errorf("unable to compile the schema: %w", err)
- }
- return schemasCompiled, nil
-}
-
-func mergeSchemas(schemas ...*gojsonschema.SubSchema) (*gojsonschema.SubSchema, error) {
- if len(schemas) == 0 {
- return nil, nil
- }
- var result = schemas[0]
-
- for i := range schemas {
- if len(schemas[i].PropertiesChildren) > 0 {
- if !schemas[i].Types.Contains("object") {
- if err := schemas[i].Types.Add("object"); err != nil {
- return nil, fmt.Errorf("unable to set the type in schemas")
- }
- }
- } else if len(schemas[i].ItemsChildren) > 0 {
- if !schemas[i].Types.Contains("array") {
- if err := schemas[i].Types.Add("array"); err != nil {
- return nil, fmt.Errorf("unable to set the type in schemas")
- }
- }
- }
- }
-
- for i := 1; i < len(schemas); i++ {
- if result.Types.String() != schemas[i].Types.String() {
- return nil, fmt.Errorf("unable to merge these schemas: type mismatch: %v and %v", result.Types.String(), schemas[i].Types.String())
- } else if result.Types.Contains("object") && len(result.PropertiesChildren) > 0 && schemas[i].Types.Contains("object") && len(schemas[i].PropertiesChildren) > 0 {
- result.PropertiesChildren = append(result.PropertiesChildren, schemas[i].PropertiesChildren...)
- } else if result.Types.Contains("array") && len(result.ItemsChildren) > 0 && schemas[i].Types.Contains("array") && len(schemas[i].ItemsChildren) > 0 {
- for j := 0; j < len(schemas[i].ItemsChildren); j++ {
- if len(result.ItemsChildren)-1 < j && !(len(schemas[i].ItemsChildren)-1 < j) {
- result.ItemsChildren = append(result.ItemsChildren, schemas[i].ItemsChildren[j])
- }
- if result.ItemsChildren[j].Types.String() != schemas[i].ItemsChildren[j].Types.String() {
- return nil, fmt.Errorf("unable to merge these schemas")
- }
- }
- }
- }
- return result, nil
-}
-
-type schemaParser struct {
- definitionCache map[string]*cachedDef
-}
-
-type cachedDef struct {
- properties []*types.StaticProperty
-}
-
-func newSchemaParser() *schemaParser {
- return &schemaParser{
- definitionCache: map[string]*cachedDef{},
- }
-}
-
-func (parser *schemaParser) parseSchema(schema interface{}) (types.Type, error) {
- return parser.parseSchemaWithPropertyKey(schema, "")
-}
-
-func (parser *schemaParser) parseSchemaWithPropertyKey(schema interface{}, propertyKey string) (types.Type, error) {
- subSchema, ok := schema.(*gojsonschema.SubSchema)
- if !ok {
- return nil, fmt.Errorf("unexpected schema type %v", subSchema)
- }
-
- // Handle referenced schemas, returns directly when a $ref is found
- if subSchema.RefSchema != nil {
- if existing, ok := parser.definitionCache[subSchema.Ref.String()]; ok {
- return types.NewObject(existing.properties, nil), nil
- }
- return parser.parseSchemaWithPropertyKey(subSchema.RefSchema, subSchema.Ref.String())
- }
-
- // Handle anyOf
- if subSchema.AnyOf != nil {
- var orType types.Type
-
- // If there is a core schema, find its type first
- if subSchema.Types.IsTyped() {
- copySchema := *subSchema
- copySchemaRef := ©Schema
- copySchemaRef.AnyOf = nil
- coreType, err := parser.parseSchema(copySchemaRef)
- if err != nil {
- return nil, fmt.Errorf("unexpected schema type %v: %w", subSchema, err)
- }
-
- // Only add Object type with static props to orType
- if objType, ok := coreType.(*types.Object); ok {
- if objType.StaticProperties() != nil && objType.DynamicProperties() == nil {
- orType = types.Or(orType, coreType)
- }
- }
- }
-
- // Iterate through every property of AnyOf and add it to orType
- for _, pSchema := range subSchema.AnyOf {
- newtype, err := parser.parseSchema(pSchema)
- if err != nil {
- return nil, fmt.Errorf("unexpected schema type %v: %w", pSchema, err)
- }
- orType = types.Or(newtype, orType)
- }
-
- return orType, nil
- }
-
- if subSchema.AllOf != nil {
- subSchemaArray := subSchema.AllOf
- allOfResult, err := mergeSchemas(subSchemaArray...)
- if err != nil {
- return nil, err
- }
-
- if subSchema.Types.IsTyped() {
- if (subSchema.Types.Contains("object") && allOfResult.Types.Contains("object")) || (subSchema.Types.Contains("array") && allOfResult.Types.Contains("array")) {
- objectOrArrayResult, err := mergeSchemas(allOfResult, subSchema)
- if err != nil {
- return nil, err
- }
- return parser.parseSchema(objectOrArrayResult)
- } else if subSchema.Types.String() != allOfResult.Types.String() {
- return nil, fmt.Errorf("unable to merge these schemas")
- }
- }
- return parser.parseSchema(allOfResult)
- }
-
- if subSchema.Types.IsTyped() {
- if subSchema.Types.Contains("boolean") {
- return types.B, nil
-
- } else if subSchema.Types.Contains("string") {
- return types.S, nil
-
- } else if subSchema.Types.Contains("integer") || subSchema.Types.Contains("number") {
- return types.N, nil
-
- } else if subSchema.Types.Contains("object") {
- if len(subSchema.PropertiesChildren) > 0 {
- def := &cachedDef{
- properties: make([]*types.StaticProperty, 0, len(subSchema.PropertiesChildren)),
- }
- for _, pSchema := range subSchema.PropertiesChildren {
- def.properties = append(def.properties, types.NewStaticProperty(pSchema.Property, nil))
- }
- if propertyKey != "" {
- parser.definitionCache[propertyKey] = def
- }
- for _, pSchema := range subSchema.PropertiesChildren {
- newtype, err := parser.parseSchema(pSchema)
- if err != nil {
- return nil, fmt.Errorf("unexpected schema type %v: %w", pSchema, err)
- }
- for i, prop := range def.properties {
- if prop.Key == pSchema.Property {
- def.properties[i].Value = newtype
- break
- }
- }
- }
- return types.NewObject(def.properties, nil), nil
- }
- return types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), nil
-
- } else if subSchema.Types.Contains("array") {
- if len(subSchema.ItemsChildren) > 0 {
- if subSchema.ItemsChildrenIsSingleSchema {
- iSchema := subSchema.ItemsChildren[0]
- newtype, err := parser.parseSchema(iSchema)
- if err != nil {
- return nil, fmt.Errorf("unexpected schema type %v", iSchema)
- }
- return types.NewArray(nil, newtype), nil
- }
- newTypes := make([]types.Type, 0, len(subSchema.ItemsChildren))
- for i := 0; i != len(subSchema.ItemsChildren); i++ {
- iSchema := subSchema.ItemsChildren[i]
- newtype, err := parser.parseSchema(iSchema)
- if err != nil {
- return nil, fmt.Errorf("unexpected schema type %v", iSchema)
- }
- newTypes = append(newTypes, newtype)
- }
- return types.NewArray(newTypes, nil), nil
- }
- return types.NewArray(nil, types.A), nil
- }
- }
-
- // Assume types if not specified in schema
- if len(subSchema.PropertiesChildren) > 0 {
- if err := subSchema.Types.Add("object"); err == nil {
- return parser.parseSchema(subSchema)
- }
- } else if len(subSchema.ItemsChildren) > 0 {
- if err := subSchema.Types.Add("array"); err == nil {
- return parser.parseSchema(subSchema)
- }
- }
-
- return types.A, nil
-}
-
-func (c *Compiler) setAnnotationSet() {
- // Sorting modules by name for stable error reporting
- sorted := make([]*Module, 0, len(c.Modules))
- for _, mName := range c.sorted {
- sorted = append(sorted, c.Modules[mName])
- }
-
- as, errs := BuildAnnotationSet(sorted)
- for _, err := range errs {
- c.err(err)
- }
- c.annotationSet = as
-}
-
-// checkTypes runs the type checker on all rules. The type checker builds a
-// TypeEnv that is stored on the compiler.
-func (c *Compiler) checkTypes() {
- // Recursion is caught in earlier step, so this cannot fail.
- sorted, _ := c.Graph.Sort()
- checker := newTypeChecker().
- WithAllowNet(c.capabilities.AllowNet).
- WithSchemaSet(c.schemaSet).
- WithInputType(c.inputType).
- WithBuiltins(c.builtins).
- WithRequiredCapabilities(c.Required).
- WithVarRewriter(rewriteVarsInRef(c.RewrittenVars)).
- WithAllowUndefinedFunctionCalls(c.allowUndefinedFuncCalls)
- var as *AnnotationSet
- if c.useTypeCheckAnnotations {
- as = c.annotationSet
- }
- env, errs := checker.CheckTypes(c.TypeEnv, sorted, as)
- for _, err := range errs {
- c.err(err)
- }
- c.TypeEnv = env
-}
-
-func (c *Compiler) checkUnsafeBuiltins() {
- for _, name := range c.sorted {
- errs := checkUnsafeBuiltins(c.unsafeBuiltinsMap, c.Modules[name])
- for _, err := range errs {
- c.err(err)
- }
- }
-}
-
-func (c *Compiler) checkDeprecatedBuiltins() {
- for _, name := range c.sorted {
- mod := c.Modules[name]
- if c.strict || mod.regoV1Compatible() {
- errs := checkDeprecatedBuiltins(c.deprecatedBuiltinsMap, mod)
- for _, err := range errs {
- c.err(err)
- }
- }
- }
-}
-
-func (c *Compiler) runStage(metricName string, f func()) {
- if c.metrics != nil {
- c.metrics.Timer(metricName).Start()
- defer c.metrics.Timer(metricName).Stop()
- }
- f()
-}
-
-func (c *Compiler) runStageAfter(metricName string, s CompilerStage) *Error {
- if c.metrics != nil {
- c.metrics.Timer(metricName).Start()
- defer c.metrics.Timer(metricName).Stop()
- }
- return s(c)
-}
-
-func (c *Compiler) compile() {
-
- defer func() {
- if r := recover(); r != nil && r != errLimitReached {
- panic(r)
- }
- }()
-
- for _, s := range c.stages {
- if c.evalMode == EvalModeIR {
- switch s.name {
- case "BuildRuleIndices", "BuildComprehensionIndices":
- continue // skip these stages
- }
- }
-
- if c.allowUndefinedFuncCalls && (s.name == "CheckUndefinedFuncs" || s.name == "CheckSafetyRuleBodies") {
- continue
- }
-
- c.runStage(s.metricName, s.f)
- if c.Failed() {
- return
- }
- for _, a := range c.after[s.name] {
- if err := c.runStageAfter(a.MetricName, a.Stage); err != nil {
- c.err(err)
- return
- }
- }
- }
-}
-
-func (c *Compiler) init() {
-
- if c.initialized {
- return
- }
-
- if c.capabilities == nil {
- c.capabilities = CapabilitiesForThisVersion()
- }
-
- c.builtins = make(map[string]*Builtin, len(c.capabilities.Builtins)+len(c.customBuiltins))
-
- for _, bi := range c.capabilities.Builtins {
- c.builtins[bi.Name] = bi
- if bi.IsDeprecated() {
- c.deprecatedBuiltinsMap[bi.Name] = struct{}{}
- }
- }
-
- for name, bi := range c.customBuiltins {
- c.builtins[name] = bi
- }
-
- // Load the global input schema if one was provided.
- if c.schemaSet != nil {
- if schema := c.schemaSet.Get(SchemaRootRef); schema != nil {
- tpe, err := loadSchema(schema, c.capabilities.AllowNet)
- if err != nil {
- c.err(NewError(TypeErr, nil, err.Error()))
- } else {
- c.inputType = tpe
- }
- }
- }
-
- c.TypeEnv = newTypeChecker().
- WithSchemaSet(c.schemaSet).
- WithInputType(c.inputType).
- Env(c.builtins)
-
- c.initialized = true
-}
-
-func (c *Compiler) err(err *Error) {
- if c.maxErrs > 0 && len(c.Errors) >= c.maxErrs {
- c.Errors = append(c.Errors, errLimitReached)
- panic(errLimitReached)
- }
- c.Errors = append(c.Errors, err)
-}
-
-func (c *Compiler) getExports() *util.HashMap {
-
- rules := util.NewHashMap(func(a, b util.T) bool {
- return a.(Ref).Equal(b.(Ref))
- }, func(v util.T) int {
- return v.(Ref).Hash()
- })
-
- for _, name := range c.sorted {
- mod := c.Modules[name]
-
- for _, rule := range mod.Rules {
- hashMapAdd(rules, mod.Package.Path, rule.Head.Ref().GroundPrefix())
- }
- }
-
- return rules
-}
-
-func hashMapAdd(rules *util.HashMap, pkg, rule Ref) {
- prev, ok := rules.Get(pkg)
- if !ok {
- rules.Put(pkg, []Ref{rule})
- return
- }
- for _, p := range prev.([]Ref) {
- if p.Equal(rule) {
- return
- }
- }
- rules.Put(pkg, append(prev.([]Ref), rule))
-}
-
-func (c *Compiler) GetAnnotationSet() *AnnotationSet {
- return c.annotationSet
-}
-
-func (c *Compiler) checkDuplicateImports() {
- modules := make([]*Module, 0, len(c.Modules))
-
- for _, name := range c.sorted {
- mod := c.Modules[name]
- if c.strict || mod.regoV1Compatible() {
- modules = append(modules, mod)
- }
- }
-
- errs := checkDuplicateImports(modules)
- for _, err := range errs {
- c.err(err)
- }
-}
-
-func (c *Compiler) checkKeywordOverrides() {
- for _, name := range c.sorted {
- mod := c.Modules[name]
- if c.strict || mod.regoV1Compatible() {
- errs := checkRootDocumentOverrides(mod)
- for _, err := range errs {
- c.err(err)
- }
- }
- }
-}
-
-// resolveAllRefs resolves references in expressions to their fully qualified values.
-//
-// For instance, given the following module:
-//
-// package a.b
-// import data.foo.bar
-// p[x] { bar[_] = x }
-//
-// The reference "bar[_]" would be resolved to "data.foo.bar[_]".
-//
-// Ref rules are resolved, too:
-//
-// package a.b
-// q { c.d.e == 1 }
-// c.d[e] := 1 if e := "e"
-//
-// The reference "c.d.e" would be resolved to "data.a.b.c.d.e".
-func (c *Compiler) resolveAllRefs() {
-
- rules := c.getExports()
-
- for _, name := range c.sorted {
- mod := c.Modules[name]
-
- var ruleExports []Ref
- if x, ok := rules.Get(mod.Package.Path); ok {
- ruleExports = x.([]Ref)
- }
-
- globals := getGlobals(mod.Package, ruleExports, mod.Imports)
-
- WalkRules(mod, func(rule *Rule) bool {
- err := resolveRefsInRule(globals, rule)
- if err != nil {
- c.err(NewError(CompileErr, rule.Location, err.Error()))
- }
- return false
- })
-
- if c.strict { // check for unused imports
- for _, imp := range mod.Imports {
- path := imp.Path.Value.(Ref)
- if FutureRootDocument.Equal(path[0]) || RegoRootDocument.Equal(path[0]) {
- continue // ignore future and rego imports
- }
-
- for v, u := range globals {
- if v.Equal(imp.Name()) && !u.used {
- c.err(NewError(CompileErr, imp.Location, "%s unused", imp.String()))
- }
- }
- }
- }
- }
-
- if c.moduleLoader != nil {
-
- parsed, err := c.moduleLoader(c.Modules)
- if err != nil {
- c.err(NewError(CompileErr, nil, err.Error()))
- return
- }
-
- if len(parsed) == 0 {
- return
- }
-
- for id, module := range parsed {
- c.Modules[id] = module.Copy()
- c.sorted = append(c.sorted, id)
- if c.parsedModules != nil {
- c.parsedModules[id] = module
- }
- }
-
- sort.Strings(c.sorted)
- c.resolveAllRefs()
- }
-}
-
-func (c *Compiler) removeImports() {
- c.imports = make(map[string][]*Import, len(c.Modules))
- for name := range c.Modules {
- c.imports[name] = c.Modules[name].Imports
- c.Modules[name].Imports = nil
- }
-}
-
-func (c *Compiler) initLocalVarGen() {
- c.localvargen = newLocalVarGeneratorForModuleSet(c.sorted, c.Modules)
-}
-
-func (c *Compiler) rewriteComprehensionTerms() {
- f := newEqualityFactory(c.localvargen)
- for _, name := range c.sorted {
- mod := c.Modules[name]
- _, _ = rewriteComprehensionTerms(f, mod) // ignore error
- }
-}
-
-func (c *Compiler) rewriteExprTerms() {
- for _, name := range c.sorted {
- mod := c.Modules[name]
- WalkRules(mod, func(rule *Rule) bool {
- rewriteExprTermsInHead(c.localvargen, rule)
- rule.Body = rewriteExprTermsInBody(c.localvargen, rule.Body)
- return false
- })
- }
-}
-
-func (c *Compiler) rewriteRuleHeadRefs() {
- f := newEqualityFactory(c.localvargen)
- for _, name := range c.sorted {
- WalkRules(c.Modules[name], func(rule *Rule) bool {
-
- ref := rule.Head.Ref()
- // NOTE(sr): We're backfilling Refs here -- all parser code paths would have them, but
- // it's possible to construct Module{} instances from Golang code, so we need
- // to accommodate for that, too.
- if len(rule.Head.Reference) == 0 {
- rule.Head.Reference = ref
- }
-
- cannotSpeakStringPrefixRefs := true
- cannotSpeakGeneralRefs := true
- for _, f := range c.capabilities.Features {
- switch f {
- case FeatureRefHeadStringPrefixes:
- cannotSpeakStringPrefixRefs = false
- case FeatureRefHeads:
- cannotSpeakGeneralRefs = false
- }
- }
-
- if cannotSpeakStringPrefixRefs && cannotSpeakGeneralRefs && rule.Head.Name == "" {
- c.err(NewError(CompileErr, rule.Loc(), "rule heads with refs are not supported: %v", rule.Head.Reference))
- return true
- }
-
- for i := 1; i < len(ref); i++ {
- if cannotSpeakGeneralRefs && (rule.Head.RuleKind() == MultiValue || i != len(ref)-1) { // last
- if _, ok := ref[i].Value.(String); !ok {
- c.err(NewError(TypeErr, rule.Loc(), "rule heads with general refs (containing variables) are not supported: %v", rule.Head.Reference))
- continue
- }
- }
-
- // Rewrite so that any non-scalar elements in the rule's ref are vars:
- // p.q.r[y.z] { ... } => p.q.r[__local0__] { __local0__ = y.z }
- // p.q[a.b][c.d] { ... } => p.q[__local0__] { __local0__ = a.b; __local1__ = c.d }
- // because that's what the RuleTree knows how to deal with.
- if _, ok := ref[i].Value.(Var); !ok && !IsScalar(ref[i].Value) {
- expr := f.Generate(ref[i])
- if i == len(ref)-1 && rule.Head.Key.Equal(ref[i]) {
- rule.Head.Key = expr.Operand(0)
- }
- rule.Head.Reference[i] = expr.Operand(0)
- rule.Body.Append(expr)
- }
- }
-
- return true
- })
- }
-}
-
-func (c *Compiler) checkVoidCalls() {
- for _, name := range c.sorted {
- mod := c.Modules[name]
- for _, err := range checkVoidCalls(c.TypeEnv, mod) {
- c.err(err)
- }
- }
-}
-
-func (c *Compiler) rewritePrintCalls() {
- var modified bool
- if !c.enablePrintStatements {
- for _, name := range c.sorted {
- if erasePrintCalls(c.Modules[name]) {
- modified = true
- }
- }
- } else {
- for _, name := range c.sorted {
- mod := c.Modules[name]
- WalkRules(mod, func(r *Rule) bool {
- safe := r.Head.Args.Vars()
- safe.Update(ReservedVars)
- vis := func(b Body) bool {
- modrec, errs := rewritePrintCalls(c.localvargen, c.GetArity, safe, b)
- if modrec {
- modified = true
- }
- for _, err := range errs {
- c.err(err)
- }
- return false
- }
- WalkBodies(r.Head, vis)
- WalkBodies(r.Body, vis)
- return false
- })
- }
- }
- if modified {
- c.Required.addBuiltinSorted(Print)
- }
-}
-
-// checkVoidCalls returns errors for any expressions that treat void function
-// calls as values. The only void functions in Rego are specific built-ins like
-// print().
-func checkVoidCalls(env *TypeEnv, x interface{}) Errors {
- var errs Errors
- WalkTerms(x, func(x *Term) bool {
- if call, ok := x.Value.(Call); ok {
- if tpe, ok := env.Get(call[0]).(*types.Function); ok && tpe.Result() == nil {
- errs = append(errs, NewError(TypeErr, x.Loc(), "%v used as value", call))
- }
- }
- return false
- })
- return errs
-}
-
-// rewritePrintCalls will rewrite the body so that print operands are captured
-// in local variables and their evaluation occurs within a comprehension.
-// Wrapping the terms inside of a comprehension ensures that undefined values do
-// not short-circuit evaluation.
-//
-// For example, given the following print statement:
-//
-// print("the value of x is:", input.x)
-//
-// The expression would be rewritten to:
-//
-// print({__local0__ | __local0__ = "the value of x is:"}, {__local1__ | __local1__ = input.x})
-func rewritePrintCalls(gen *localVarGenerator, getArity func(Ref) int, globals VarSet, body Body) (bool, Errors) {
-
- var errs Errors
- var modified bool
-
- // Visit comprehension bodies recursively to ensure print statements inside
- // those bodies only close over variables that are safe.
- for i := range body {
- if ContainsClosures(body[i]) {
- safe := outputVarsForBody(body[:i], getArity, globals)
- safe.Update(globals)
- WalkClosures(body[i], func(x interface{}) bool {
- var modrec bool
- var errsrec Errors
- switch x := x.(type) {
- case *SetComprehension:
- modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body)
- case *ArrayComprehension:
- modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body)
- case *ObjectComprehension:
- modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body)
- case *Every:
- safe.Update(x.KeyValueVars())
- modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body)
- }
- if modrec {
- modified = true
- }
- errs = append(errs, errsrec...)
- return true
- })
- if len(errs) > 0 {
- return false, errs
- }
- }
- }
-
- for i := range body {
-
- if !isPrintCall(body[i]) {
- continue
- }
-
- modified = true
-
- var errs Errors
- safe := outputVarsForBody(body[:i], getArity, globals)
- safe.Update(globals)
- args := body[i].Operands()
-
- for j := range args {
- vis := NewVarVisitor().WithParams(SafetyCheckVisitorParams)
- vis.Walk(args[j])
- unsafe := vis.Vars().Diff(safe)
- for _, v := range unsafe.Sorted() {
- errs = append(errs, NewError(CompileErr, args[j].Loc(), "var %v is undeclared", v))
- }
- }
-
- if len(errs) > 0 {
- return false, errs
- }
-
- arr := NewArray()
-
- for j := range args {
- x := NewTerm(gen.Generate()).SetLocation(args[j].Loc())
- capture := Equality.Expr(x, args[j]).SetLocation(args[j].Loc())
- arr = arr.Append(SetComprehensionTerm(x, NewBody(capture)).SetLocation(args[j].Loc()))
- }
-
- body.Set(NewExpr([]*Term{
- NewTerm(InternalPrint.Ref()).SetLocation(body[i].Loc()),
- NewTerm(arr).SetLocation(body[i].Loc()),
- }).SetLocation(body[i].Loc()), i)
- }
-
- return modified, nil
-}
-
-func erasePrintCalls(node interface{}) bool {
- var modified bool
- NewGenericVisitor(func(x interface{}) bool {
- var modrec bool
- switch x := x.(type) {
- case *Rule:
- modrec, x.Body = erasePrintCallsInBody(x.Body)
- case *ArrayComprehension:
- modrec, x.Body = erasePrintCallsInBody(x.Body)
- case *SetComprehension:
- modrec, x.Body = erasePrintCallsInBody(x.Body)
- case *ObjectComprehension:
- modrec, x.Body = erasePrintCallsInBody(x.Body)
- case *Every:
- modrec, x.Body = erasePrintCallsInBody(x.Body)
- }
- if modrec {
- modified = true
- }
- return false
- }).Walk(node)
- return modified
-}
-
-func erasePrintCallsInBody(x Body) (bool, Body) {
-
- if !containsPrintCall(x) {
- return false, x
- }
-
- var cpy Body
-
- for i := range x {
-
- // Recursively visit any comprehensions contained in this expression.
- erasePrintCalls(x[i])
-
- if !isPrintCall(x[i]) {
- cpy.Append(x[i])
- }
- }
-
- if len(cpy) == 0 {
- term := BooleanTerm(true).SetLocation(x.Loc())
- expr := NewExpr(term).SetLocation(x.Loc())
- cpy.Append(expr)
- }
-
- return true, cpy
-}
-
-func containsPrintCall(x interface{}) bool {
- var found bool
- WalkExprs(x, func(expr *Expr) bool {
- if !found {
- if isPrintCall(expr) {
- found = true
- }
- }
- return found
- })
- return found
-}
-
-func isPrintCall(x *Expr) bool {
- return x.IsCall() && x.Operator().Equal(Print.Ref())
-}
-
-// rewriteRefsInHead will rewrite rules so that the head does not contain any
-// terms that require evaluation (e.g., refs or comprehensions). If the key or
-// value contains one or more of these terms, the key or value will be moved
-// into the body and assigned to a new variable. The new variable will replace
-// the key or value in the head.
-//
-// For instance, given the following rule:
-//
-// p[{"foo": data.foo[i]}] { i < 100 }
-//
-// The rule would be re-written as:
-//
-// p[__local0__] { i < 100; __local0__ = {"foo": data.foo[i]} }
-func (c *Compiler) rewriteRefsInHead() {
- f := newEqualityFactory(c.localvargen)
- for _, name := range c.sorted {
- mod := c.Modules[name]
- WalkRules(mod, func(rule *Rule) bool {
- if requiresEval(rule.Head.Key) {
- expr := f.Generate(rule.Head.Key)
- rule.Head.Key = expr.Operand(0)
- rule.Body.Append(expr)
- }
- if requiresEval(rule.Head.Value) {
- expr := f.Generate(rule.Head.Value)
- rule.Head.Value = expr.Operand(0)
- rule.Body.Append(expr)
- }
- for i := 0; i < len(rule.Head.Args); i++ {
- if requiresEval(rule.Head.Args[i]) {
- expr := f.Generate(rule.Head.Args[i])
- rule.Head.Args[i] = expr.Operand(0)
- rule.Body.Append(expr)
- }
- }
- return false
- })
- }
-}
-
-func (c *Compiler) rewriteEquals() {
- modified := false
- for _, name := range c.sorted {
- mod := c.Modules[name]
- modified = rewriteEquals(mod) || modified
- }
- if modified {
- c.Required.addBuiltinSorted(Equal)
- }
-}
-
-func (c *Compiler) rewriteDynamicTerms() {
- f := newEqualityFactory(c.localvargen)
- for _, name := range c.sorted {
- mod := c.Modules[name]
- WalkRules(mod, func(rule *Rule) bool {
- rule.Body = rewriteDynamics(f, rule.Body)
- return false
- })
- }
-}
-
-// rewriteTestRuleEqualities rewrites equality expressions in test rule bodies to create local vars for statements that would otherwise
-// not have their values captured through tracing, such as refs and comprehensions not unified/assigned to a local var.
-// For example, given the following module:
-//
-// package test
-//
-// p.q contains v if {
-// some v in numbers.range(1, 3)
-// }
-//
-// p.r := "foo"
-//
-// test_rule {
-// p == {
-// "q": {4, 5, 6}
-// }
-// }
-//
-// `p` in `test_rule` resolves to `data.test.p`, which won't be an entry in the virtual-cache and must therefore be calculated after-the-fact.
-// If `p` isn't captured in a local var, there is no trivial way to retrieve its value for test reporting.
-func (c *Compiler) rewriteTestRuleEqualities() {
- if !c.rewriteTestRulesForTracing {
- return
- }
-
- f := newEqualityFactory(c.localvargen)
- for _, name := range c.sorted {
- mod := c.Modules[name]
- WalkRules(mod, func(rule *Rule) bool {
- if strings.HasPrefix(string(rule.Head.Name), "test_") {
- rule.Body = rewriteTestEqualities(f, rule.Body)
- }
- return false
- })
- }
-}
-
-func (c *Compiler) parseMetadataBlocks() {
- // Only parse annotations if rego.metadata built-ins are called
- regoMetadataCalled := false
- for _, name := range c.sorted {
- mod := c.Modules[name]
- WalkExprs(mod, func(expr *Expr) bool {
- if isRegoMetadataChainCall(expr) || isRegoMetadataRuleCall(expr) {
- regoMetadataCalled = true
- }
- return regoMetadataCalled
- })
-
- if regoMetadataCalled {
- break
- }
- }
-
- if regoMetadataCalled {
- // NOTE: Possible optimization: only parse annotations for modules on the path of rego.metadata-calling module
- for _, name := range c.sorted {
- mod := c.Modules[name]
-
- if len(mod.Annotations) == 0 {
- var errs Errors
- mod.Annotations, errs = parseAnnotations(mod.Comments)
- errs = append(errs, attachAnnotationsNodes(mod)...)
- for _, err := range errs {
- c.err(err)
- }
-
- attachRuleAnnotations(mod)
- }
- }
- }
-}
-
-func (c *Compiler) rewriteRegoMetadataCalls() {
- eqFactory := newEqualityFactory(c.localvargen)
-
- _, chainFuncAllowed := c.builtins[RegoMetadataChain.Name]
- _, ruleFuncAllowed := c.builtins[RegoMetadataRule.Name]
-
- for _, name := range c.sorted {
- mod := c.Modules[name]
-
- WalkRules(mod, func(rule *Rule) bool {
- var firstChainCall *Expr
- var firstRuleCall *Expr
-
- WalkExprs(rule, func(expr *Expr) bool {
- if chainFuncAllowed && firstChainCall == nil && isRegoMetadataChainCall(expr) {
- firstChainCall = expr
- } else if ruleFuncAllowed && firstRuleCall == nil && isRegoMetadataRuleCall(expr) {
- firstRuleCall = expr
- }
- return firstChainCall != nil && firstRuleCall != nil
- })
-
- chainCalled := firstChainCall != nil
- ruleCalled := firstRuleCall != nil
-
- if chainCalled || ruleCalled {
- body := make(Body, 0, len(rule.Body)+2)
-
- var metadataChainVar Var
- if chainCalled {
- // Create and inject metadata chain for rule
-
- chain, err := createMetadataChain(c.annotationSet.Chain(rule))
- if err != nil {
- c.err(err)
- return false
- }
-
- chain.Location = firstChainCall.Location
- eq := eqFactory.Generate(chain)
- metadataChainVar = eq.Operands()[0].Value.(Var)
- body.Append(eq)
- }
-
- var metadataRuleVar Var
- if ruleCalled {
- // Create and inject metadata for rule
-
- var metadataRuleTerm *Term
-
- a := getPrimaryRuleAnnotations(c.annotationSet, rule)
- if a != nil {
- annotObj, err := a.toObject()
- if err != nil {
- c.err(err)
- return false
- }
- metadataRuleTerm = NewTerm(*annotObj)
- } else {
- // If rule has no annotations, assign an empty object
- metadataRuleTerm = ObjectTerm()
- }
-
- metadataRuleTerm.Location = firstRuleCall.Location
- eq := eqFactory.Generate(metadataRuleTerm)
- metadataRuleVar = eq.Operands()[0].Value.(Var)
- body.Append(eq)
- }
-
- for _, expr := range rule.Body {
- body.Append(expr)
- }
- rule.Body = body
-
- vis := func(b Body) bool {
- for _, err := range rewriteRegoMetadataCalls(&metadataChainVar, &metadataRuleVar, b, &c.RewrittenVars) {
- c.err(err)
- }
- return false
- }
- WalkBodies(rule.Head, vis)
- WalkBodies(rule.Body, vis)
- }
-
- return false
- })
- }
-}
-
-func getPrimaryRuleAnnotations(as *AnnotationSet, rule *Rule) *Annotations {
- annots := as.GetRuleScope(rule)
-
- if len(annots) == 0 {
- return nil
- }
-
- // Sort by annotation location; chain must start with annotations declared closest to rule, then going outward
- sort.SliceStable(annots, func(i, j int) bool {
- return annots[i].Location.Compare(annots[j].Location) > 0
- })
-
- return annots[0]
-}
-
-func rewriteRegoMetadataCalls(metadataChainVar *Var, metadataRuleVar *Var, body Body, rewrittenVars *map[Var]Var) Errors {
- var errs Errors
-
- WalkClosures(body, func(x interface{}) bool {
- switch x := x.(type) {
- case *ArrayComprehension:
- errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars)
- case *SetComprehension:
- errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars)
- case *ObjectComprehension:
- errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars)
- case *Every:
- errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars)
- }
- return true
- })
-
- for i := range body {
- expr := body[i]
- var metadataVar Var
-
- if metadataChainVar != nil && isRegoMetadataChainCall(expr) {
- metadataVar = *metadataChainVar
- } else if metadataRuleVar != nil && isRegoMetadataRuleCall(expr) {
- metadataVar = *metadataRuleVar
- } else {
- continue
- }
-
- // NOTE(johanfylling): An alternative strategy would be to walk the body and replace all operands[0]
- // usages with *metadataChainVar
- operands := expr.Operands()
- var newExpr *Expr
- if len(operands) > 0 { // There is an output var to rewrite
- rewrittenVar := operands[0]
- newExpr = Equality.Expr(rewrittenVar, NewTerm(metadataVar))
- } else { // No output var, just rewrite expr to metadataVar
- newExpr = NewExpr(NewTerm(metadataVar))
- }
-
- newExpr.Generated = true
- newExpr.Location = expr.Location
- body.Set(newExpr, i)
- }
-
- return errs
-}
-
-func isRegoMetadataChainCall(x *Expr) bool {
- return x.IsCall() && x.Operator().Equal(RegoMetadataChain.Ref())
-}
-
-func isRegoMetadataRuleCall(x *Expr) bool {
- return x.IsCall() && x.Operator().Equal(RegoMetadataRule.Ref())
-}
-
-func createMetadataChain(chain []*AnnotationsRef) (*Term, *Error) {
-
- metaArray := NewArray()
- for _, link := range chain {
- p := link.Path.toArray().
- Slice(1, -1) // Dropping leading 'data' element of path
- obj := NewObject(
- Item(StringTerm("path"), NewTerm(p)),
- )
- if link.Annotations != nil {
- annotObj, err := link.Annotations.toObject()
- if err != nil {
- return nil, err
- }
- obj.Insert(StringTerm("annotations"), NewTerm(*annotObj))
- }
- metaArray = metaArray.Append(NewTerm(obj))
- }
-
- return NewTerm(metaArray), nil
-}
-
-func (c *Compiler) rewriteLocalVars() {
-
- var assignment bool
-
- for _, name := range c.sorted {
- mod := c.Modules[name]
- gen := c.localvargen
-
- WalkRules(mod, func(rule *Rule) bool {
- argsStack := newLocalDeclaredVars()
-
- args := NewVarVisitor()
- if c.strict {
- args.Walk(rule.Head.Args)
- }
- unusedArgs := args.Vars()
-
- c.rewriteLocalArgVars(gen, argsStack, rule)
-
- // Rewrite local vars in each else-branch of the rule.
- // Note: this is done instead of a walk so that we can capture any unused function arguments
- // across else-branches.
- for rule := rule; rule != nil; rule = rule.Else {
- stack, errs := c.rewriteLocalVarsInRule(rule, unusedArgs, argsStack, gen)
- if stack.assignment {
- assignment = true
- }
-
- for arg := range unusedArgs {
- if stack.Count(arg) > 1 {
- delete(unusedArgs, arg)
- }
- }
-
- for _, err := range errs {
- c.err(err)
- }
- }
-
- if c.strict {
- // Report an error for each unused function argument
- for arg := range unusedArgs {
- if !arg.IsWildcard() {
- c.err(NewError(CompileErr, rule.Head.Location, "unused argument %v. (hint: use _ (wildcard variable) instead)", arg))
- }
- }
- }
-
- return true
- })
- }
-
- if assignment {
- c.Required.addBuiltinSorted(Assign)
- }
-}
-
-func (c *Compiler) rewriteLocalVarsInRule(rule *Rule, unusedArgs VarSet, argsStack *localDeclaredVars, gen *localVarGenerator) (*localDeclaredVars, Errors) {
- // Rewrite assignments contained in head of rule. Assignments can
- // occur in rule head if they're inside a comprehension. Note,
- // assigned vars in comprehensions in the head will be rewritten
- // first to preserve scoping rules. For example:
- //
- // p = [x | x := 1] { x := 2 } becomes p = [__local0__ | __local0__ = 1] { __local1__ = 2 }
- //
- // This behaviour is consistent scoping inside the body. For example:
- //
- // p = xs { x := 2; xs = [x | x := 1] } becomes p = xs { __local0__ = 2; xs = [__local1__ | __local1__ = 1] }
- nestedXform := &rewriteNestedHeadVarLocalTransform{
- gen: gen,
- RewrittenVars: c.RewrittenVars,
- strict: c.strict,
- }
-
- NewGenericVisitor(nestedXform.Visit).Walk(rule.Head)
-
- for _, err := range nestedXform.errs {
- c.err(err)
- }
-
- // Rewrite assignments in body.
- used := NewVarSet()
-
- for _, t := range rule.Head.Ref()[1:] {
- used.Update(t.Vars())
- }
-
- if rule.Head.Key != nil {
- used.Update(rule.Head.Key.Vars())
- }
-
- if rule.Head.Value != nil {
- valueVars := rule.Head.Value.Vars()
- used.Update(valueVars)
- for arg := range unusedArgs {
- if valueVars.Contains(arg) {
- delete(unusedArgs, arg)
- }
- }
- }
-
- stack := argsStack.Copy()
-
- body, declared, errs := rewriteLocalVars(gen, stack, used, rule.Body, c.strict)
-
- // For rewritten vars use the collection of all variables that
- // were in the stack at some point in time.
- for k, v := range stack.rewritten {
- c.RewrittenVars[k] = v
- }
-
- rule.Body = body
-
- // Rewrite vars in head that refer to locally declared vars in the body.
- localXform := rewriteHeadVarLocalTransform{declared: declared}
-
- for i := range rule.Head.Args {
- rule.Head.Args[i], _ = transformTerm(localXform, rule.Head.Args[i])
- }
-
- for i := 1; i < len(rule.Head.Ref()); i++ {
- rule.Head.Reference[i], _ = transformTerm(localXform, rule.Head.Ref()[i])
- }
- if rule.Head.Key != nil {
- rule.Head.Key, _ = transformTerm(localXform, rule.Head.Key)
- }
-
- if rule.Head.Value != nil {
- rule.Head.Value, _ = transformTerm(localXform, rule.Head.Value)
- }
- return stack, errs
-}
-
-type rewriteNestedHeadVarLocalTransform struct {
- gen *localVarGenerator
- errs Errors
- RewrittenVars map[Var]Var
- strict bool
-}
-
-func (xform *rewriteNestedHeadVarLocalTransform) Visit(x interface{}) bool {
-
- if term, ok := x.(*Term); ok {
-
- stop := false
- stack := newLocalDeclaredVars()
-
- switch x := term.Value.(type) {
- case *object:
- cpy, _ := x.Map(func(k, v *Term) (*Term, *Term, error) {
- kcpy := k.Copy()
- NewGenericVisitor(xform.Visit).Walk(kcpy)
- vcpy := v.Copy()
- NewGenericVisitor(xform.Visit).Walk(vcpy)
- return kcpy, vcpy, nil
- })
- term.Value = cpy
- stop = true
- case *set:
- cpy, _ := x.Map(func(v *Term) (*Term, error) {
- vcpy := v.Copy()
- NewGenericVisitor(xform.Visit).Walk(vcpy)
- return vcpy, nil
- })
- term.Value = cpy
- stop = true
- case *ArrayComprehension:
- xform.errs = rewriteDeclaredVarsInArrayComprehension(xform.gen, stack, x, xform.errs, xform.strict)
- stop = true
- case *SetComprehension:
- xform.errs = rewriteDeclaredVarsInSetComprehension(xform.gen, stack, x, xform.errs, xform.strict)
- stop = true
- case *ObjectComprehension:
- xform.errs = rewriteDeclaredVarsInObjectComprehension(xform.gen, stack, x, xform.errs, xform.strict)
- stop = true
- }
-
- for k, v := range stack.rewritten {
- xform.RewrittenVars[k] = v
- }
-
- return stop
- }
-
- return false
-}
-
-type rewriteHeadVarLocalTransform struct {
- declared map[Var]Var
-}
-
-func (xform rewriteHeadVarLocalTransform) Transform(x interface{}) (interface{}, error) {
- if v, ok := x.(Var); ok {
- if gv, ok := xform.declared[v]; ok {
- return gv, nil
- }
- }
- return x, nil
-}
-
-func (c *Compiler) rewriteLocalArgVars(gen *localVarGenerator, stack *localDeclaredVars, rule *Rule) {
-
- vis := &ruleArgLocalRewriter{
- stack: stack,
- gen: gen,
- }
-
- for i := range rule.Head.Args {
- Walk(vis, rule.Head.Args[i])
- }
-
- for i := range vis.errs {
- c.err(vis.errs[i])
- }
-}
-
-type ruleArgLocalRewriter struct {
- stack *localDeclaredVars
- gen *localVarGenerator
- errs []*Error
-}
-
-func (vis *ruleArgLocalRewriter) Visit(x interface{}) Visitor {
-
- t, ok := x.(*Term)
- if !ok {
- return vis
- }
-
- switch v := t.Value.(type) {
- case Var:
- gv, ok := vis.stack.Declared(v)
- if ok {
- vis.stack.Seen(v)
- } else {
- gv = vis.gen.Generate()
- vis.stack.Insert(v, gv, argVar)
- }
- t.Value = gv
- return nil
- case *object:
- if cpy, err := v.Map(func(k, v *Term) (*Term, *Term, error) {
- vcpy := v.Copy()
- Walk(vis, vcpy)
- return k, vcpy, nil
- }); err != nil {
- vis.errs = append(vis.errs, NewError(CompileErr, t.Location, err.Error()))
- } else {
- t.Value = cpy
- }
- return nil
- case Null, Boolean, Number, String, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Set:
- // Scalars are no-ops. Comprehensions are handled above. Sets must not
- // contain variables.
- return nil
- case Call:
- vis.errs = append(vis.errs, NewError(CompileErr, t.Location, "rule arguments cannot contain calls"))
- return nil
- default:
- // Recurse on refs and arrays. Any embedded
- // variables can be rewritten.
- return vis
- }
-}
-
-func (c *Compiler) rewriteWithModifiers() {
- f := newEqualityFactory(c.localvargen)
- for _, name := range c.sorted {
- mod := c.Modules[name]
- t := NewGenericTransformer(func(x interface{}) (interface{}, error) {
- body, ok := x.(Body)
- if !ok {
- return x, nil
- }
- body, err := rewriteWithModifiersInBody(c, c.unsafeBuiltinsMap, f, body)
- if err != nil {
- c.err(err)
- }
-
- return body, nil
- })
- _, _ = Transform(t, mod) // ignore error
- }
-}
-
-func (c *Compiler) setModuleTree() {
- c.ModuleTree = NewModuleTree(c.Modules)
-}
-
-func (c *Compiler) setRuleTree() {
- c.RuleTree = NewRuleTree(c.ModuleTree)
-}
-
-func (c *Compiler) setGraph() {
- list := func(r Ref) []*Rule {
- return c.GetRulesDynamicWithOpts(r, RulesOptions{IncludeHiddenModules: true})
- }
- c.Graph = NewGraph(c.Modules, list)
-}
-
-type queryCompiler struct {
- compiler *Compiler
- qctx *QueryContext
- typeEnv *TypeEnv
- rewritten map[Var]Var
- after map[string][]QueryCompilerStageDefinition
- unsafeBuiltins map[string]struct{}
- comprehensionIndices map[*Term]*ComprehensionIndex
- enablePrintStatements bool
-}
-
-func newQueryCompiler(compiler *Compiler) QueryCompiler {
- qc := &queryCompiler{
- compiler: compiler,
- qctx: nil,
- after: map[string][]QueryCompilerStageDefinition{},
- comprehensionIndices: map[*Term]*ComprehensionIndex{},
- }
- return qc
-}
-
-func (qc *queryCompiler) WithStrict(strict bool) QueryCompiler {
- qc.compiler.WithStrict(strict)
- return qc
-}
-
-func (qc *queryCompiler) WithEnablePrintStatements(yes bool) QueryCompiler {
- qc.enablePrintStatements = yes
- return qc
-}
-
-func (qc *queryCompiler) WithContext(qctx *QueryContext) QueryCompiler {
- qc.qctx = qctx
- return qc
-}
-
-func (qc *queryCompiler) WithStageAfter(after string, stage QueryCompilerStageDefinition) QueryCompiler {
- qc.after[after] = append(qc.after[after], stage)
- return qc
-}
-
-func (qc *queryCompiler) WithUnsafeBuiltins(unsafe map[string]struct{}) QueryCompiler {
- qc.unsafeBuiltins = unsafe
- return qc
-}
-
-func (qc *queryCompiler) RewrittenVars() map[Var]Var {
- return qc.rewritten
-}
-
-func (qc *queryCompiler) ComprehensionIndex(term *Term) *ComprehensionIndex {
- if result, ok := qc.comprehensionIndices[term]; ok {
- return result
- } else if result, ok := qc.compiler.comprehensionIndices[term]; ok {
- return result
- }
- return nil
-}
-
-func (qc *queryCompiler) runStage(metricName string, qctx *QueryContext, query Body, s func(*QueryContext, Body) (Body, error)) (Body, error) {
- if qc.compiler.metrics != nil {
- qc.compiler.metrics.Timer(metricName).Start()
- defer qc.compiler.metrics.Timer(metricName).Stop()
- }
- return s(qctx, query)
-}
-
-func (qc *queryCompiler) runStageAfter(metricName string, query Body, s QueryCompilerStage) (Body, error) {
- if qc.compiler.metrics != nil {
- qc.compiler.metrics.Timer(metricName).Start()
- defer qc.compiler.metrics.Timer(metricName).Stop()
- }
- return s(qc, query)
-}
-
-type queryStage = struct {
- name string
- metricName string
- f func(*QueryContext, Body) (Body, error)
-}
-
-func (qc *queryCompiler) Compile(query Body) (Body, error) {
- if len(query) == 0 {
- return nil, Errors{NewError(CompileErr, nil, "empty query cannot be compiled")}
- }
-
- query = query.Copy()
-
- stages := []queryStage{
- {"CheckKeywordOverrides", "query_compile_stage_check_keyword_overrides", qc.checkKeywordOverrides},
- {"ResolveRefs", "query_compile_stage_resolve_refs", qc.resolveRefs},
- {"RewriteLocalVars", "query_compile_stage_rewrite_local_vars", qc.rewriteLocalVars},
- {"CheckVoidCalls", "query_compile_stage_check_void_calls", qc.checkVoidCalls},
- {"RewritePrintCalls", "query_compile_stage_rewrite_print_calls", qc.rewritePrintCalls},
- {"RewriteExprTerms", "query_compile_stage_rewrite_expr_terms", qc.rewriteExprTerms},
- {"RewriteComprehensionTerms", "query_compile_stage_rewrite_comprehension_terms", qc.rewriteComprehensionTerms},
- {"RewriteWithValues", "query_compile_stage_rewrite_with_values", qc.rewriteWithModifiers},
- {"CheckUndefinedFuncs", "query_compile_stage_check_undefined_funcs", qc.checkUndefinedFuncs},
- {"CheckSafety", "query_compile_stage_check_safety", qc.checkSafety},
- {"RewriteDynamicTerms", "query_compile_stage_rewrite_dynamic_terms", qc.rewriteDynamicTerms},
- {"CheckTypes", "query_compile_stage_check_types", qc.checkTypes},
- {"CheckUnsafeBuiltins", "query_compile_stage_check_unsafe_builtins", qc.checkUnsafeBuiltins},
- {"CheckDeprecatedBuiltins", "query_compile_stage_check_deprecated_builtins", qc.checkDeprecatedBuiltins},
- }
- if qc.compiler.evalMode == EvalModeTopdown {
- stages = append(stages, queryStage{"BuildComprehensionIndex", "query_compile_stage_build_comprehension_index", qc.buildComprehensionIndices})
- }
-
- qctx := qc.qctx.Copy()
-
- for _, s := range stages {
- var err error
- query, err = qc.runStage(s.metricName, qctx, query, s.f)
- if err != nil {
- return nil, qc.applyErrorLimit(err)
- }
- for _, s := range qc.after[s.name] {
- query, err = qc.runStageAfter(s.MetricName, query, s.Stage)
- if err != nil {
- return nil, qc.applyErrorLimit(err)
- }
- }
- }
-
- return query, nil
-}
-
-func (qc *queryCompiler) TypeEnv() *TypeEnv {
- return qc.typeEnv
-}
-
-func (qc *queryCompiler) applyErrorLimit(err error) error {
- var errs Errors
- if errors.As(err, &errs) {
- if qc.compiler.maxErrs > 0 && len(errs) > qc.compiler.maxErrs {
- err = append(errs[:qc.compiler.maxErrs], errLimitReached)
- }
- }
- return err
-}
-
-func (qc *queryCompiler) checkKeywordOverrides(_ *QueryContext, body Body) (Body, error) {
- if qc.compiler.strict {
- if errs := checkRootDocumentOverrides(body); len(errs) > 0 {
- return nil, errs
- }
- }
- return body, nil
-}
-
-func (qc *queryCompiler) resolveRefs(qctx *QueryContext, body Body) (Body, error) {
-
- var globals map[Var]*usedRef
-
- if qctx != nil {
- pkg := qctx.Package
- // Query compiler ought to generate a package if one was not provided and one or more imports were provided.
- // The generated package name could even be an empty string to avoid conflicts (it doesn't have to be valid syntactically)
- if pkg == nil && len(qctx.Imports) > 0 {
- pkg = &Package{Path: RefTerm(VarTerm("")).Value.(Ref)}
- }
- if pkg != nil {
- var ruleExports []Ref
- rules := qc.compiler.getExports()
- if exist, ok := rules.Get(pkg.Path); ok {
- ruleExports = exist.([]Ref)
- }
-
- globals = getGlobals(qctx.Package, ruleExports, qctx.Imports)
- qctx.Imports = nil
- }
- }
-
- ignore := &declaredVarStack{declaredVars(body)}
-
- return resolveRefsInBody(globals, ignore, body), nil
-}
-
-func (qc *queryCompiler) rewriteComprehensionTerms(_ *QueryContext, body Body) (Body, error) {
- gen := newLocalVarGenerator("q", body)
- f := newEqualityFactory(gen)
- node, err := rewriteComprehensionTerms(f, body)
- if err != nil {
- return nil, err
- }
- return node.(Body), nil
-}
-
-func (qc *queryCompiler) rewriteDynamicTerms(_ *QueryContext, body Body) (Body, error) {
- gen := newLocalVarGenerator("q", body)
- f := newEqualityFactory(gen)
- return rewriteDynamics(f, body), nil
-}
-
-func (qc *queryCompiler) rewriteExprTerms(_ *QueryContext, body Body) (Body, error) {
- gen := newLocalVarGenerator("q", body)
- return rewriteExprTermsInBody(gen, body), nil
-}
-
-func (qc *queryCompiler) rewriteLocalVars(_ *QueryContext, body Body) (Body, error) {
- gen := newLocalVarGenerator("q", body)
- stack := newLocalDeclaredVars()
- body, _, err := rewriteLocalVars(gen, stack, nil, body, qc.compiler.strict)
- if len(err) != 0 {
- return nil, err
- }
- qc.rewritten = make(map[Var]Var, len(stack.rewritten))
- for k, v := range stack.rewritten {
- // The vars returned during the rewrite will include all seen vars,
- // even if they're not declared with an assignment operation. We don't
- // want to include these inside the rewritten set though.
- qc.rewritten[k] = v
- }
- return body, nil
-}
-
-func (qc *queryCompiler) rewritePrintCalls(_ *QueryContext, body Body) (Body, error) {
- if !qc.enablePrintStatements {
- _, cpy := erasePrintCallsInBody(body)
- return cpy, nil
- }
- gen := newLocalVarGenerator("q", body)
- if _, errs := rewritePrintCalls(gen, qc.compiler.GetArity, ReservedVars, body); len(errs) > 0 {
- return nil, errs
- }
- return body, nil
-}
-
-func (qc *queryCompiler) checkVoidCalls(_ *QueryContext, body Body) (Body, error) {
- if errs := checkVoidCalls(qc.compiler.TypeEnv, body); len(errs) > 0 {
- return nil, errs
- }
- return body, nil
-}
-
-func (qc *queryCompiler) checkUndefinedFuncs(_ *QueryContext, body Body) (Body, error) {
- if errs := checkUndefinedFuncs(qc.compiler.TypeEnv, body, qc.compiler.GetArity, qc.rewritten); len(errs) > 0 {
- return nil, errs
- }
- return body, nil
-}
-
-func (qc *queryCompiler) checkSafety(_ *QueryContext, body Body) (Body, error) {
- safe := ReservedVars.Copy()
- reordered, unsafe := reorderBodyForSafety(qc.compiler.builtins, qc.compiler.GetArity, safe, body)
- if errs := safetyErrorSlice(unsafe, qc.RewrittenVars()); len(errs) > 0 {
- return nil, errs
- }
- return reordered, nil
-}
-
-func (qc *queryCompiler) checkTypes(_ *QueryContext, body Body) (Body, error) {
- var errs Errors
- checker := newTypeChecker().
- WithSchemaSet(qc.compiler.schemaSet).
- WithInputType(qc.compiler.inputType).
- WithVarRewriter(rewriteVarsInRef(qc.rewritten, qc.compiler.RewrittenVars))
- qc.typeEnv, errs = checker.CheckBody(qc.compiler.TypeEnv, body)
- if len(errs) > 0 {
- return nil, errs
- }
-
- return body, nil
-}
-
-func (qc *queryCompiler) checkUnsafeBuiltins(_ *QueryContext, body Body) (Body, error) {
- errs := checkUnsafeBuiltins(qc.unsafeBuiltinsMap(), body)
- if len(errs) > 0 {
- return nil, errs
- }
- return body, nil
-}
-
-func (qc *queryCompiler) unsafeBuiltinsMap() map[string]struct{} {
- if qc.unsafeBuiltins != nil {
- return qc.unsafeBuiltins
- }
- return qc.compiler.unsafeBuiltinsMap
-}
-
-func (qc *queryCompiler) checkDeprecatedBuiltins(_ *QueryContext, body Body) (Body, error) {
- if qc.compiler.strict {
- errs := checkDeprecatedBuiltins(qc.compiler.deprecatedBuiltinsMap, body)
- if len(errs) > 0 {
- return nil, errs
- }
- }
- return body, nil
-}
-
-func (qc *queryCompiler) rewriteWithModifiers(_ *QueryContext, body Body) (Body, error) {
- f := newEqualityFactory(newLocalVarGenerator("q", body))
- body, err := rewriteWithModifiersInBody(qc.compiler, qc.unsafeBuiltinsMap(), f, body)
- if err != nil {
- return nil, Errors{err}
- }
- return body, nil
-}
-
-func (qc *queryCompiler) buildComprehensionIndices(_ *QueryContext, body Body) (Body, error) {
- // NOTE(tsandall): The query compiler does not have a metrics object so we
- // cannot record index metrics currently.
- _ = buildComprehensionIndices(qc.compiler.debug, qc.compiler.GetArity, ReservedVars, qc.RewrittenVars(), body, qc.comprehensionIndices)
- return body, nil
-}
-
-// ComprehensionIndex specifies how the comprehension term can be indexed. The keys
-// tell the evaluator what variables to use for indexing. In the future, the index
-// could be expanded with more information that would allow the evaluator to index
-// a larger fragment of comprehensions (e.g., by closing over variables in the outer
-// query.)
-type ComprehensionIndex struct {
- Term *Term
- Keys []*Term
-}
-
-func (ci *ComprehensionIndex) String() string {
- if ci == nil {
- return ""
- }
- return fmt.Sprintf("", NewArray(ci.Keys...))
-}
-
-func buildComprehensionIndices(dbg debug.Debug, arity func(Ref) int, candidates VarSet, rwVars map[Var]Var, node interface{}, result map[*Term]*ComprehensionIndex) uint64 {
- var n uint64
- cpy := candidates.Copy()
- WalkBodies(node, func(b Body) bool {
- for _, expr := range b {
- index := getComprehensionIndex(dbg, arity, cpy, rwVars, expr)
- if index != nil {
- result[index.Term] = index
- n++
- }
- // Any variables appearing in the expressions leading up to the comprehension
- // are fair-game to be used as index keys.
- cpy.Update(expr.Vars(VarVisitorParams{SkipClosures: true, SkipRefCallHead: true}))
- }
- return false
- })
- return n
-}
-
-func getComprehensionIndex(dbg debug.Debug, arity func(Ref) int, candidates VarSet, rwVars map[Var]Var, expr *Expr) *ComprehensionIndex {
-
- // Ignore everything except = expressions. Extract
- // the comprehension term from the expression.
- if !expr.IsEquality() || expr.Negated || len(expr.With) > 0 {
- // No debug message, these are assumed to be known hinderances
- // to comprehension indexing.
- return nil
- }
-
- var term *Term
-
- lhs, rhs := expr.Operand(0), expr.Operand(1)
-
- if _, ok := lhs.Value.(Var); ok && IsComprehension(rhs.Value) {
- term = rhs
- } else if _, ok := rhs.Value.(Var); ok && IsComprehension(lhs.Value) {
- term = lhs
- }
-
- if term == nil {
- // no debug for this, it's the ordinary "nothing to do here" case
- return nil
- }
-
- // Ignore comprehensions that contain expressions that close over variables
- // in the outer body if those variables are not also output variables in the
- // comprehension body. In other words, ignore comprehensions that we cannot
- // safely evaluate without bindings from the outer body. For example:
- //
- // x = [1]
- // [true | data.y[z] = x] # safe to evaluate w/o outer body
- // [true | data.y[z] = x[0]] # NOT safe to evaluate because 'x' would be unsafe.
- //
- // By identifying output variables in the body we also know what to index on by
- // intersecting with candidate variables from the outer query.
- //
- // For example:
- //
- // x = data.foo[_]
- // _ = [y | data.bar[y] = x] # index on 'x'
- //
- // This query goes from O(data.foo*data.bar) to O(data.foo+data.bar).
- var body Body
-
- switch x := term.Value.(type) {
- case *ArrayComprehension:
- body = x.Body
- case *SetComprehension:
- body = x.Body
- case *ObjectComprehension:
- body = x.Body
- }
-
- outputs := outputVarsForBody(body, arity, ReservedVars)
- unsafe := body.Vars(SafetyCheckVisitorParams).Diff(outputs).Diff(ReservedVars)
-
- if len(unsafe) > 0 {
- dbg.Printf("%s: comprehension index: unsafe vars: %v", expr.Location, unsafe)
- return nil
- }
-
- // Similarly, ignore comprehensions that contain references with output variables
- // that intersect with the candidates. Indexing these comprehensions could worsen
- // performance.
- regressionVis := newComprehensionIndexRegressionCheckVisitor(candidates)
- regressionVis.Walk(body)
- if regressionVis.worse {
- dbg.Printf("%s: comprehension index: output vars intersect candidates", expr.Location)
- return nil
- }
-
- // Check if any nested comprehensions close over candidates. If any intersection is found
- // the comprehension cannot be cached because it would require closing over the candidates
- // which the evaluator does not support today.
- nestedVis := newComprehensionIndexNestedCandidateVisitor(candidates)
- nestedVis.Walk(body)
- if nestedVis.found {
- dbg.Printf("%s: comprehension index: nested comprehensions close over candidates", expr.Location)
- return nil
- }
-
- // Make a sorted set of variable names that will serve as the index key set.
- // Sort to ensure deterministic indexing. In future this could be relaxed
- // if we can decide that one ordering is better than another. If the set is
- // empty, there is no indexing to do.
- indexVars := candidates.Intersect(outputs)
- if len(indexVars) == 0 {
- dbg.Printf("%s: comprehension index: no index vars", expr.Location)
- return nil
- }
-
- result := make([]*Term, 0, len(indexVars))
-
- for v := range indexVars {
- result = append(result, NewTerm(v))
- }
-
- sort.Slice(result, func(i, j int) bool {
- return result[i].Value.Compare(result[j].Value) < 0
- })
-
- debugRes := make([]*Term, len(result))
- for i, r := range result {
- if o, ok := rwVars[r.Value.(Var)]; ok {
- debugRes[i] = NewTerm(o)
- } else {
- debugRes[i] = r
- }
- }
- dbg.Printf("%s: comprehension index: built with keys: %v", expr.Location, debugRes)
- return &ComprehensionIndex{Term: term, Keys: result}
-}
-
-type comprehensionIndexRegressionCheckVisitor struct {
- candidates VarSet
- seen VarSet
- worse bool
-}
-
-// TODO(tsandall): Improve this so that users can either supply this list explicitly
-// or the information is maintained on the built-in function declaration. What we really
-// need to know is whether the built-in function allows callers to push down output
-// values or not. It's unlikely that anything outside of OPA does this today so this
-// solution is fine for now.
-var comprehensionIndexBlacklist = map[string]int{
- WalkBuiltin.Name: len(WalkBuiltin.Decl.FuncArgs().Args),
-}
-
-func newComprehensionIndexRegressionCheckVisitor(candidates VarSet) *comprehensionIndexRegressionCheckVisitor {
- return &comprehensionIndexRegressionCheckVisitor{
- candidates: candidates,
- seen: NewVarSet(),
- }
-}
-
-func (vis *comprehensionIndexRegressionCheckVisitor) Walk(x interface{}) {
- NewGenericVisitor(vis.visit).Walk(x)
-}
-
-func (vis *comprehensionIndexRegressionCheckVisitor) visit(x interface{}) bool {
- if !vis.worse {
- switch x := x.(type) {
- case *Expr:
- operands := x.Operands()
- if pos := comprehensionIndexBlacklist[x.Operator().String()]; pos > 0 && pos < len(operands) {
- vis.assertEmptyIntersection(operands[pos].Vars())
- }
- case Ref:
- vis.assertEmptyIntersection(x.OutputVars())
- case Var:
- vis.seen.Add(x)
- // Always skip comprehensions. We do not have to visit their bodies here.
- case *ArrayComprehension, *SetComprehension, *ObjectComprehension:
- return true
- }
- }
- return vis.worse
-}
-
-func (vis *comprehensionIndexRegressionCheckVisitor) assertEmptyIntersection(vs VarSet) {
- for v := range vs {
- if vis.candidates.Contains(v) && !vis.seen.Contains(v) {
- vis.worse = true
- return
- }
- }
-}
-
-type comprehensionIndexNestedCandidateVisitor struct {
- candidates VarSet
- found bool
-}
-
-func newComprehensionIndexNestedCandidateVisitor(candidates VarSet) *comprehensionIndexNestedCandidateVisitor {
- return &comprehensionIndexNestedCandidateVisitor{
- candidates: candidates,
- }
-}
-
-func (vis *comprehensionIndexNestedCandidateVisitor) Walk(x interface{}) {
- NewGenericVisitor(vis.visit).Walk(x)
-}
-
-func (vis *comprehensionIndexNestedCandidateVisitor) visit(x interface{}) bool {
-
- if vis.found {
- return true
- }
-
- if v, ok := x.(Value); ok && IsComprehension(v) {
- varVis := NewVarVisitor().WithParams(VarVisitorParams{SkipRefHead: true})
- varVis.Walk(v)
- vis.found = len(varVis.Vars().Intersect(vis.candidates)) > 0
- return true
- }
-
- return false
-}
-
-// ModuleTreeNode represents a node in the module tree. The module
-// tree is keyed by the package path.
-type ModuleTreeNode struct {
- Key Value
- Modules []*Module
- Children map[Value]*ModuleTreeNode
- Hide bool
-}
-
-func (n *ModuleTreeNode) String() string {
- var rules []string
- for _, m := range n.Modules {
- for _, r := range m.Rules {
- rules = append(rules, r.Head.String())
- }
- }
- return fmt.Sprintf("", n.Key, n.Children, rules, n.Hide)
-}
-
-// NewModuleTree returns a new ModuleTreeNode that represents the root
-// of the module tree populated with the given modules.
-func NewModuleTree(mods map[string]*Module) *ModuleTreeNode {
- root := &ModuleTreeNode{
- Children: map[Value]*ModuleTreeNode{},
- }
- names := make([]string, 0, len(mods))
- for name := range mods {
- names = append(names, name)
- }
- sort.Strings(names)
- for _, name := range names {
- m := mods[name]
- node := root
- for i, x := range m.Package.Path {
- c, ok := node.Children[x.Value]
- if !ok {
- var hide bool
- if i == 1 && x.Value.Compare(SystemDocumentKey) == 0 {
- hide = true
- }
- c = &ModuleTreeNode{
- Key: x.Value,
- Children: map[Value]*ModuleTreeNode{},
- Hide: hide,
- }
- node.Children[x.Value] = c
- }
- node = c
- }
- node.Modules = append(node.Modules, m)
- }
- return root
-}
-
-// Size returns the number of modules in the tree.
-func (n *ModuleTreeNode) Size() int {
- s := len(n.Modules)
- for _, c := range n.Children {
- s += c.Size()
- }
- return s
-}
-
-// Child returns n's child with key k.
-func (n *ModuleTreeNode) child(k Value) *ModuleTreeNode {
- switch k.(type) {
- case String, Var:
- return n.Children[k]
- }
- return nil
-}
-
-// Find dereferences ref along the tree. ref[0] is converted to a String
-// for convenience.
-func (n *ModuleTreeNode) find(ref Ref) (*ModuleTreeNode, Ref) {
- if v, ok := ref[0].Value.(Var); ok {
- ref = Ref{StringTerm(string(v))}.Concat(ref[1:])
- }
- node := n
- for i, r := range ref {
- next := node.child(r.Value)
- if next == nil {
- tail := make(Ref, len(ref)-i)
- tail[0] = VarTerm(string(ref[i].Value.(String)))
- copy(tail[1:], ref[i+1:])
- return node, tail
- }
- node = next
- }
- return node, nil
-}
-
-// DepthFirst performs a depth-first traversal of the module tree rooted at n.
-// If f returns true, traversal will not continue to the children of n.
-func (n *ModuleTreeNode) DepthFirst(f func(*ModuleTreeNode) bool) {
- if f(n) {
- return
- }
- for _, node := range n.Children {
- node.DepthFirst(f)
- }
-}
-
-// TreeNode represents a node in the rule tree. The rule tree is keyed by
-// rule path.
-type TreeNode struct {
- Key Value
- Values []util.T
- Children map[Value]*TreeNode
- Sorted []Value
- Hide bool
-}
-
-func (n *TreeNode) String() string {
- return fmt.Sprintf("", n.Key, n.Values, n.Sorted, n.Hide)
-}
-
-// NewRuleTree returns a new TreeNode that represents the root
-// of the rule tree populated with the given rules.
-func NewRuleTree(mtree *ModuleTreeNode) *TreeNode {
- root := TreeNode{
- Key: mtree.Key,
- }
-
- mtree.DepthFirst(func(m *ModuleTreeNode) bool {
- for _, mod := range m.Modules {
- if len(mod.Rules) == 0 {
- root.add(mod.Package.Path, nil)
- }
- for _, rule := range mod.Rules {
- root.add(rule.Ref().GroundPrefix(), rule)
- }
- }
- return false
- })
-
- // ensure that data.system's TreeNode is hidden
- node, tail := root.find(DefaultRootRef.Append(NewTerm(SystemDocumentKey)))
- if len(tail) == 0 { // found
- node.Hide = true
- }
-
- root.DepthFirst(func(x *TreeNode) bool {
- x.sort()
- return false
- })
-
- return &root
-}
-
-func (n *TreeNode) add(path Ref, rule *Rule) {
- node, tail := n.find(path)
- if len(tail) > 0 {
- sub := treeNodeFromRef(tail, rule)
- if node.Children == nil {
- node.Children = make(map[Value]*TreeNode, 1)
- }
- node.Children[sub.Key] = sub
- node.Sorted = append(node.Sorted, sub.Key)
- } else {
- if rule != nil {
- node.Values = append(node.Values, rule)
- }
- }
-}
-
-// Size returns the number of rules in the tree.
-func (n *TreeNode) Size() int {
- s := len(n.Values)
- for _, c := range n.Children {
- s += c.Size()
- }
- return s
-}
-
-// Child returns n's child with key k.
-func (n *TreeNode) Child(k Value) *TreeNode {
- switch k.(type) {
- case Ref, Call:
- return nil
- default:
- return n.Children[k]
- }
-}
-
-// Find dereferences ref along the tree
-func (n *TreeNode) Find(ref Ref) *TreeNode {
- node := n
- for _, r := range ref {
- node = node.Child(r.Value)
- if node == nil {
- return nil
- }
- }
- return node
-}
-
-// Iteratively dereferences ref along the node's subtree.
-// - If matching fails immediately, the tail will contain the full ref.
-// - Partial matching will result in a tail of non-zero length.
-// - A complete match will result in a 0 length tail.
-func (n *TreeNode) find(ref Ref) (*TreeNode, Ref) {
- node := n
- for i := range ref {
- next := node.Child(ref[i].Value)
- if next == nil {
- tail := make(Ref, len(ref)-i)
- copy(tail, ref[i:])
- return node, tail
- }
- node = next
- }
- return node, nil
-}
-
-// DepthFirst performs a depth-first traversal of the rule tree rooted at n. If
-// f returns true, traversal will not continue to the children of n.
-func (n *TreeNode) DepthFirst(f func(*TreeNode) bool) {
- if f(n) {
- return
- }
- for _, node := range n.Children {
- node.DepthFirst(f)
- }
-}
-
-func (n *TreeNode) sort() {
- sort.Slice(n.Sorted, func(i, j int) bool {
- return n.Sorted[i].Compare(n.Sorted[j]) < 0
- })
-}
-
-func treeNodeFromRef(ref Ref, rule *Rule) *TreeNode {
- depth := len(ref) - 1
- key := ref[depth].Value
- node := &TreeNode{
- Key: key,
- Children: nil,
- }
- if rule != nil {
- node.Values = []util.T{rule}
- }
-
- for i := len(ref) - 2; i >= 0; i-- {
- key := ref[i].Value
- node = &TreeNode{
- Key: key,
- Children: map[Value]*TreeNode{ref[i+1].Value: node},
- Sorted: []Value{ref[i+1].Value},
- }
- }
- return node
-}
-
-// flattenChildren flattens all children's rule refs into a sorted array.
-func (n *TreeNode) flattenChildren() []Ref {
- ret := newRefSet()
- for _, sub := range n.Children { // we only want the children, so don't use n.DepthFirst() right away
- sub.DepthFirst(func(x *TreeNode) bool {
- for _, r := range x.Values {
- rule := r.(*Rule)
- ret.AddPrefix(rule.Ref())
- }
- return false
- })
- }
-
- sort.Slice(ret.s, func(i, j int) bool {
- return ret.s[i].Compare(ret.s[j]) < 0
- })
- return ret.s
-}
-
-// Graph represents the graph of dependencies between rules.
-type Graph struct {
- adj map[util.T]map[util.T]struct{}
- radj map[util.T]map[util.T]struct{}
- nodes map[util.T]struct{}
- sorted []util.T
-}
-
-// NewGraph returns a new Graph based on modules. The list function must return
-// the rules referred to directly by the ref.
-func NewGraph(modules map[string]*Module, list func(Ref) []*Rule) *Graph {
-
- graph := &Graph{
- adj: map[util.T]map[util.T]struct{}{},
- radj: map[util.T]map[util.T]struct{}{},
- nodes: map[util.T]struct{}{},
- sorted: nil,
- }
-
- // Create visitor to walk a rule AST and add edges to the rule graph for
- // each dependency.
- vis := func(a *Rule) *GenericVisitor {
- stop := false
- return NewGenericVisitor(func(x interface{}) bool {
- switch x := x.(type) {
- case Ref:
- for _, b := range list(x) {
- for node := b; node != nil; node = node.Else {
- graph.addDependency(a, node)
- }
- }
- case *Rule:
- if stop {
- // Do not recurse into else clauses (which will be handled
- // by the outer visitor.)
- return true
- }
- stop = true
- }
- return false
- })
- }
-
- // Walk over all rules, add them to graph, and build adjacency lists.
- for _, module := range modules {
- WalkRules(module, func(a *Rule) bool {
- graph.addNode(a)
- vis(a).Walk(a)
- return false
- })
- }
-
- return graph
-}
-
-// Dependencies returns the set of rules that x depends on.
-func (g *Graph) Dependencies(x util.T) map[util.T]struct{} {
- return g.adj[x]
-}
-
-// Dependents returns the set of rules that depend on x.
-func (g *Graph) Dependents(x util.T) map[util.T]struct{} {
- return g.radj[x]
-}
-
-// Sort returns a slice of rules sorted by dependencies. If a cycle is found,
-// ok is set to false.
-func (g *Graph) Sort() (sorted []util.T, ok bool) {
- if g.sorted != nil {
- return g.sorted, true
- }
-
- sorter := &graphSort{
- sorted: make([]util.T, 0, len(g.nodes)),
- deps: g.Dependencies,
- marked: map[util.T]struct{}{},
- temp: map[util.T]struct{}{},
- }
-
- for node := range g.nodes {
- if !sorter.Visit(node) {
- return nil, false
- }
- }
-
- g.sorted = sorter.sorted
- return g.sorted, true
-}
-
-func (g *Graph) addDependency(u util.T, v util.T) {
-
- if _, ok := g.nodes[u]; !ok {
- g.addNode(u)
- }
-
- if _, ok := g.nodes[v]; !ok {
- g.addNode(v)
- }
-
- edges, ok := g.adj[u]
- if !ok {
- edges = map[util.T]struct{}{}
- g.adj[u] = edges
- }
-
- edges[v] = struct{}{}
-
- edges, ok = g.radj[v]
- if !ok {
- edges = map[util.T]struct{}{}
- g.radj[v] = edges
- }
-
- edges[u] = struct{}{}
-}
-
-func (g *Graph) addNode(n util.T) {
- g.nodes[n] = struct{}{}
-}
-
-type graphSort struct {
- sorted []util.T
- deps func(util.T) map[util.T]struct{}
- marked map[util.T]struct{}
- temp map[util.T]struct{}
-}
-
-func (sort *graphSort) Marked(node util.T) bool {
- _, marked := sort.marked[node]
- return marked
-}
-
-func (sort *graphSort) Visit(node util.T) (ok bool) {
- if _, ok := sort.temp[node]; ok {
- return false
- }
- if sort.Marked(node) {
- return true
- }
- sort.temp[node] = struct{}{}
- for other := range sort.deps(node) {
- if !sort.Visit(other) {
- return false
- }
- }
- sort.marked[node] = struct{}{}
- delete(sort.temp, node)
- sort.sorted = append(sort.sorted, node)
- return true
-}
-
-// GraphTraversal is a Traversal that understands the dependency graph
-type GraphTraversal struct {
- graph *Graph
- visited map[util.T]struct{}
-}
-
-// NewGraphTraversal returns a Traversal for the dependency graph
-func NewGraphTraversal(graph *Graph) *GraphTraversal {
- return &GraphTraversal{
- graph: graph,
- visited: map[util.T]struct{}{},
- }
-}
-
-// Edges lists all dependency connections for a given node
-func (g *GraphTraversal) Edges(x util.T) []util.T {
- r := []util.T{}
- for v := range g.graph.Dependencies(x) {
- r = append(r, v)
- }
- return r
-}
-
-// Visited returns whether a node has been visited, setting a node to visited if not
-func (g *GraphTraversal) Visited(u util.T) bool {
- _, ok := g.visited[u]
- g.visited[u] = struct{}{}
- return ok
-}
-
-type unsafePair struct {
- Expr *Expr
- Vars VarSet
-}
-
-type unsafeVarLoc struct {
- Var Var
- Loc *Location
-}
-
-type unsafeVars map[*Expr]VarSet
-
-func (vs unsafeVars) Add(e *Expr, v Var) {
- if u, ok := vs[e]; ok {
- u[v] = struct{}{}
- } else {
- vs[e] = VarSet{v: struct{}{}}
- }
-}
-
-func (vs unsafeVars) Set(e *Expr, s VarSet) {
- vs[e] = s
-}
-
-func (vs unsafeVars) Update(o unsafeVars) {
- for k, v := range o {
- if _, ok := vs[k]; !ok {
- vs[k] = VarSet{}
- }
- vs[k].Update(v)
- }
-}
-
-func (vs unsafeVars) Vars() (result []unsafeVarLoc) {
-
- locs := map[Var]*Location{}
-
- // If var appears in multiple sets then pick first by location.
- for expr, vars := range vs {
- for v := range vars {
- if locs[v].Compare(expr.Location) > 0 {
- locs[v] = expr.Location
- }
- }
- }
-
- for v, loc := range locs {
- result = append(result, unsafeVarLoc{
- Var: v,
- Loc: loc,
- })
- }
-
- sort.Slice(result, func(i, j int) bool {
- return result[i].Loc.Compare(result[j].Loc) < 0
- })
-
- return result
-}
-
-func (vs unsafeVars) Slice() (result []unsafePair) {
- for expr, vs := range vs {
- result = append(result, unsafePair{
- Expr: expr,
- Vars: vs,
- })
- }
- return
-}
-
-// reorderBodyForSafety returns a copy of the body ordered such that
-// left to right evaluation of the body will not encounter unbound variables
-// in input positions or negated expressions.
-//
-// Expressions are added to the re-ordered body as soon as they are considered
-// safe. If multiple expressions become safe in the same pass, they are added
-// in their original order. This results in minimal re-ordering of the body.
-//
-// If the body cannot be reordered to ensure safety, the second return value
-// contains a mapping of expressions to unsafe variables in those expressions.
-func reorderBodyForSafety(builtins map[string]*Builtin, arity func(Ref) int, globals VarSet, body Body) (Body, unsafeVars) {
-
- bodyVars := body.Vars(SafetyCheckVisitorParams)
- reordered := make(Body, 0, len(body))
- safe := VarSet{}
- unsafe := unsafeVars{}
-
- for _, e := range body {
- for v := range e.Vars(SafetyCheckVisitorParams) {
- if globals.Contains(v) {
- safe.Add(v)
- } else {
- unsafe.Add(e, v)
- }
- }
- }
-
- for {
- n := len(reordered)
-
- for _, e := range body {
- if reordered.Contains(e) {
- continue
- }
-
- ovs := outputVarsForExpr(e, arity, safe)
-
- // check closures: is this expression closing over variables that
- // haven't been made safe by what's already included in `reordered`?
- vs := unsafeVarsInClosures(e)
- cv := vs.Intersect(bodyVars).Diff(globals)
- uv := cv.Diff(outputVarsForBody(reordered, arity, safe))
-
- if len(uv) > 0 {
- if uv.Equal(ovs) { // special case "closure-self"
- continue
- }
- unsafe.Set(e, uv)
- }
-
- for v := range unsafe[e] {
- if ovs.Contains(v) || safe.Contains(v) {
- delete(unsafe[e], v)
- }
- }
-
- if len(unsafe[e]) == 0 {
- delete(unsafe, e)
- reordered.Append(e)
- safe.Update(ovs) // this expression's outputs are safe
- }
- }
-
- if len(reordered) == n { // fixed point, could not add any expr of body
- break
- }
- }
-
- // Recursively visit closures and perform the safety checks on them.
- // Update the globals at each expression to include the variables that could
- // be closed over.
- g := globals.Copy()
- for i, e := range reordered {
- if i > 0 {
- g.Update(reordered[i-1].Vars(SafetyCheckVisitorParams))
- }
- xform := &bodySafetyTransformer{
- builtins: builtins,
- arity: arity,
- current: e,
- globals: g,
- unsafe: unsafe,
- }
- NewGenericVisitor(xform.Visit).Walk(e)
- }
-
- return reordered, unsafe
-}
-
-type bodySafetyTransformer struct {
- builtins map[string]*Builtin
- arity func(Ref) int
- current *Expr
- globals VarSet
- unsafe unsafeVars
-}
-
-func (xform *bodySafetyTransformer) Visit(x interface{}) bool {
- switch term := x.(type) {
- case *Term:
- switch x := term.Value.(type) {
- case *object:
- cpy, _ := x.Map(func(k, v *Term) (*Term, *Term, error) {
- kcpy := k.Copy()
- NewGenericVisitor(xform.Visit).Walk(kcpy)
- vcpy := v.Copy()
- NewGenericVisitor(xform.Visit).Walk(vcpy)
- return kcpy, vcpy, nil
- })
- term.Value = cpy
- return true
- case *set:
- cpy, _ := x.Map(func(v *Term) (*Term, error) {
- vcpy := v.Copy()
- NewGenericVisitor(xform.Visit).Walk(vcpy)
- return vcpy, nil
- })
- term.Value = cpy
- return true
- case *ArrayComprehension:
- xform.reorderArrayComprehensionSafety(x)
- return true
- case *ObjectComprehension:
- xform.reorderObjectComprehensionSafety(x)
- return true
- case *SetComprehension:
- xform.reorderSetComprehensionSafety(x)
- return true
- }
- case *Expr:
- if ev, ok := term.Terms.(*Every); ok {
- xform.globals.Update(ev.KeyValueVars())
- ev.Body = xform.reorderComprehensionSafety(NewVarSet(), ev.Body)
- return true
- }
- }
- return false
-}
-
-func (xform *bodySafetyTransformer) reorderComprehensionSafety(tv VarSet, body Body) Body {
- bv := body.Vars(SafetyCheckVisitorParams)
- bv.Update(xform.globals)
- uv := tv.Diff(bv)
- for v := range uv {
- xform.unsafe.Add(xform.current, v)
- }
-
- r, u := reorderBodyForSafety(xform.builtins, xform.arity, xform.globals, body)
- if len(u) == 0 {
- return r
- }
-
- xform.unsafe.Update(u)
- return body
-}
-
-func (xform *bodySafetyTransformer) reorderArrayComprehensionSafety(ac *ArrayComprehension) {
- ac.Body = xform.reorderComprehensionSafety(ac.Term.Vars(), ac.Body)
-}
-
-func (xform *bodySafetyTransformer) reorderObjectComprehensionSafety(oc *ObjectComprehension) {
- tv := oc.Key.Vars()
- tv.Update(oc.Value.Vars())
- oc.Body = xform.reorderComprehensionSafety(tv, oc.Body)
-}
-
-func (xform *bodySafetyTransformer) reorderSetComprehensionSafety(sc *SetComprehension) {
- sc.Body = xform.reorderComprehensionSafety(sc.Term.Vars(), sc.Body)
-}
-
-// unsafeVarsInClosures collects vars that are contained in closures within
-// this expression.
-func unsafeVarsInClosures(e *Expr) VarSet {
- vs := VarSet{}
- WalkClosures(e, func(x interface{}) bool {
- vis := &VarVisitor{vars: vs}
- if ev, ok := x.(*Every); ok {
- vis.Walk(ev.Body)
- return true
- }
- vis.Walk(x)
- return true
- })
- return vs
-}
-
-// OutputVarsFromBody returns all variables which are the "output" for
-// the given body. For safety checks this means that they would be
-// made safe by the body.
-func OutputVarsFromBody(c *Compiler, body Body, safe VarSet) VarSet {
- return outputVarsForBody(body, c.GetArity, safe)
-}
-
-func outputVarsForBody(body Body, arity func(Ref) int, safe VarSet) VarSet {
- o := safe.Copy()
- for _, e := range body {
- o.Update(outputVarsForExpr(e, arity, o))
- }
- return o.Diff(safe)
-}
-
-// OutputVarsFromExpr returns all variables which are the "output" for
-// the given expression. For safety checks this means that they would be
-// made safe by the expr.
-func OutputVarsFromExpr(c *Compiler, expr *Expr, safe VarSet) VarSet {
- return outputVarsForExpr(expr, c.GetArity, safe)
-}
-
-func outputVarsForExpr(expr *Expr, arity func(Ref) int, safe VarSet) VarSet {
-
- // Negated expressions must be safe.
- if expr.Negated {
- return VarSet{}
- }
-
- // With modifier inputs must be safe.
- for _, with := range expr.With {
- vis := NewVarVisitor().WithParams(SafetyCheckVisitorParams)
- vis.Walk(with)
- vars := vis.Vars()
- unsafe := vars.Diff(safe)
- if len(unsafe) > 0 {
- return VarSet{}
- }
- }
-
- switch terms := expr.Terms.(type) {
- case *Term:
- return outputVarsForTerms(expr, safe)
- case []*Term:
- if expr.IsEquality() {
- return outputVarsForExprEq(expr, safe)
- }
-
- operator, ok := terms[0].Value.(Ref)
- if !ok {
- return VarSet{}
- }
-
- ar := arity(operator)
- if ar < 0 {
- return VarSet{}
- }
-
- return outputVarsForExprCall(expr, ar, safe, terms)
- case *Every:
- return outputVarsForTerms(terms.Domain, safe)
- default:
- panic("illegal expression")
- }
-}
-
-func outputVarsForExprEq(expr *Expr, safe VarSet) VarSet {
-
- if !validEqAssignArgCount(expr) {
- return safe
- }
-
- output := outputVarsForTerms(expr, safe)
- output.Update(safe)
- output.Update(Unify(output, expr.Operand(0), expr.Operand(1)))
-
- return output.Diff(safe)
-}
-
-func outputVarsForExprCall(expr *Expr, arity int, safe VarSet, terms []*Term) VarSet {
-
- output := outputVarsForTerms(expr, safe)
-
- numInputTerms := arity + 1
- if numInputTerms >= len(terms) {
- return output
- }
-
- params := VarVisitorParams{
- SkipClosures: true,
- SkipSets: true,
- SkipObjectKeys: true,
- SkipRefHead: true,
- }
- vis := NewVarVisitor().WithParams(params)
- vis.Walk(Args(terms[:numInputTerms]))
- unsafe := vis.Vars().Diff(output).Diff(safe)
-
- if len(unsafe) > 0 {
- return VarSet{}
- }
-
- vis = NewVarVisitor().WithParams(params)
- vis.Walk(Args(terms[numInputTerms:]))
- output.Update(vis.vars)
- return output
-}
-
-func outputVarsForTerms(expr interface{}, safe VarSet) VarSet {
- output := VarSet{}
- WalkTerms(expr, func(x *Term) bool {
- switch r := x.Value.(type) {
- case *SetComprehension, *ArrayComprehension, *ObjectComprehension:
- return true
- case Ref:
- if !isRefSafe(r, safe) {
- return true
- }
- output.Update(r.OutputVars())
- return false
- }
- return false
- })
- return output
-}
-
-type equalityFactory struct {
- gen *localVarGenerator
-}
-
-func newEqualityFactory(gen *localVarGenerator) *equalityFactory {
- return &equalityFactory{gen}
-}
-
-func (f *equalityFactory) Generate(other *Term) *Expr {
- term := NewTerm(f.gen.Generate()).SetLocation(other.Location)
- expr := Equality.Expr(term, other)
- expr.Generated = true
- expr.Location = other.Location
- return expr
-}
-
-type localVarGenerator struct {
- exclude VarSet
- suffix string
- next int
-}
-
-func newLocalVarGeneratorForModuleSet(sorted []string, modules map[string]*Module) *localVarGenerator {
- exclude := NewVarSet()
- vis := &VarVisitor{vars: exclude}
- for _, key := range sorted {
- vis.Walk(modules[key])
- }
- return &localVarGenerator{exclude: exclude, next: 0}
-}
-
-func newLocalVarGenerator(suffix string, node interface{}) *localVarGenerator {
- exclude := NewVarSet()
- vis := &VarVisitor{vars: exclude}
- vis.Walk(node)
- return &localVarGenerator{exclude: exclude, suffix: suffix, next: 0}
-}
-
-func (l *localVarGenerator) Generate() Var {
- for {
- result := Var("__local" + l.suffix + strconv.Itoa(l.next) + "__")
- l.next++
- if !l.exclude.Contains(result) {
- return result
- }
- }
-}
-
-func getGlobals(pkg *Package, rules []Ref, imports []*Import) map[Var]*usedRef {
-
- globals := make(map[Var]*usedRef, len(rules)) // NB: might grow bigger with imports
-
- // Populate globals with exports within the package.
- for _, ref := range rules {
- v := ref[0].Value.(Var)
- globals[v] = &usedRef{ref: pkg.Path.Append(StringTerm(string(v)))}
- }
-
- // Populate globals with imports.
- for _, imp := range imports {
- path := imp.Path.Value.(Ref)
- if FutureRootDocument.Equal(path[0]) || RegoRootDocument.Equal(path[0]) {
- continue // ignore future and rego imports
- }
- globals[imp.Name()] = &usedRef{ref: path}
- }
-
- return globals
-}
-
-func requiresEval(x *Term) bool {
- if x == nil {
- return false
- }
- return ContainsRefs(x) || ContainsComprehensions(x)
-}
-
-func resolveRef(globals map[Var]*usedRef, ignore *declaredVarStack, ref Ref) Ref {
-
- r := Ref{}
- for i, x := range ref {
- switch v := x.Value.(type) {
- case Var:
- if g, ok := globals[v]; ok && !ignore.Contains(v) {
- cpy := g.ref.Copy()
- for i := range cpy {
- cpy[i].SetLocation(x.Location)
- }
- if i == 0 {
- r = cpy
- } else {
- r = append(r, NewTerm(cpy).SetLocation(x.Location))
- }
- g.used = true
- } else {
- r = append(r, x)
- }
- case Ref, *Array, Object, Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call:
- r = append(r, resolveRefsInTerm(globals, ignore, x))
- default:
- r = append(r, x)
- }
- }
-
- return r
-}
-
-type usedRef struct {
- ref Ref
- used bool
-}
-
-func resolveRefsInRule(globals map[Var]*usedRef, rule *Rule) error {
- ignore := &declaredVarStack{}
-
- vars := NewVarSet()
- var vis *GenericVisitor
- var err error
-
- // Walk args to collect vars and transform body so that callers can shadow
- // root documents.
- vis = NewGenericVisitor(func(x interface{}) bool {
- if err != nil {
- return true
- }
- switch x := x.(type) {
- case Var:
- vars.Add(x)
-
- // Object keys cannot be pattern matched so only walk values.
- case *object:
- x.Foreach(func(_, v *Term) {
- vis.Walk(v)
- })
-
- // Skip terms that could contain vars that cannot be pattern matched.
- case Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call:
- return true
-
- case *Term:
- if _, ok := x.Value.(Ref); ok {
- if RootDocumentRefs.Contains(x) {
- // We could support args named input, data, etc. however
- // this would require rewriting terms in the head and body.
- // Preventing root document shadowing is simpler, and
- // arguably, will prevent confusing names from being used.
- // NOTE: this check is also performed as part of strict-mode in
- // checkRootDocumentOverrides.
- err = fmt.Errorf("args must not shadow %v (use a different variable name)", x)
- return true
- }
- }
- }
- return false
- })
-
- vis.Walk(rule.Head.Args)
-
- if err != nil {
- return err
- }
-
- ignore.Push(vars)
- ignore.Push(declaredVars(rule.Body))
-
- ref := rule.Head.Ref()
- for i := 1; i < len(ref); i++ {
- ref[i] = resolveRefsInTerm(globals, ignore, ref[i])
- }
- if rule.Head.Key != nil {
- rule.Head.Key = resolveRefsInTerm(globals, ignore, rule.Head.Key)
- }
-
- if rule.Head.Value != nil {
- rule.Head.Value = resolveRefsInTerm(globals, ignore, rule.Head.Value)
- }
-
- rule.Body = resolveRefsInBody(globals, ignore, rule.Body)
- return nil
-}
-
-func resolveRefsInBody(globals map[Var]*usedRef, ignore *declaredVarStack, body Body) Body {
- r := make([]*Expr, 0, len(body))
- for _, expr := range body {
- r = append(r, resolveRefsInExpr(globals, ignore, expr))
- }
- return r
-}
-
-func resolveRefsInExpr(globals map[Var]*usedRef, ignore *declaredVarStack, expr *Expr) *Expr {
- cpy := *expr
- switch ts := expr.Terms.(type) {
- case *Term:
- cpy.Terms = resolveRefsInTerm(globals, ignore, ts)
- case []*Term:
- buf := make([]*Term, len(ts))
- for i := 0; i < len(ts); i++ {
- buf[i] = resolveRefsInTerm(globals, ignore, ts[i])
- }
- cpy.Terms = buf
- case *SomeDecl:
- if val, ok := ts.Symbols[0].Value.(Call); ok {
- cpy.Terms = &SomeDecl{Symbols: []*Term{CallTerm(resolveRefsInTermSlice(globals, ignore, val)...)}}
- }
- case *Every:
- locals := NewVarSet()
- if ts.Key != nil {
- locals.Update(ts.Key.Vars())
- }
- locals.Update(ts.Value.Vars())
- ignore.Push(locals)
- cpy.Terms = &Every{
- Key: ts.Key.Copy(), // TODO(sr): do more?
- Value: ts.Value.Copy(), // TODO(sr): do more?
- Domain: resolveRefsInTerm(globals, ignore, ts.Domain),
- Body: resolveRefsInBody(globals, ignore, ts.Body),
- }
- ignore.Pop()
- }
- for _, w := range cpy.With {
- w.Target = resolveRefsInTerm(globals, ignore, w.Target)
- w.Value = resolveRefsInTerm(globals, ignore, w.Value)
- }
- return &cpy
-}
-
-func resolveRefsInTerm(globals map[Var]*usedRef, ignore *declaredVarStack, term *Term) *Term {
- switch v := term.Value.(type) {
- case Var:
- if g, ok := globals[v]; ok && !ignore.Contains(v) {
- cpy := g.ref.Copy()
- for i := range cpy {
- cpy[i].SetLocation(term.Location)
- }
- g.used = true
- return NewTerm(cpy).SetLocation(term.Location)
- }
- return term
- case Ref:
- fqn := resolveRef(globals, ignore, v)
- cpy := *term
- cpy.Value = fqn
- return &cpy
- case *object:
- cpy := *term
- cpy.Value, _ = v.Map(func(k, v *Term) (*Term, *Term, error) {
- k = resolveRefsInTerm(globals, ignore, k)
- v = resolveRefsInTerm(globals, ignore, v)
- return k, v, nil
- })
- return &cpy
- case *Array:
- cpy := *term
- cpy.Value = NewArray(resolveRefsInTermArray(globals, ignore, v)...)
- return &cpy
- case Call:
- cpy := *term
- cpy.Value = Call(resolveRefsInTermSlice(globals, ignore, v))
- return &cpy
- case Set:
- s, _ := v.Map(func(e *Term) (*Term, error) {
- return resolveRefsInTerm(globals, ignore, e), nil
- })
- cpy := *term
- cpy.Value = s
- return &cpy
- case *ArrayComprehension:
- ac := &ArrayComprehension{}
- ignore.Push(declaredVars(v.Body))
- ac.Term = resolveRefsInTerm(globals, ignore, v.Term)
- ac.Body = resolveRefsInBody(globals, ignore, v.Body)
- cpy := *term
- cpy.Value = ac
- ignore.Pop()
- return &cpy
- case *ObjectComprehension:
- oc := &ObjectComprehension{}
- ignore.Push(declaredVars(v.Body))
- oc.Key = resolveRefsInTerm(globals, ignore, v.Key)
- oc.Value = resolveRefsInTerm(globals, ignore, v.Value)
- oc.Body = resolveRefsInBody(globals, ignore, v.Body)
- cpy := *term
- cpy.Value = oc
- ignore.Pop()
- return &cpy
- case *SetComprehension:
- sc := &SetComprehension{}
- ignore.Push(declaredVars(v.Body))
- sc.Term = resolveRefsInTerm(globals, ignore, v.Term)
- sc.Body = resolveRefsInBody(globals, ignore, v.Body)
- cpy := *term
- cpy.Value = sc
- ignore.Pop()
- return &cpy
- default:
- return term
- }
-}
-
-func resolveRefsInTermArray(globals map[Var]*usedRef, ignore *declaredVarStack, terms *Array) []*Term {
- cpy := make([]*Term, terms.Len())
- for i := 0; i < terms.Len(); i++ {
- cpy[i] = resolveRefsInTerm(globals, ignore, terms.Elem(i))
- }
- return cpy
-}
-
-func resolveRefsInTermSlice(globals map[Var]*usedRef, ignore *declaredVarStack, terms []*Term) []*Term {
- cpy := make([]*Term, len(terms))
- for i := 0; i < len(terms); i++ {
- cpy[i] = resolveRefsInTerm(globals, ignore, terms[i])
- }
- return cpy
-}
-
-type declaredVarStack []VarSet
-
-func (s declaredVarStack) Contains(v Var) bool {
- for i := len(s) - 1; i >= 0; i-- {
- if _, ok := s[i][v]; ok {
- return ok
- }
- }
- return false
-}
-
-func (s declaredVarStack) Add(v Var) {
- s[len(s)-1].Add(v)
-}
-
-func (s *declaredVarStack) Push(vs VarSet) {
- *s = append(*s, vs)
-}
-
-func (s *declaredVarStack) Pop() {
- curr := *s
- *s = curr[:len(curr)-1]
-}
-
-func declaredVars(x interface{}) VarSet {
- vars := NewVarSet()
- vis := NewGenericVisitor(func(x interface{}) bool {
- switch x := x.(type) {
- case *Expr:
- if x.IsAssignment() && validEqAssignArgCount(x) {
- WalkVars(x.Operand(0), func(v Var) bool {
- vars.Add(v)
- return false
- })
- } else if decl, ok := x.Terms.(*SomeDecl); ok {
- for i := range decl.Symbols {
- switch val := decl.Symbols[i].Value.(type) {
- case Var:
- vars.Add(val)
- case Call:
- args := val[1:]
- if len(args) == 3 { // some x, y in xs
- WalkVars(args[1], func(v Var) bool {
- vars.Add(v)
- return false
- })
- }
- // some x in xs
- WalkVars(args[0], func(v Var) bool {
- vars.Add(v)
- return false
- })
- }
- }
- }
- case *ArrayComprehension, *SetComprehension, *ObjectComprehension:
- return true
- }
- return false
- })
- vis.Walk(x)
- return vars
-}
-
-// rewriteComprehensionTerms will rewrite comprehensions so that the term part
-// is bound to a variable in the body. This allows any type of term to be used
-// in the term part (even if the term requires evaluation.)
-//
-// For instance, given the following comprehension:
-//
-// [x[0] | x = y[_]; y = [1,2,3]]
-//
-// The comprehension would be rewritten as:
-//
-// [__local0__ | x = y[_]; y = [1,2,3]; __local0__ = x[0]]
-func rewriteComprehensionTerms(f *equalityFactory, node interface{}) (interface{}, error) {
- return TransformComprehensions(node, func(x interface{}) (Value, error) {
- switch x := x.(type) {
- case *ArrayComprehension:
- if requiresEval(x.Term) {
- expr := f.Generate(x.Term)
- x.Term = expr.Operand(0)
- x.Body.Append(expr)
- }
- return x, nil
- case *SetComprehension:
- if requiresEval(x.Term) {
- expr := f.Generate(x.Term)
- x.Term = expr.Operand(0)
- x.Body.Append(expr)
- }
- return x, nil
- case *ObjectComprehension:
- if requiresEval(x.Key) {
- expr := f.Generate(x.Key)
- x.Key = expr.Operand(0)
- x.Body.Append(expr)
- }
- if requiresEval(x.Value) {
- expr := f.Generate(x.Value)
- x.Value = expr.Operand(0)
- x.Body.Append(expr)
- }
- return x, nil
- }
- panic("illegal type")
- })
-}
-
-// rewriteEquals will rewrite exprs under x as unification calls instead of ==
-// calls. For example:
-//
-// data.foo == data.bar is rewritten as data.foo = data.bar
-//
-// This stage should only run the safety check (since == is a built-in with no
-// outputs, so the inputs must not be marked as safe.)
-//
-// This stage is not executed by the query compiler by default because when
-// callers specify == instead of = they expect to receive a true/false/undefined
-// result back whereas with = the result is only ever true/undefined. For
-// partial evaluation cases we do want to rewrite == to = to simplify the
-// result.
-func rewriteEquals(x interface{}) (modified bool) {
- doubleEq := Equal.Ref()
- unifyOp := Equality.Ref()
- t := NewGenericTransformer(func(x interface{}) (interface{}, error) {
- if x, ok := x.(*Expr); ok && x.IsCall() {
- operator := x.Operator()
- if operator.Equal(doubleEq) && len(x.Operands()) == 2 {
- modified = true
- x.SetOperator(NewTerm(unifyOp))
- }
- }
- return x, nil
- })
- _, _ = Transform(t, x) // ignore error
- return modified
-}
-
-func rewriteTestEqualities(f *equalityFactory, body Body) Body {
- result := make(Body, 0, len(body))
- for _, expr := range body {
- // We can't rewrite negated expressions; if the extracted term is undefined, evaluation would fail before
- // reaching the negation check.
- if !expr.Negated && !expr.Generated {
- switch {
- case expr.IsEquality():
- terms := expr.Terms.([]*Term)
- result, terms[1] = rewriteDynamicsShallow(expr, f, terms[1], result)
- result, terms[2] = rewriteDynamicsShallow(expr, f, terms[2], result)
- case expr.IsEvery():
- // We rewrite equalities inside of every-bodies as a fail here will be the cause of the test-rule fail.
- // Failures inside other expressions with closures, such as comprehensions, won't cause the test-rule to fail, so we skip those.
- every := expr.Terms.(*Every)
- every.Body = rewriteTestEqualities(f, every.Body)
- }
- }
- result = appendExpr(result, expr)
- }
- return result
-}
-
-func rewriteDynamicsShallow(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) {
- switch term.Value.(type) {
- case Ref, *ArrayComprehension, *SetComprehension, *ObjectComprehension:
- generated := f.Generate(term)
- generated.With = original.With
- result.Append(generated)
- connectGeneratedExprs(original, generated)
- return result, result[len(result)-1].Operand(0)
- }
- return result, term
-}
-
-// rewriteDynamics will rewrite the body so that dynamic terms (i.e., refs and
-// comprehensions) are bound to vars earlier in the query. This translation
-// results in eager evaluation.
-//
-// For instance, given the following query:
-//
-// foo(data.bar) = 1
-//
-// The rewritten version will be:
-//
-// __local0__ = data.bar; foo(__local0__) = 1
-func rewriteDynamics(f *equalityFactory, body Body) Body {
- result := make(Body, 0, len(body))
- for _, expr := range body {
- switch {
- case expr.IsEquality():
- result = rewriteDynamicsEqExpr(f, expr, result)
- case expr.IsCall():
- result = rewriteDynamicsCallExpr(f, expr, result)
- case expr.IsEvery():
- result = rewriteDynamicsEveryExpr(f, expr, result)
- default:
- result = rewriteDynamicsTermExpr(f, expr, result)
- }
- }
- return result
-}
-
-func appendExpr(body Body, expr *Expr) Body {
- body.Append(expr)
- return body
-}
-
-func rewriteDynamicsEqExpr(f *equalityFactory, expr *Expr, result Body) Body {
- if !validEqAssignArgCount(expr) {
- return appendExpr(result, expr)
- }
- terms := expr.Terms.([]*Term)
- result, terms[1] = rewriteDynamicsInTerm(expr, f, terms[1], result)
- result, terms[2] = rewriteDynamicsInTerm(expr, f, terms[2], result)
- return appendExpr(result, expr)
-}
-
-func rewriteDynamicsCallExpr(f *equalityFactory, expr *Expr, result Body) Body {
- terms := expr.Terms.([]*Term)
- for i := 1; i < len(terms); i++ {
- result, terms[i] = rewriteDynamicsOne(expr, f, terms[i], result)
- }
- return appendExpr(result, expr)
-}
-
-func rewriteDynamicsEveryExpr(f *equalityFactory, expr *Expr, result Body) Body {
- ev := expr.Terms.(*Every)
- result, ev.Domain = rewriteDynamicsOne(expr, f, ev.Domain, result)
- ev.Body = rewriteDynamics(f, ev.Body)
- return appendExpr(result, expr)
-}
-
-func rewriteDynamicsTermExpr(f *equalityFactory, expr *Expr, result Body) Body {
- term := expr.Terms.(*Term)
- result, expr.Terms = rewriteDynamicsInTerm(expr, f, term, result)
- return appendExpr(result, expr)
-}
-
-func rewriteDynamicsInTerm(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) {
- switch v := term.Value.(type) {
- case Ref:
- for i := 1; i < len(v); i++ {
- result, v[i] = rewriteDynamicsOne(original, f, v[i], result)
- }
- case *ArrayComprehension:
- v.Body = rewriteDynamics(f, v.Body)
- case *SetComprehension:
- v.Body = rewriteDynamics(f, v.Body)
- case *ObjectComprehension:
- v.Body = rewriteDynamics(f, v.Body)
- default:
- result, term = rewriteDynamicsOne(original, f, term, result)
- }
- return result, term
-}
-
-func rewriteDynamicsOne(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) {
- switch v := term.Value.(type) {
- case Ref:
- for i := 1; i < len(v); i++ {
- result, v[i] = rewriteDynamicsOne(original, f, v[i], result)
- }
- generated := f.Generate(term)
- generated.With = original.With
- result.Append(generated)
- connectGeneratedExprs(original, generated)
- return result, result[len(result)-1].Operand(0)
- case *Array:
- for i := 0; i < v.Len(); i++ {
- var t *Term
- result, t = rewriteDynamicsOne(original, f, v.Elem(i), result)
- v.set(i, t)
- }
- return result, term
- case *object:
- cpy := NewObject()
- v.Foreach(func(key, value *Term) {
- result, key = rewriteDynamicsOne(original, f, key, result)
- result, value = rewriteDynamicsOne(original, f, value, result)
- cpy.Insert(key, value)
- })
- return result, NewTerm(cpy).SetLocation(term.Location)
- case Set:
- cpy := NewSet()
- for _, term := range v.Slice() {
- var rw *Term
- result, rw = rewriteDynamicsOne(original, f, term, result)
- cpy.Add(rw)
- }
- return result, NewTerm(cpy).SetLocation(term.Location)
- case *ArrayComprehension:
- var extra *Expr
- v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term)
- result.Append(extra)
- connectGeneratedExprs(original, extra)
- return result, result[len(result)-1].Operand(0)
- case *SetComprehension:
- var extra *Expr
- v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term)
- result.Append(extra)
- connectGeneratedExprs(original, extra)
- return result, result[len(result)-1].Operand(0)
- case *ObjectComprehension:
- var extra *Expr
- v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term)
- result.Append(extra)
- connectGeneratedExprs(original, extra)
- return result, result[len(result)-1].Operand(0)
- }
- return result, term
-}
-
-func rewriteDynamicsComprehensionBody(original *Expr, f *equalityFactory, body Body, term *Term) (Body, *Expr) {
- body = rewriteDynamics(f, body)
- generated := f.Generate(term)
- generated.With = original.With
- return body, generated
-}
-
-func rewriteExprTermsInHead(gen *localVarGenerator, rule *Rule) {
- for i := range rule.Head.Args {
- support, output := expandExprTerm(gen, rule.Head.Args[i])
- for j := range support {
- rule.Body.Append(support[j])
- }
- rule.Head.Args[i] = output
- }
- if rule.Head.Key != nil {
- support, output := expandExprTerm(gen, rule.Head.Key)
- for i := range support {
- rule.Body.Append(support[i])
- }
- rule.Head.Key = output
- }
- if rule.Head.Value != nil {
- support, output := expandExprTerm(gen, rule.Head.Value)
- for i := range support {
- rule.Body.Append(support[i])
- }
- rule.Head.Value = output
- }
-}
-
-func rewriteExprTermsInBody(gen *localVarGenerator, body Body) Body {
- cpy := make(Body, 0, len(body))
- for i := 0; i < len(body); i++ {
- for _, expr := range expandExpr(gen, body[i]) {
- cpy.Append(expr)
- }
- }
- return cpy
-}
-
-func expandExpr(gen *localVarGenerator, expr *Expr) (result []*Expr) {
- for i := range expr.With {
- extras, value := expandExprTerm(gen, expr.With[i].Value)
- expr.With[i].Value = value
- result = append(result, extras...)
- }
- switch terms := expr.Terms.(type) {
- case *Term:
- extras, term := expandExprTerm(gen, terms)
- if len(expr.With) > 0 {
- for i := range extras {
- extras[i].With = expr.With
- }
- }
- result = append(result, extras...)
- expr.Terms = term
- result = append(result, expr)
- case []*Term:
- for i := 1; i < len(terms); i++ {
- var extras []*Expr
- extras, terms[i] = expandExprTerm(gen, terms[i])
- connectGeneratedExprs(expr, extras...)
- if len(expr.With) > 0 {
- for i := range extras {
- extras[i].With = expr.With
- }
- }
- result = append(result, extras...)
- }
- result = append(result, expr)
- case *Every:
- var extras []*Expr
-
- term := NewTerm(gen.Generate()).SetLocation(terms.Domain.Location)
- eq := Equality.Expr(term, terms.Domain).SetLocation(terms.Domain.Location)
- eq.Generated = true
- eq.With = expr.With
- extras = expandExpr(gen, eq)
- terms.Domain = term
-
- terms.Body = rewriteExprTermsInBody(gen, terms.Body)
- result = append(result, extras...)
- result = append(result, expr)
- }
- return
-}
-
-func connectGeneratedExprs(parent *Expr, children ...*Expr) {
- for _, child := range children {
- child.generatedFrom = parent
- parent.generates = append(parent.generates, child)
- }
-}
-
-func expandExprTerm(gen *localVarGenerator, term *Term) (support []*Expr, output *Term) {
- output = term
- switch v := term.Value.(type) {
- case Call:
- for i := 1; i < len(v); i++ {
- var extras []*Expr
- extras, v[i] = expandExprTerm(gen, v[i])
- support = append(support, extras...)
- }
- output = NewTerm(gen.Generate()).SetLocation(term.Location)
- expr := v.MakeExpr(output).SetLocation(term.Location)
- expr.Generated = true
- support = append(support, expr)
- case Ref:
- support = expandExprRef(gen, v)
- case *Array:
- support = expandExprTermArray(gen, v)
- case *object:
- cpy, _ := v.Map(func(k, v *Term) (*Term, *Term, error) {
- extras1, expandedKey := expandExprTerm(gen, k)
- extras2, expandedValue := expandExprTerm(gen, v)
- support = append(support, extras1...)
- support = append(support, extras2...)
- return expandedKey, expandedValue, nil
- })
- output = NewTerm(cpy).SetLocation(term.Location)
- case Set:
- cpy, _ := v.Map(func(x *Term) (*Term, error) {
- extras, expanded := expandExprTerm(gen, x)
- support = append(support, extras...)
- return expanded, nil
- })
- output = NewTerm(cpy).SetLocation(term.Location)
- case *ArrayComprehension:
- support, term := expandExprTerm(gen, v.Term)
- for i := range support {
- v.Body.Append(support[i])
- }
- v.Term = term
- v.Body = rewriteExprTermsInBody(gen, v.Body)
- case *SetComprehension:
- support, term := expandExprTerm(gen, v.Term)
- for i := range support {
- v.Body.Append(support[i])
- }
- v.Term = term
- v.Body = rewriteExprTermsInBody(gen, v.Body)
- case *ObjectComprehension:
- support, key := expandExprTerm(gen, v.Key)
- for i := range support {
- v.Body.Append(support[i])
- }
- v.Key = key
- support, value := expandExprTerm(gen, v.Value)
- for i := range support {
- v.Body.Append(support[i])
- }
- v.Value = value
- v.Body = rewriteExprTermsInBody(gen, v.Body)
- }
- return
-}
-
-func expandExprRef(gen *localVarGenerator, v []*Term) (support []*Expr) {
- // Start by calling a normal expandExprTerm on all terms.
- support = expandExprTermSlice(gen, v)
-
- // Rewrite references in order to support indirect references. We rewrite
- // e.g.
- //
- // [1, 2, 3][i]
- //
- // to
- //
- // __local_var = [1, 2, 3]
- // __local_var[i]
- //
- // to support these. This only impacts the reference subject, i.e. the
- // first item in the slice.
- var subject = v[0]
- switch subject.Value.(type) {
- case *Array, Object, Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call:
- f := newEqualityFactory(gen)
- assignToLocal := f.Generate(subject)
- support = append(support, assignToLocal)
- v[0] = assignToLocal.Operand(0)
- }
- return
-}
-
-func expandExprTermArray(gen *localVarGenerator, arr *Array) (support []*Expr) {
- for i := 0; i < arr.Len(); i++ {
- extras, v := expandExprTerm(gen, arr.Elem(i))
- arr.set(i, v)
- support = append(support, extras...)
- }
- return
-}
-
-func expandExprTermSlice(gen *localVarGenerator, v []*Term) (support []*Expr) {
- for i := 0; i < len(v); i++ {
- var extras []*Expr
- extras, v[i] = expandExprTerm(gen, v[i])
- support = append(support, extras...)
- }
- return
-}
-
-type localDeclaredVars struct {
- vars []*declaredVarSet
-
- // rewritten contains a mapping of *all* user-defined variables
- // that have been rewritten whereas vars contains the state
- // from the current query (not any nested queries, and all vars
- // seen).
- rewritten map[Var]Var
-
- // indicates if an assignment (:= operator) has been seen *ever*
- assignment bool
-}
-
-type varOccurrence int
-
-const (
- newVar varOccurrence = iota
- argVar
- seenVar
- assignedVar
- declaredVar
-)
-
-type declaredVarSet struct {
- vs map[Var]Var
- reverse map[Var]Var
- occurrence map[Var]varOccurrence
- count map[Var]int
-}
-
-func newDeclaredVarSet() *declaredVarSet {
- return &declaredVarSet{
- vs: map[Var]Var{},
- reverse: map[Var]Var{},
- occurrence: map[Var]varOccurrence{},
- count: map[Var]int{},
- }
-}
-
-func newLocalDeclaredVars() *localDeclaredVars {
- return &localDeclaredVars{
- vars: []*declaredVarSet{newDeclaredVarSet()},
- rewritten: map[Var]Var{},
- }
-}
-
-func (s *localDeclaredVars) Copy() *localDeclaredVars {
- stack := &localDeclaredVars{
- vars: []*declaredVarSet{},
- rewritten: map[Var]Var{},
- }
-
- for i := range s.vars {
- stack.vars = append(stack.vars, newDeclaredVarSet())
- for k, v := range s.vars[i].vs {
- stack.vars[0].vs[k] = v
- }
- for k, v := range s.vars[i].reverse {
- stack.vars[0].reverse[k] = v
- }
- for k, v := range s.vars[i].count {
- stack.vars[0].count[k] = v
- }
- for k, v := range s.vars[i].occurrence {
- stack.vars[0].occurrence[k] = v
- }
- }
-
- for k, v := range s.rewritten {
- stack.rewritten[k] = v
- }
-
- return stack
-}
-
-func (s *localDeclaredVars) Push() {
- s.vars = append(s.vars, newDeclaredVarSet())
-}
-
-func (s *localDeclaredVars) Pop() *declaredVarSet {
- sl := s.vars
- curr := sl[len(sl)-1]
- s.vars = sl[:len(sl)-1]
- return curr
-}
-
-func (s localDeclaredVars) Peek() *declaredVarSet {
- return s.vars[len(s.vars)-1]
-}
-
-func (s localDeclaredVars) Insert(x, y Var, occurrence varOccurrence) {
- elem := s.vars[len(s.vars)-1]
- elem.vs[x] = y
- elem.reverse[y] = x
- elem.occurrence[x] = occurrence
-
- elem.count[x] = 1
-
- // If the variable has been rewritten (where x != y, with y being
- // the generated value), store it in the map of rewritten vars.
- // Assume that the generated values are unique for the compilation.
- if !x.Equal(y) {
- s.rewritten[y] = x
- }
-}
-
-func (s localDeclaredVars) Declared(x Var) (y Var, ok bool) {
- for i := len(s.vars) - 1; i >= 0; i-- {
- if y, ok = s.vars[i].vs[x]; ok {
- return
- }
- }
- return
-}
-
-// Occurrence returns a flag that indicates whether x has occurred in the
-// current scope.
-func (s localDeclaredVars) Occurrence(x Var) varOccurrence {
- return s.vars[len(s.vars)-1].occurrence[x]
-}
-
-// GlobalOccurrence returns a flag that indicates whether x has occurred in the
-// global scope.
-func (s localDeclaredVars) GlobalOccurrence(x Var) (varOccurrence, bool) {
- for i := len(s.vars) - 1; i >= 0; i-- {
- if occ, ok := s.vars[i].occurrence[x]; ok {
- return occ, true
- }
- }
- return newVar, false
-}
-
-// Seen marks x as seen by incrementing its counter
-func (s localDeclaredVars) Seen(x Var) {
- for i := len(s.vars) - 1; i >= 0; i-- {
- dvs := s.vars[i]
- if c, ok := dvs.count[x]; ok {
- dvs.count[x] = c + 1
- return
- }
- }
-
- s.vars[len(s.vars)-1].count[x] = 1
-}
-
-// Count returns how many times x has been seen
-func (s localDeclaredVars) Count(x Var) int {
- for i := len(s.vars) - 1; i >= 0; i-- {
- if c, ok := s.vars[i].count[x]; ok {
- return c
- }
- }
-
- return 0
-}
-
-// rewriteLocalVars rewrites bodies to remove assignment/declaration
-// expressions. For example:
-//
-// a := 1; p[a]
-//
-// Is rewritten to:
-//
-// __local0__ = 1; p[__local0__]
-//
-// During rewriting, assignees are validated to prevent use before declaration.
-func rewriteLocalVars(g *localVarGenerator, stack *localDeclaredVars, used VarSet, body Body, strict bool) (Body, map[Var]Var, Errors) {
- var errs Errors
- body, errs = rewriteDeclaredVarsInBody(g, stack, used, body, errs, strict)
- return body, stack.Peek().vs, errs
-}
-
-func rewriteDeclaredVarsInBody(g *localVarGenerator, stack *localDeclaredVars, used VarSet, body Body, errs Errors, strict bool) (Body, Errors) {
-
- var cpy Body
-
- for i := range body {
- var expr *Expr
- switch {
- case body[i].IsAssignment():
- stack.assignment = true
- expr, errs = rewriteDeclaredAssignment(g, stack, body[i], errs, strict)
- case body[i].IsSome():
- expr, errs = rewriteSomeDeclStatement(g, stack, body[i], errs, strict)
- case body[i].IsEvery():
- expr, errs = rewriteEveryStatement(g, stack, body[i], errs, strict)
- default:
- expr, errs = rewriteDeclaredVarsInExpr(g, stack, body[i], errs, strict)
- }
- if expr != nil {
- cpy.Append(expr)
- }
- }
-
- // If the body only contained a var statement it will be empty at this
- // point. Append true to the body to ensure that it's non-empty (zero length
- // bodies are not supported.)
- if len(cpy) == 0 {
- cpy.Append(NewExpr(BooleanTerm(true)))
- }
-
- errs = checkUnusedAssignedVars(body, stack, used, errs, strict)
- return cpy, checkUnusedDeclaredVars(body, stack, used, cpy, errs)
-}
-
-func checkUnusedAssignedVars(body Body, stack *localDeclaredVars, used VarSet, errs Errors, strict bool) Errors {
-
- if !strict || len(errs) > 0 {
- return errs
- }
-
- dvs := stack.Peek()
- unused := NewVarSet()
-
- for v, occ := range dvs.occurrence {
- // A var that was assigned in this scope must have been seen (used) more than once (the time of assignment) in
- // the same, or nested, scope to be counted as used.
- if !v.IsWildcard() && stack.Count(v) <= 1 && occ == assignedVar {
- unused.Add(dvs.vs[v])
- }
- }
-
- rewrittenUsed := NewVarSet()
- for v := range used {
- if gv, ok := stack.Declared(v); ok {
- rewrittenUsed.Add(gv)
- } else {
- rewrittenUsed.Add(v)
- }
- }
-
- unused = unused.Diff(rewrittenUsed)
-
- for _, gv := range unused.Sorted() {
- found := false
- for i := range body {
- if body[i].Vars(VarVisitorParams{}).Contains(gv) {
- errs = append(errs, NewError(CompileErr, body[i].Loc(), "assigned var %v unused", dvs.reverse[gv]))
- found = true
- break
- }
- }
- if !found {
- errs = append(errs, NewError(CompileErr, body[0].Loc(), "assigned var %v unused", dvs.reverse[gv]))
- }
- }
-
- return errs
-}
-
-func checkUnusedDeclaredVars(body Body, stack *localDeclaredVars, used VarSet, cpy Body, errs Errors) Errors {
-
- // NOTE(tsandall): Do not generate more errors if there are existing
- // declaration errors.
- if len(errs) > 0 {
- return errs
- }
-
- dvs := stack.Peek()
- declared := NewVarSet()
-
- for v, occ := range dvs.occurrence {
- if occ == declaredVar {
- declared.Add(dvs.vs[v])
- }
- }
-
- bodyvars := cpy.Vars(VarVisitorParams{})
-
- for v := range used {
- if gv, ok := stack.Declared(v); ok {
- bodyvars.Add(gv)
- } else {
- bodyvars.Add(v)
- }
- }
-
- unused := declared.Diff(bodyvars).Diff(used)
-
- for _, gv := range unused.Sorted() {
- rv := dvs.reverse[gv]
- if !rv.IsGenerated() {
- // Scan through body exprs, looking for a match between the
- // bad var's original name, and each expr's declared vars.
- foundUnusedVarByName := false
- for i := range body {
- varsDeclaredInExpr := declaredVars(body[i])
- if varsDeclaredInExpr.Contains(dvs.reverse[gv]) {
- // TODO(philipc): Clean up the offset logic here when the parser
- // reports more accurate locations.
- errs = append(errs, NewError(CompileErr, body[i].Loc(), "declared var %v unused", dvs.reverse[gv]))
- foundUnusedVarByName = true
- break
- }
- }
- // Default error location returned.
- if !foundUnusedVarByName {
- errs = append(errs, NewError(CompileErr, body[0].Loc(), "declared var %v unused", dvs.reverse[gv]))
- }
- }
- }
-
- return errs
-}
-
-func rewriteEveryStatement(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) {
- e := expr.Copy()
- every := e.Terms.(*Every)
-
- errs = rewriteDeclaredVarsInTermRecursive(g, stack, every.Domain, errs, strict)
-
- stack.Push()
- defer stack.Pop()
-
- // if the key exists, rewrite
- if every.Key != nil {
- if v := every.Key.Value.(Var); !v.IsWildcard() {
- gv, err := rewriteDeclaredVar(g, stack, v, declaredVar)
- if err != nil {
- return nil, append(errs, NewError(CompileErr, every.Loc(), err.Error()))
- }
- every.Key.Value = gv
- }
- } else { // if the key doesn't exist, add dummy local
- every.Key = NewTerm(g.Generate())
- }
-
- // value is always present
- if v := every.Value.Value.(Var); !v.IsWildcard() {
- gv, err := rewriteDeclaredVar(g, stack, v, declaredVar)
- if err != nil {
- return nil, append(errs, NewError(CompileErr, every.Loc(), err.Error()))
- }
- every.Value.Value = gv
- }
-
- used := NewVarSet()
- every.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, every.Body, errs, strict)
-
- return rewriteDeclaredVarsInExpr(g, stack, e, errs, strict)
-}
-
-func rewriteSomeDeclStatement(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) {
- e := expr.Copy()
- decl := e.Terms.(*SomeDecl)
- for i := range decl.Symbols {
- switch v := decl.Symbols[i].Value.(type) {
- case Var:
- if _, err := rewriteDeclaredVar(g, stack, v, declaredVar); err != nil {
- return nil, append(errs, NewError(CompileErr, decl.Loc(), err.Error()))
- }
- case Call:
- var key, val, container *Term
- switch len(v) {
- case 4: // member3
- key = v[1]
- val = v[2]
- container = v[3]
- case 3: // member
- key = NewTerm(g.Generate())
- val = v[1]
- container = v[2]
- }
-
- var rhs *Term
- switch c := container.Value.(type) {
- case Ref:
- rhs = RefTerm(append(c, key)...)
- default:
- rhs = RefTerm(container, key)
- }
- e.Terms = []*Term{
- RefTerm(VarTerm(Equality.Name)), val, rhs,
- }
-
- for _, v0 := range outputVarsForExprEq(e, container.Vars()).Sorted() {
- if _, err := rewriteDeclaredVar(g, stack, v0, declaredVar); err != nil {
- return nil, append(errs, NewError(CompileErr, decl.Loc(), err.Error()))
- }
- }
- return rewriteDeclaredVarsInExpr(g, stack, e, errs, strict)
- }
- }
- return nil, errs
-}
-
-func rewriteDeclaredVarsInExpr(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) {
- vis := NewGenericVisitor(func(x interface{}) bool {
- var stop bool
- switch x := x.(type) {
- case *Term:
- stop, errs = rewriteDeclaredVarsInTerm(g, stack, x, errs, strict)
- case *With:
- stop, errs = true, rewriteDeclaredVarsInWithRecursive(g, stack, x, errs, strict)
- }
- return stop
- })
- vis.Walk(expr)
- return expr, errs
-}
-
-func rewriteDeclaredAssignment(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) {
-
- if expr.Negated {
- errs = append(errs, NewError(CompileErr, expr.Location, "cannot assign vars inside negated expression"))
- return expr, errs
- }
-
- numErrsBefore := len(errs)
-
- if !validEqAssignArgCount(expr) {
- return expr, errs
- }
-
- // Rewrite terms on right hand side capture seen vars and recursively
- // process comprehensions before left hand side is processed. Also
- // rewrite with modifier.
- errs = rewriteDeclaredVarsInTermRecursive(g, stack, expr.Operand(1), errs, strict)
-
- for _, w := range expr.With {
- errs = rewriteDeclaredVarsInTermRecursive(g, stack, w.Value, errs, strict)
- }
-
- // Rewrite vars on left hand side with unique names. Catch redeclaration
- // and invalid term types here.
- var vis func(t *Term) bool
-
- vis = func(t *Term) bool {
- switch v := t.Value.(type) {
- case Var:
- if gv, err := rewriteDeclaredVar(g, stack, v, assignedVar); err != nil {
- errs = append(errs, NewError(CompileErr, t.Location, err.Error()))
- } else {
- t.Value = gv
- }
- return true
- case *Array:
- return false
- case *object:
- v.Foreach(func(_, v *Term) {
- WalkTerms(v, vis)
- })
- return true
- case Ref:
- if RootDocumentRefs.Contains(t) {
- if gv, err := rewriteDeclaredVar(g, stack, v[0].Value.(Var), assignedVar); err != nil {
- errs = append(errs, NewError(CompileErr, t.Location, err.Error()))
- } else {
- t.Value = gv
- }
- return true
- }
- }
- errs = append(errs, NewError(CompileErr, t.Location, "cannot assign to %v", TypeName(t.Value)))
- return true
- }
-
- WalkTerms(expr.Operand(0), vis)
-
- if len(errs) == numErrsBefore {
- loc := expr.Operator()[0].Location
- expr.SetOperator(RefTerm(VarTerm(Equality.Name).SetLocation(loc)).SetLocation(loc))
- }
-
- return expr, errs
-}
-
-func rewriteDeclaredVarsInTerm(g *localVarGenerator, stack *localDeclaredVars, term *Term, errs Errors, strict bool) (bool, Errors) {
- switch v := term.Value.(type) {
- case Var:
- if gv, ok := stack.Declared(v); ok {
- term.Value = gv
- stack.Seen(v)
- } else if stack.Occurrence(v) == newVar {
- stack.Insert(v, v, seenVar)
- }
- case Ref:
- if RootDocumentRefs.Contains(term) {
- x := v[0].Value.(Var)
- if occ, ok := stack.GlobalOccurrence(x); ok && occ != seenVar {
- gv, _ := stack.Declared(x)
- term.Value = gv
- }
-
- return true, errs
- }
- return false, errs
- case Call:
- ref := v[0]
- WalkVars(ref, func(v Var) bool {
- if gv, ok := stack.Declared(v); ok && !gv.Equal(v) {
- // We will rewrite the ref of a function call, which is never ok since we don't have first-class functions.
- errs = append(errs, NewError(CompileErr, term.Location, "called function %s shadowed", ref))
- return true
- }
- return false
- })
- return false, errs
- case *object:
- cpy, _ := v.Map(func(k, v *Term) (*Term, *Term, error) {
- kcpy := k.Copy()
- errs = rewriteDeclaredVarsInTermRecursive(g, stack, kcpy, errs, strict)
- errs = rewriteDeclaredVarsInTermRecursive(g, stack, v, errs, strict)
- return kcpy, v, nil
- })
- term.Value = cpy
- case Set:
- cpy, _ := v.Map(func(elem *Term) (*Term, error) {
- elemcpy := elem.Copy()
- errs = rewriteDeclaredVarsInTermRecursive(g, stack, elemcpy, errs, strict)
- return elemcpy, nil
- })
- term.Value = cpy
- case *ArrayComprehension:
- errs = rewriteDeclaredVarsInArrayComprehension(g, stack, v, errs, strict)
- case *SetComprehension:
- errs = rewriteDeclaredVarsInSetComprehension(g, stack, v, errs, strict)
- case *ObjectComprehension:
- errs = rewriteDeclaredVarsInObjectComprehension(g, stack, v, errs, strict)
- default:
- return false, errs
- }
- return true, errs
-}
-
-func rewriteDeclaredVarsInTermRecursive(g *localVarGenerator, stack *localDeclaredVars, term *Term, errs Errors, strict bool) Errors {
- WalkTerms(term, func(t *Term) bool {
- var stop bool
- stop, errs = rewriteDeclaredVarsInTerm(g, stack, t, errs, strict)
- return stop
- })
- return errs
-}
-
-func rewriteDeclaredVarsInWithRecursive(g *localVarGenerator, stack *localDeclaredVars, w *With, errs Errors, strict bool) Errors {
- // NOTE(sr): `with input as` and `with input.a.b.c as` are deliberately skipped here: `input` could
- // have been shadowed by a local variable/argument but should NOT be replaced in the `with` target.
- //
- // We cannot drop `input` from the stack since it's conceivable to do `with input[input] as` where
- // the second input is meant to be the local var. It's a terrible idea, but when you're shadowing
- // `input` those might be your thing.
- errs = rewriteDeclaredVarsInTermRecursive(g, stack, w.Target, errs, strict)
- if sdwInput, ok := stack.Declared(InputRootDocument.Value.(Var)); ok { // Was "input" shadowed...
- switch value := w.Target.Value.(type) {
- case Var:
- if sdwInput.Equal(value) { // ...and replaced? If so, fix it
- w.Target.Value = InputRootRef
- }
- case Ref:
- if sdwInput.Equal(value[0].Value.(Var)) {
- w.Target.Value.(Ref)[0].Value = InputRootDocument.Value
- }
- }
- }
- // No special handling of the `with` value
- return rewriteDeclaredVarsInTermRecursive(g, stack, w.Value, errs, strict)
-}
-
-func rewriteDeclaredVarsInArrayComprehension(g *localVarGenerator, stack *localDeclaredVars, v *ArrayComprehension, errs Errors, strict bool) Errors {
- used := NewVarSet()
- used.Update(v.Term.Vars())
-
- stack.Push()
- v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict)
- errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Term, errs, strict)
- stack.Pop()
- return errs
-}
-
-func rewriteDeclaredVarsInSetComprehension(g *localVarGenerator, stack *localDeclaredVars, v *SetComprehension, errs Errors, strict bool) Errors {
- used := NewVarSet()
- used.Update(v.Term.Vars())
-
- stack.Push()
- v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict)
- errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Term, errs, strict)
- stack.Pop()
- return errs
-}
-
-func rewriteDeclaredVarsInObjectComprehension(g *localVarGenerator, stack *localDeclaredVars, v *ObjectComprehension, errs Errors, strict bool) Errors {
- used := NewVarSet()
- used.Update(v.Key.Vars())
- used.Update(v.Value.Vars())
-
- stack.Push()
- v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict)
- errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Key, errs, strict)
- errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Value, errs, strict)
- stack.Pop()
- return errs
-}
-
-func rewriteDeclaredVar(g *localVarGenerator, stack *localDeclaredVars, v Var, occ varOccurrence) (gv Var, err error) {
- switch stack.Occurrence(v) {
- case seenVar:
- return gv, fmt.Errorf("var %v referenced above", v)
- case assignedVar:
- return gv, fmt.Errorf("var %v assigned above", v)
- case declaredVar:
- return gv, fmt.Errorf("var %v declared above", v)
- case argVar:
- return gv, fmt.Errorf("arg %v redeclared", v)
- }
- gv = g.Generate()
- stack.Insert(v, gv, occ)
- return
-}
-
-// rewriteWithModifiersInBody will rewrite the body so that with modifiers do
-// not contain terms that require evaluation as values. If this function
-// encounters an invalid with modifier target then it will raise an error.
-func rewriteWithModifiersInBody(c *Compiler, unsafeBuiltinsMap map[string]struct{}, f *equalityFactory, body Body) (Body, *Error) {
- var result Body
- for i := range body {
- exprs, err := rewriteWithModifier(c, unsafeBuiltinsMap, f, body[i])
- if err != nil {
- return nil, err
- }
- if len(exprs) > 0 {
- for _, expr := range exprs {
- result.Append(expr)
- }
- } else {
- result.Append(body[i])
- }
- }
- return result, nil
-}
-
-func rewriteWithModifier(c *Compiler, unsafeBuiltinsMap map[string]struct{}, f *equalityFactory, expr *Expr) ([]*Expr, *Error) {
-
- var result []*Expr
- for i := range expr.With {
- eval, err := validateWith(c, unsafeBuiltinsMap, expr, i)
- if err != nil {
- return nil, err
- }
-
- if eval {
- eq := f.Generate(expr.With[i].Value)
- result = append(result, eq)
- expr.With[i].Value = eq.Operand(0)
- }
- }
-
- return append(result, expr), nil
-}
-
-func validateWith(c *Compiler, unsafeBuiltinsMap map[string]struct{}, expr *Expr, i int) (bool, *Error) {
- target, value := expr.With[i].Target, expr.With[i].Value
-
- // Ensure that values that are built-ins are rewritten to Ref (not Var)
- if v, ok := value.Value.(Var); ok {
- if _, ok := c.builtins[v.String()]; ok {
- value.Value = Ref([]*Term{NewTerm(v)})
- }
- }
- isBuiltinRefOrVar, err := isBuiltinRefOrVar(c.builtins, unsafeBuiltinsMap, target)
- if err != nil {
- return false, err
- }
-
- isAllowedUnknownFuncCall := false
- if c.allowUndefinedFuncCalls {
- switch target.Value.(type) {
- case Ref, Var:
- isAllowedUnknownFuncCall = true
- }
- }
-
- switch {
- case isDataRef(target):
- ref := target.Value.(Ref)
- targetNode := c.RuleTree
- for i := 0; i < len(ref)-1; i++ {
- child := targetNode.Child(ref[i].Value)
- if child == nil {
- break
- } else if len(child.Values) > 0 {
- return false, NewError(CompileErr, target.Loc(), "with keyword cannot partially replace virtual document(s)")
- }
- targetNode = child
- }
-
- if targetNode != nil {
- // NOTE(sr): at this point in the compiler stages, we don't have a fully-populated
- // TypeEnv yet -- so we have to make do with this check to see if the replacement
- // target is a function. It's probably wrong for arity-0 functions, but those are
- // and edge case anyways.
- if child := targetNode.Child(ref[len(ref)-1].Value); child != nil {
- for _, v := range child.Values {
- if len(v.(*Rule).Head.Args) > 0 {
- if ok, err := validateWithFunctionValue(c.builtins, unsafeBuiltinsMap, c.RuleTree, value); err != nil || ok {
- return false, err // err may be nil
- }
- }
- }
- }
- }
-
- // If the with-value is a ref to a function, but not a call, we can't rewrite it
- if r, ok := value.Value.(Ref); ok {
- // TODO: check that target ref doesn't exist?
- if valueNode := c.RuleTree.Find(r); valueNode != nil {
- for _, v := range valueNode.Values {
- if len(v.(*Rule).Head.Args) > 0 {
- return false, nil
- }
- }
- }
- }
- case isInputRef(target): // ok, valid
- case isBuiltinRefOrVar:
-
- // NOTE(sr): first we ensure that parsed Var builtins (`count`, `concat`, etc)
- // are rewritten to their proper Ref convention
- if v, ok := target.Value.(Var); ok {
- target.Value = Ref([]*Term{NewTerm(v)})
- }
-
- targetRef := target.Value.(Ref)
- bi := c.builtins[targetRef.String()] // safe because isBuiltinRefOrVar checked this
- if err := validateWithBuiltinTarget(bi, targetRef, target.Loc()); err != nil {
- return false, err
- }
-
- if ok, err := validateWithFunctionValue(c.builtins, unsafeBuiltinsMap, c.RuleTree, value); err != nil || ok {
- return false, err // err may be nil
- }
- case isAllowedUnknownFuncCall:
- // The target isn't a ref to the input doc, data doc, or a known built-in, but it might be a ref to an unknown built-in.
- return false, nil
- default:
- return false, NewError(TypeErr, target.Location, "with keyword target must reference existing %v, %v, or a function", InputRootDocument, DefaultRootDocument)
- }
- return requiresEval(value), nil
-}
-
-func validateWithBuiltinTarget(bi *Builtin, target Ref, loc *location.Location) *Error {
- switch bi.Name {
- case Equality.Name,
- RegoMetadataChain.Name,
- RegoMetadataRule.Name:
- return NewError(CompileErr, loc, "with keyword replacing built-in function: replacement of %q invalid", bi.Name)
- }
-
- switch {
- case target.HasPrefix(Ref([]*Term{VarTerm("internal")})):
- return NewError(CompileErr, loc, "with keyword replacing built-in function: replacement of internal function %q invalid", target)
-
- case bi.Relation:
- return NewError(CompileErr, loc, "with keyword replacing built-in function: target must not be a relation")
-
- case bi.Decl.Result() == nil:
- return NewError(CompileErr, loc, "with keyword replacing built-in function: target must not be a void function")
- }
- return nil
-}
-
-func validateWithFunctionValue(bs map[string]*Builtin, unsafeMap map[string]struct{}, ruleTree *TreeNode, value *Term) (bool, *Error) {
- if v, ok := value.Value.(Ref); ok {
- if ruleTree.Find(v) != nil { // ref exists in rule tree
- return true, nil
- }
- }
- return isBuiltinRefOrVar(bs, unsafeMap, value)
-}
-
-func isInputRef(term *Term) bool {
- if ref, ok := term.Value.(Ref); ok {
- if ref.HasPrefix(InputRootRef) {
- return true
- }
- }
- return false
-}
-
-func isDataRef(term *Term) bool {
- if ref, ok := term.Value.(Ref); ok {
- if ref.HasPrefix(DefaultRootRef) {
- return true
- }
- }
- return false
-}
-
-func isBuiltinRefOrVar(bs map[string]*Builtin, unsafeBuiltinsMap map[string]struct{}, term *Term) (bool, *Error) {
- switch v := term.Value.(type) {
- case Ref, Var:
- if _, ok := unsafeBuiltinsMap[v.String()]; ok {
- return false, NewError(CompileErr, term.Location, "with keyword replacing built-in function: target must not be unsafe: %q", v)
- }
- _, ok := bs[v.String()]
- return ok, nil
- }
- return false, nil
-}
-
-func isVirtual(node *TreeNode, ref Ref) bool {
- for i := range ref {
- child := node.Child(ref[i].Value)
- if child == nil {
- return false
- } else if len(child.Values) > 0 {
- return true
- }
- node = child
- }
- return true
-}
-
-func safetyErrorSlice(unsafe unsafeVars, rewritten map[Var]Var) (result Errors) {
-
- if len(unsafe) == 0 {
- return
- }
-
- for _, pair := range unsafe.Vars() {
- v := pair.Var
- if w, ok := rewritten[v]; ok {
- v = w
- }
- if !v.IsGenerated() {
- if _, ok := futureKeywords[string(v)]; ok {
- result = append(result, NewError(UnsafeVarErr, pair.Loc,
- "var %[1]v is unsafe (hint: `import future.keywords.%[1]v` to import a future keyword)", v))
- continue
- }
- result = append(result, NewError(UnsafeVarErr, pair.Loc, "var %v is unsafe", v))
- }
- }
-
- if len(result) > 0 {
- return
- }
-
- // If the expression contains unsafe generated variables, report which
- // expressions are unsafe instead of the variables that are unsafe (since
- // the latter are not meaningful to the user.)
- pairs := unsafe.Slice()
-
- sort.Slice(pairs, func(i, j int) bool {
- return pairs[i].Expr.Location.Compare(pairs[j].Expr.Location) < 0
- })
-
- // Report at most one error per generated variable.
- seen := NewVarSet()
-
- for _, expr := range pairs {
- before := len(seen)
- for v := range expr.Vars {
- if v.IsGenerated() {
- seen.Add(v)
- }
- }
- if len(seen) > before {
- result = append(result, NewError(UnsafeVarErr, expr.Expr.Location, "expression is unsafe"))
- }
- }
-
- return
-}
-
-func checkUnsafeBuiltins(unsafeBuiltinsMap map[string]struct{}, node interface{}) Errors {
- errs := make(Errors, 0)
- WalkExprs(node, func(x *Expr) bool {
- if x.IsCall() {
- operator := x.Operator().String()
- if _, ok := unsafeBuiltinsMap[operator]; ok {
- errs = append(errs, NewError(TypeErr, x.Loc(), "unsafe built-in function calls in expression: %v", operator))
- }
- }
- return false
- })
- return errs
-}
-
-func rewriteVarsInRef(vars ...map[Var]Var) varRewriter {
- return func(node Ref) Ref {
- i, _ := TransformVars(node, func(v Var) (Value, error) {
- for _, m := range vars {
- if u, ok := m[v]; ok {
- return u, nil
- }
- }
- return v, nil
- })
- return i.(Ref)
- }
-}
-
-// NOTE(sr): This is duplicated with compile/compile.go; but moving it into another location
-// would cause a circular dependency -- the refSet definition needs ast.Ref. If we make it
-// public in the ast package, the compile package could take it from there, but it would also
-// increase our public interface. Let's reconsider if we need it in a third place.
-type refSet struct {
- s []Ref
-}
-
-func newRefSet(x ...Ref) *refSet {
- result := &refSet{}
- for i := range x {
- result.AddPrefix(x[i])
- }
- return result
-}
-
-// ContainsPrefix returns true if r is prefixed by any of the existing refs in the set.
-func (rs *refSet) ContainsPrefix(r Ref) bool {
- for i := range rs.s {
- if r.HasPrefix(rs.s[i]) {
- return true
- }
- }
- return false
-}
-
-// AddPrefix inserts r into the set if r is not prefixed by any existing
-// refs in the set. If any existing refs are prefixed by r, those existing
-// refs are removed.
-func (rs *refSet) AddPrefix(r Ref) {
- if rs.ContainsPrefix(r) {
- return
- }
- cpy := []Ref{r}
- for i := range rs.s {
- if !rs.s[i].HasPrefix(r) {
- cpy = append(cpy, rs.s[i])
- }
- }
- rs.s = cpy
-}
-
-// Sorted returns a sorted slice of terms for refs in the set.
-func (rs *refSet) Sorted() []*Term {
- terms := make([]*Term, len(rs.s))
- for i := range rs.s {
- terms[i] = NewTerm(rs.s[i])
- }
- sort.Slice(terms, func(i, j int) bool {
- return terms[i].Value.Compare(terms[j].Value) < 0
- })
- return terms
+// OutputVarsFromExpr returns all variables which are the "output" for
+// the given expression. For safety checks this means that they would be
+// made safe by the expr.
+func OutputVarsFromExpr(c *Compiler, expr *Expr, safe VarSet) VarSet {
+ return v1.OutputVarsFromExpr(c, expr, safe)
}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/compilehelper.go b/vendor/github.com/open-policy-agent/opa/ast/compilehelper.go
index dd48884f9d..37ede329ea 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/compilehelper.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/compilehelper.go
@@ -4,41 +4,29 @@
package ast
+import v1 "github.com/open-policy-agent/opa/v1/ast"
+
// CompileModules takes a set of Rego modules represented as strings and
// compiles them for evaluation. The keys of the map are used as filenames.
func CompileModules(modules map[string]string) (*Compiler, error) {
- return CompileModulesWithOpt(modules, CompileOpts{})
+ return CompileModulesWithOpt(modules, CompileOpts{
+ ParserOptions: ParserOptions{
+ RegoVersion: DefaultRegoVersion,
+ },
+ })
}
// CompileOpts defines a set of options for the compiler.
-type CompileOpts struct {
- EnablePrintStatements bool
- ParserOptions ParserOptions
-}
+type CompileOpts = v1.CompileOpts
// CompileModulesWithOpt takes a set of Rego modules represented as strings and
// compiles them for evaluation. The keys of the map are used as filenames.
func CompileModulesWithOpt(modules map[string]string, opts CompileOpts) (*Compiler, error) {
-
- parsed := make(map[string]*Module, len(modules))
-
- for f, module := range modules {
- var pm *Module
- var err error
- if pm, err = ParseModuleWithOpts(f, module, opts.ParserOptions); err != nil {
- return nil, err
- }
- parsed[f] = pm
- }
-
- compiler := NewCompiler().WithEnablePrintStatements(opts.EnablePrintStatements)
- compiler.Compile(parsed)
-
- if compiler.Failed() {
- return nil, compiler.Errors
+ if opts.ParserOptions.RegoVersion == RegoUndefined {
+ opts.ParserOptions.RegoVersion = DefaultRegoVersion
}
- return compiler, nil
+ return v1.CompileModulesWithOpt(modules, opts)
}
// MustCompileModules compiles a set of Rego modules represented as strings. If
diff --git a/vendor/github.com/open-policy-agent/opa/ast/conflicts.go b/vendor/github.com/open-policy-agent/opa/ast/conflicts.go
index c2713ad576..10edce382c 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/conflicts.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/conflicts.go
@@ -5,49 +5,11 @@
package ast
import (
- "strings"
+ v1 "github.com/open-policy-agent/opa/v1/ast"
)
// CheckPathConflicts returns a set of errors indicating paths that
// are in conflict with the result of the provided callable.
func CheckPathConflicts(c *Compiler, exists func([]string) (bool, error)) Errors {
- var errs Errors
-
- root := c.RuleTree.Child(DefaultRootDocument.Value)
- if root == nil {
- return nil
- }
-
- for _, node := range root.Children {
- errs = append(errs, checkDocumentConflicts(node, exists, nil)...)
- }
-
- return errs
-}
-
-func checkDocumentConflicts(node *TreeNode, exists func([]string) (bool, error), path []string) Errors {
-
- switch key := node.Key.(type) {
- case String:
- path = append(path, string(key))
- default: // other key types cannot conflict with data
- return nil
- }
-
- if len(node.Values) > 0 {
- s := strings.Join(path, "/")
- if ok, err := exists(path); err != nil {
- return Errors{NewError(CompileErr, node.Values[0].(*Rule).Loc(), "conflict check for data path %v: %v", s, err.Error())}
- } else if ok {
- return Errors{NewError(CompileErr, node.Values[0].(*Rule).Loc(), "conflicting rule for data path %v found", s)}
- }
- }
-
- var errs Errors
-
- for _, child := range node.Children {
- errs = append(errs, checkDocumentConflicts(child, exists, path)...)
- }
-
- return errs
+ return v1.CheckPathConflicts(c, exists)
}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/doc.go b/vendor/github.com/open-policy-agent/opa/ast/doc.go
index 62b04e301e..ba974e5ba6 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/doc.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/doc.go
@@ -1,36 +1,8 @@
-// Copyright 2016 The OPA Authors. All rights reserved.
+// Copyright 2024 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.
-// Package ast declares Rego syntax tree types and also includes a parser and compiler for preparing policies for execution in the policy engine.
-//
-// Rego policies are defined using a relatively small set of types: modules, package and import declarations, rules, expressions, and terms. At their core, policies consist of rules that are defined by one or more expressions over documents available to the policy engine. The expressions are defined by intrinsic values (terms) such as strings, objects, variables, etc.
-//
-// Rego policies are typically defined in text files and then parsed and compiled by the policy engine at runtime. The parsing stage takes the text or string representation of the policy and converts it into an abstract syntax tree (AST) that consists of the types mentioned above. The AST is organized as follows:
-//
-// Module
-// |
-// +--- Package (Reference)
-// |
-// +--- Imports
-// | |
-// | +--- Import (Term)
-// |
-// +--- Rules
-// |
-// +--- Rule
-// |
-// +--- Head
-// | |
-// | +--- Name (Variable)
-// | |
-// | +--- Key (Term)
-// | |
-// | +--- Value (Term)
-// |
-// +--- Body
-// |
-// +--- Expression (Term | Terms | Variable Declaration)
-//
-// At query time, the policy engine expects policies to have been compiled. The compilation stage takes one or more modules and compiles them into a format that the policy engine supports.
+// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended.
+// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead.
+// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information.
package ast
diff --git a/vendor/github.com/open-policy-agent/opa/ast/env.go b/vendor/github.com/open-policy-agent/opa/ast/env.go
index c767aafefb..ef0ccf89ce 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/env.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/env.go
@@ -5,522 +5,8 @@
package ast
import (
- "fmt"
- "strings"
-
- "github.com/open-policy-agent/opa/types"
- "github.com/open-policy-agent/opa/util"
+ v1 "github.com/open-policy-agent/opa/v1/ast"
)
// TypeEnv contains type info for static analysis such as type checking.
-type TypeEnv struct {
- tree *typeTreeNode
- next *TypeEnv
- newChecker func() *typeChecker
-}
-
-// newTypeEnv returns an empty TypeEnv. The constructor is not exported because
-// type environments should only be created by the type checker.
-func newTypeEnv(f func() *typeChecker) *TypeEnv {
- return &TypeEnv{
- tree: newTypeTree(),
- newChecker: f,
- }
-}
-
-// Get returns the type of x.
-func (env *TypeEnv) Get(x interface{}) types.Type {
-
- if term, ok := x.(*Term); ok {
- x = term.Value
- }
-
- switch x := x.(type) {
-
- // Scalars.
- case Null:
- return types.NewNull()
- case Boolean:
- return types.NewBoolean()
- case Number:
- return types.NewNumber()
- case String:
- return types.NewString()
-
- // Composites.
- case *Array:
- static := make([]types.Type, x.Len())
- for i := range static {
- tpe := env.Get(x.Elem(i).Value)
- static[i] = tpe
- }
-
- var dynamic types.Type
- if len(static) == 0 {
- dynamic = types.A
- }
-
- return types.NewArray(static, dynamic)
-
- case *lazyObj:
- return env.Get(x.force())
- case *object:
- static := []*types.StaticProperty{}
- var dynamic *types.DynamicProperty
-
- x.Foreach(func(k, v *Term) {
- if IsConstant(k.Value) {
- kjson, err := JSON(k.Value)
- if err == nil {
- tpe := env.Get(v)
- static = append(static, types.NewStaticProperty(kjson, tpe))
- return
- }
- }
- // Can't handle it as a static property, fallback to dynamic
- typeK := env.Get(k.Value)
- typeV := env.Get(v.Value)
- dynamic = types.NewDynamicProperty(typeK, typeV)
- })
-
- if len(static) == 0 && dynamic == nil {
- dynamic = types.NewDynamicProperty(types.A, types.A)
- }
-
- return types.NewObject(static, dynamic)
-
- case Set:
- var tpe types.Type
- x.Foreach(func(elem *Term) {
- other := env.Get(elem.Value)
- tpe = types.Or(tpe, other)
- })
- if tpe == nil {
- tpe = types.A
- }
- return types.NewSet(tpe)
-
- // Comprehensions.
- case *ArrayComprehension:
- cpy, errs := env.newChecker().CheckBody(env, x.Body)
- if len(errs) == 0 {
- return types.NewArray(nil, cpy.Get(x.Term))
- }
- return nil
- case *ObjectComprehension:
- cpy, errs := env.newChecker().CheckBody(env, x.Body)
- if len(errs) == 0 {
- return types.NewObject(nil, types.NewDynamicProperty(cpy.Get(x.Key), cpy.Get(x.Value)))
- }
- return nil
- case *SetComprehension:
- cpy, errs := env.newChecker().CheckBody(env, x.Body)
- if len(errs) == 0 {
- return types.NewSet(cpy.Get(x.Term))
- }
- return nil
-
- // Refs.
- case Ref:
- return env.getRef(x)
-
- // Vars.
- case Var:
- if node := env.tree.Child(x); node != nil {
- return node.Value()
- }
- if env.next != nil {
- return env.next.Get(x)
- }
- return nil
-
- // Calls.
- case Call:
- return nil
-
- default:
- panic("unreachable")
- }
-}
-
-func (env *TypeEnv) getRef(ref Ref) types.Type {
-
- node := env.tree.Child(ref[0].Value)
- if node == nil {
- return env.getRefFallback(ref)
- }
-
- return env.getRefRec(node, ref, ref[1:])
-}
-
-func (env *TypeEnv) getRefFallback(ref Ref) types.Type {
-
- if env.next != nil {
- return env.next.Get(ref)
- }
-
- if RootDocumentNames.Contains(ref[0]) {
- return types.A
- }
-
- return nil
-}
-
-func (env *TypeEnv) getRefRec(node *typeTreeNode, ref, tail Ref) types.Type {
- if len(tail) == 0 {
- return env.getRefRecExtent(node)
- }
-
- if node.Leaf() {
- if node.children.Len() > 0 {
- if child := node.Child(tail[0].Value); child != nil {
- return env.getRefRec(child, ref, tail[1:])
- }
- }
- return selectRef(node.Value(), tail)
- }
-
- if !IsConstant(tail[0].Value) {
- return selectRef(env.getRefRecExtent(node), tail)
- }
-
- child := node.Child(tail[0].Value)
- if child == nil {
- return env.getRefFallback(ref)
- }
-
- return env.getRefRec(child, ref, tail[1:])
-}
-
-func (env *TypeEnv) getRefRecExtent(node *typeTreeNode) types.Type {
-
- if node.Leaf() {
- return node.Value()
- }
-
- children := []*types.StaticProperty{}
-
- node.Children().Iter(func(k, v util.T) bool {
- key := k.(Value)
- child := v.(*typeTreeNode)
-
- tpe := env.getRefRecExtent(child)
-
- // NOTE(sr): Converting to Golang-native types here is an extension of what we did
- // before -- only supporting strings. But since we cannot differentiate sets and arrays
- // that way, we could reconsider.
- switch key.(type) {
- case String, Number, Boolean: // skip anything else
- propKey, err := JSON(key)
- if err != nil {
- panic(fmt.Errorf("unreachable, ValueToInterface: %w", err))
- }
- children = append(children, types.NewStaticProperty(propKey, tpe))
- }
- return false
- })
-
- // TODO(tsandall): for now, these objects can have any dynamic properties
- // because we don't have schema for base docs. Once schemas are supported
- // we can improve this.
- return types.NewObject(children, types.NewDynamicProperty(types.S, types.A))
-}
-
-func (env *TypeEnv) wrap() *TypeEnv {
- cpy := *env
- cpy.next = env
- cpy.tree = newTypeTree()
- return &cpy
-}
-
-// typeTreeNode is used to store type information in a tree.
-type typeTreeNode struct {
- key Value
- value types.Type
- children *util.HashMap
-}
-
-func newTypeTree() *typeTreeNode {
- return &typeTreeNode{
- key: nil,
- value: nil,
- children: util.NewHashMap(valueEq, valueHash),
- }
-}
-
-func (n *typeTreeNode) Child(key Value) *typeTreeNode {
- value, ok := n.children.Get(key)
- if !ok {
- return nil
- }
- return value.(*typeTreeNode)
-}
-
-func (n *typeTreeNode) Children() *util.HashMap {
- return n.children
-}
-
-func (n *typeTreeNode) Get(path Ref) types.Type {
- curr := n
- for _, term := range path {
- child, ok := curr.children.Get(term.Value)
- if !ok {
- return nil
- }
- curr = child.(*typeTreeNode)
- }
- return curr.Value()
-}
-
-func (n *typeTreeNode) Leaf() bool {
- return n.value != nil
-}
-
-func (n *typeTreeNode) PutOne(key Value, tpe types.Type) {
- c, ok := n.children.Get(key)
-
- var child *typeTreeNode
- if !ok {
- child = newTypeTree()
- child.key = key
- n.children.Put(key, child)
- } else {
- child = c.(*typeTreeNode)
- }
-
- child.value = tpe
-}
-
-func (n *typeTreeNode) Put(path Ref, tpe types.Type) {
- curr := n
- for _, term := range path {
- c, ok := curr.children.Get(term.Value)
-
- var child *typeTreeNode
- if !ok {
- child = newTypeTree()
- child.key = term.Value
- curr.children.Put(child.key, child)
- } else {
- child = c.(*typeTreeNode)
- }
-
- curr = child
- }
- curr.value = tpe
-}
-
-// Insert inserts tpe at path in the tree, but also merges the value into any types.Object present along that path.
-// If a types.Object is inserted, any leafs already present further down the tree are merged into the inserted object.
-// path must be ground.
-func (n *typeTreeNode) Insert(path Ref, tpe types.Type, env *TypeEnv) {
- curr := n
- for i, term := range path {
- c, ok := curr.children.Get(term.Value)
-
- var child *typeTreeNode
- if !ok {
- child = newTypeTree()
- child.key = term.Value
- curr.children.Put(child.key, child)
- } else {
- child = c.(*typeTreeNode)
-
- if child.value != nil && i+1 < len(path) {
- // If child has an object value, merge the new value into it.
- if o, ok := child.value.(*types.Object); ok {
- var err error
- child.value, err = insertIntoObject(o, path[i+1:], tpe, env)
- if err != nil {
- panic(fmt.Errorf("unreachable, insertIntoObject: %w", err))
- }
- }
- }
- }
-
- curr = child
- }
-
- curr.value = mergeTypes(curr.value, tpe)
-
- if _, ok := tpe.(*types.Object); ok && curr.children.Len() > 0 {
- // merge all leafs into the inserted object
- leafs := curr.Leafs()
- for p, t := range leafs {
- var err error
- curr.value, err = insertIntoObject(curr.value.(*types.Object), *p, t, env)
- if err != nil {
- panic(fmt.Errorf("unreachable, insertIntoObject: %w", err))
- }
- }
- }
-}
-
-// mergeTypes merges the types of 'a' and 'b'. If both are sets, their 'of' types are joined with an types.Or.
-// If both are objects, the key types of their dynamic properties are joined with types.Or:s, and their value types
-// are recursively merged (using mergeTypes).
-// If 'a' and 'b' are both objects, and at least one of them have static properties, they are joined
-// with an types.Or, instead of being merged.
-// If 'a' is an Any containing an Object, and 'b' is an Object (or vice versa); AND both objects have no
-// static properties, they are merged.
-// If 'a' and 'b' are different types, they are joined with an types.Or.
-func mergeTypes(a, b types.Type) types.Type {
- if a == nil {
- return b
- }
-
- if b == nil {
- return a
- }
-
- switch a := a.(type) {
- case *types.Object:
- if bObj, ok := b.(*types.Object); ok && len(a.StaticProperties()) == 0 && len(bObj.StaticProperties()) == 0 {
- if len(a.StaticProperties()) > 0 || len(bObj.StaticProperties()) > 0 {
- return types.Or(a, bObj)
- }
-
- aDynProps := a.DynamicProperties()
- bDynProps := bObj.DynamicProperties()
- dynProps := types.NewDynamicProperty(
- types.Or(aDynProps.Key, bDynProps.Key),
- mergeTypes(aDynProps.Value, bDynProps.Value))
- return types.NewObject(nil, dynProps)
- } else if bAny, ok := b.(types.Any); ok && len(a.StaticProperties()) == 0 {
- // If a is an object type with no static components ...
- for _, t := range bAny {
- if tObj, ok := t.(*types.Object); ok && len(tObj.StaticProperties()) == 0 {
- // ... and b is a types.Any containing an object with no static components, we merge them.
- aDynProps := a.DynamicProperties()
- tDynProps := tObj.DynamicProperties()
- tDynProps.Key = types.Or(tDynProps.Key, aDynProps.Key)
- tDynProps.Value = types.Or(tDynProps.Value, aDynProps.Value)
- return bAny
- }
- }
- }
- case *types.Set:
- if bSet, ok := b.(*types.Set); ok {
- return types.NewSet(types.Or(a.Of(), bSet.Of()))
- }
- case types.Any:
- if _, ok := b.(types.Any); !ok {
- return mergeTypes(b, a)
- }
- }
-
- return types.Or(a, b)
-}
-
-func (n *typeTreeNode) String() string {
- b := strings.Builder{}
-
- if k := n.key; k != nil {
- b.WriteString(k.String())
- } else {
- b.WriteString("-")
- }
-
- if v := n.value; v != nil {
- b.WriteString(": ")
- b.WriteString(v.String())
- }
-
- n.children.Iter(func(_, v util.T) bool {
- if child, ok := v.(*typeTreeNode); ok {
- b.WriteString("\n\t+ ")
- s := child.String()
- s = strings.ReplaceAll(s, "\n", "\n\t")
- b.WriteString(s)
- }
- return false
- })
-
- return b.String()
-}
-
-func insertIntoObject(o *types.Object, path Ref, tpe types.Type, env *TypeEnv) (*types.Object, error) {
- if len(path) == 0 {
- return o, nil
- }
-
- key := env.Get(path[0].Value)
-
- if len(path) == 1 {
- var dynamicProps *types.DynamicProperty
- if dp := o.DynamicProperties(); dp != nil {
- dynamicProps = types.NewDynamicProperty(types.Or(o.DynamicProperties().Key, key), types.Or(o.DynamicProperties().Value, tpe))
- } else {
- dynamicProps = types.NewDynamicProperty(key, tpe)
- }
- return types.NewObject(o.StaticProperties(), dynamicProps), nil
- }
-
- child, err := insertIntoObject(types.NewObject(nil, nil), path[1:], tpe, env)
- if err != nil {
- return nil, err
- }
-
- var dynamicProps *types.DynamicProperty
- if dp := o.DynamicProperties(); dp != nil {
- dynamicProps = types.NewDynamicProperty(types.Or(o.DynamicProperties().Key, key), types.Or(o.DynamicProperties().Value, child))
- } else {
- dynamicProps = types.NewDynamicProperty(key, child)
- }
- return types.NewObject(o.StaticProperties(), dynamicProps), nil
-}
-
-func (n *typeTreeNode) Leafs() map[*Ref]types.Type {
- leafs := map[*Ref]types.Type{}
- n.children.Iter(func(_, v util.T) bool {
- collectLeafs(v.(*typeTreeNode), nil, leafs)
- return false
- })
- return leafs
-}
-
-func collectLeafs(n *typeTreeNode, path Ref, leafs map[*Ref]types.Type) {
- nPath := append(path, NewTerm(n.key))
- if n.Leaf() {
- leafs[&nPath] = n.Value()
- return
- }
- n.children.Iter(func(_, v util.T) bool {
- collectLeafs(v.(*typeTreeNode), nPath, leafs)
- return false
- })
-}
-
-func (n *typeTreeNode) Value() types.Type {
- return n.value
-}
-
-// selectConstant returns the attribute of the type referred to by the term. If
-// the attribute type cannot be determined, nil is returned.
-func selectConstant(tpe types.Type, term *Term) types.Type {
- x, err := JSON(term.Value)
- if err == nil {
- return types.Select(tpe, x)
- }
- return nil
-}
-
-// selectRef returns the type of the nested attribute referred to by ref. If
-// the attribute type cannot be determined, nil is returned. If the ref
-// contains vars or refs, then the returned type will be a union of the
-// possible types.
-func selectRef(tpe types.Type, ref Ref) types.Type {
-
- if tpe == nil || len(ref) == 0 {
- return tpe
- }
-
- head, tail := ref[0], ref[1:]
-
- switch head.Value.(type) {
- case Var, Ref, *Array, Object, Set:
- return selectRef(types.Values(tpe), tail)
- default:
- return selectRef(selectConstant(tpe, head), tail)
- }
-}
+type TypeEnv = v1.TypeEnv
diff --git a/vendor/github.com/open-policy-agent/opa/ast/errors.go b/vendor/github.com/open-policy-agent/opa/ast/errors.go
index 066dfcdd68..722cfc0fb7 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/errors.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/errors.go
@@ -5,119 +5,42 @@
package ast
import (
- "fmt"
- "sort"
- "strings"
+ v1 "github.com/open-policy-agent/opa/v1/ast"
)
// Errors represents a series of errors encountered during parsing, compiling,
// etc.
-type Errors []*Error
-
-func (e Errors) Error() string {
-
- if len(e) == 0 {
- return "no error(s)"
- }
-
- if len(e) == 1 {
- return fmt.Sprintf("1 error occurred: %v", e[0].Error())
- }
-
- s := make([]string, len(e))
- for i, err := range e {
- s[i] = err.Error()
- }
-
- return fmt.Sprintf("%d errors occurred:\n%s", len(e), strings.Join(s, "\n"))
-}
-
-// Sort sorts the error slice by location. If the locations are equal then the
-// error message is compared.
-func (e Errors) Sort() {
- sort.Slice(e, func(i, j int) bool {
- a := e[i]
- b := e[j]
-
- if cmp := a.Location.Compare(b.Location); cmp != 0 {
- return cmp < 0
- }
-
- return a.Error() < b.Error()
- })
-}
+type Errors = v1.Errors
const (
// ParseErr indicates an unclassified parse error occurred.
- ParseErr = "rego_parse_error"
+ ParseErr = v1.ParseErr
// CompileErr indicates an unclassified compile error occurred.
- CompileErr = "rego_compile_error"
+ CompileErr = v1.CompileErr
// TypeErr indicates a type error was caught.
- TypeErr = "rego_type_error"
+ TypeErr = v1.TypeErr
// UnsafeVarErr indicates an unsafe variable was found during compilation.
- UnsafeVarErr = "rego_unsafe_var_error"
+ UnsafeVarErr = v1.UnsafeVarErr
// RecursionErr indicates recursion was found during compilation.
- RecursionErr = "rego_recursion_error"
+ RecursionErr = v1.RecursionErr
)
// IsError returns true if err is an AST error with code.
func IsError(code string, err error) bool {
- if err, ok := err.(*Error); ok {
- return err.Code == code
- }
- return false
+ return v1.IsError(code, err)
}
// ErrorDetails defines the interface for detailed error messages.
-type ErrorDetails interface {
- Lines() []string
-}
+type ErrorDetails = v1.ErrorDetails
// Error represents a single error caught during parsing, compiling, etc.
-type Error struct {
- Code string `json:"code"`
- Message string `json:"message"`
- Location *Location `json:"location,omitempty"`
- Details ErrorDetails `json:"details,omitempty"`
-}
-
-func (e *Error) Error() string {
-
- var prefix string
-
- if e.Location != nil {
-
- if len(e.Location.File) > 0 {
- prefix += e.Location.File + ":" + fmt.Sprint(e.Location.Row)
- } else {
- prefix += fmt.Sprint(e.Location.Row) + ":" + fmt.Sprint(e.Location.Col)
- }
- }
-
- msg := fmt.Sprintf("%v: %v", e.Code, e.Message)
-
- if len(prefix) > 0 {
- msg = prefix + ": " + msg
- }
-
- if e.Details != nil {
- for _, line := range e.Details.Lines() {
- msg += "\n\t" + line
- }
- }
-
- return msg
-}
+type Error = v1.Error
// NewError returns a new Error object.
-func NewError(code string, loc *Location, f string, a ...interface{}) *Error {
- return &Error{
- Code: code,
- Location: loc,
- Message: fmt.Sprintf(f, a...),
- }
+func NewError(code string, loc *Location, f string, a ...any) *Error {
+ return v1.NewError(code, loc, f, a...)
}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/index.go b/vendor/github.com/open-policy-agent/opa/ast/index.go
index cb0cbea323..7e80bb7716 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/index.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/index.go
@@ -5,904 +5,16 @@
package ast
import (
- "fmt"
- "sort"
- "strings"
-
- "github.com/open-policy-agent/opa/util"
+ v1 "github.com/open-policy-agent/opa/v1/ast"
)
// RuleIndex defines the interface for rule indices.
-type RuleIndex interface {
-
- // Build tries to construct an index for the given rules. If the index was
- // constructed, it returns true, otherwise false.
- Build(rules []*Rule) bool
-
- // Lookup searches the index for rules that will match the provided
- // resolver. If the resolver returns an error, it is returned via err.
- Lookup(resolver ValueResolver) (*IndexResult, error)
-
- // AllRules traverses the index and returns all rules that will match
- // the provided resolver without any optimizations (effectively with
- // indexing disabled). If the resolver returns an error, it is returned
- // via err.
- AllRules(resolver ValueResolver) (*IndexResult, error)
-}
+type RuleIndex v1.RuleIndex
// IndexResult contains the result of an index lookup.
-type IndexResult struct {
- Kind RuleKind
- Rules []*Rule
- Else map[*Rule][]*Rule
- Default *Rule
- EarlyExit bool
- OnlyGroundRefs bool
-}
+type IndexResult = v1.IndexResult
// NewIndexResult returns a new IndexResult object.
func NewIndexResult(kind RuleKind) *IndexResult {
- return &IndexResult{
- Kind: kind,
- Else: map[*Rule][]*Rule{},
- }
-}
-
-// Empty returns true if there are no rules to evaluate.
-func (ir *IndexResult) Empty() bool {
- return len(ir.Rules) == 0 && ir.Default == nil
-}
-
-type baseDocEqIndex struct {
- skipIndexing Set
- isVirtual func(Ref) bool
- root *trieNode
- defaultRule *Rule
- kind RuleKind
- onlyGroundRefs bool
-}
-
-func newBaseDocEqIndex(isVirtual func(Ref) bool) *baseDocEqIndex {
- return &baseDocEqIndex{
- skipIndexing: NewSet(NewTerm(InternalPrint.Ref())),
- isVirtual: isVirtual,
- root: newTrieNodeImpl(),
- onlyGroundRefs: true,
- }
-}
-
-func (i *baseDocEqIndex) Build(rules []*Rule) bool {
- if len(rules) == 0 {
- return false
- }
-
- i.kind = rules[0].Head.RuleKind()
- indices := newrefindices(i.isVirtual)
-
- // build indices for each rule.
- for idx := range rules {
- WalkRules(rules[idx], func(rule *Rule) bool {
- if rule.Default {
- i.defaultRule = rule
- return false
- }
- if i.onlyGroundRefs {
- i.onlyGroundRefs = rule.Head.Reference.IsGround()
- }
- var skip bool
- for _, expr := range rule.Body {
- if op := expr.OperatorTerm(); op != nil && i.skipIndexing.Contains(op) {
- skip = true
- break
- }
- }
- if !skip {
- for _, expr := range rule.Body {
- indices.Update(rule, expr)
- }
- }
- return false
- })
- }
-
- // build trie out of indices.
- for idx := range rules {
- var prio int
- WalkRules(rules[idx], func(rule *Rule) bool {
- if rule.Default {
- return false
- }
- node := i.root
- if indices.Indexed(rule) {
- for _, ref := range indices.Sorted() {
- node = node.Insert(ref, indices.Value(rule, ref), indices.Mapper(rule, ref))
- }
- }
- // Insert rule into trie with (insertion order, priority order)
- // tuple. Retaining the insertion order allows us to return rules
- // in the order they were passed to this function.
- node.append([...]int{idx, prio}, rule)
- prio++
- return false
- })
- }
- return true
-}
-
-func (i *baseDocEqIndex) Lookup(resolver ValueResolver) (*IndexResult, error) {
-
- tr := newTrieTraversalResult()
-
- err := i.root.Traverse(resolver, tr)
- if err != nil {
- return nil, err
- }
-
- result := NewIndexResult(i.kind)
- result.Default = i.defaultRule
- result.OnlyGroundRefs = i.onlyGroundRefs
- result.Rules = make([]*Rule, 0, len(tr.ordering))
-
- for _, pos := range tr.ordering {
- sort.Slice(tr.unordered[pos], func(i, j int) bool {
- return tr.unordered[pos][i].prio[1] < tr.unordered[pos][j].prio[1]
- })
- nodes := tr.unordered[pos]
- root := nodes[0].rule
-
- result.Rules = append(result.Rules, root)
- if len(nodes) > 1 {
- result.Else[root] = make([]*Rule, len(nodes)-1)
- for i := 1; i < len(nodes); i++ {
- result.Else[root][i-1] = nodes[i].rule
- }
- }
- }
-
- result.EarlyExit = tr.values.Len() == 1 && tr.values.Slice()[0].IsGround()
-
- return result, nil
-}
-
-func (i *baseDocEqIndex) AllRules(_ ValueResolver) (*IndexResult, error) {
- tr := newTrieTraversalResult()
-
- // Walk over the rule trie and accumulate _all_ rules
- rw := &ruleWalker{result: tr}
- i.root.Do(rw)
-
- result := NewIndexResult(i.kind)
- result.Default = i.defaultRule
- result.OnlyGroundRefs = i.onlyGroundRefs
- result.Rules = make([]*Rule, 0, len(tr.ordering))
-
- for _, pos := range tr.ordering {
- sort.Slice(tr.unordered[pos], func(i, j int) bool {
- return tr.unordered[pos][i].prio[1] < tr.unordered[pos][j].prio[1]
- })
- nodes := tr.unordered[pos]
- root := nodes[0].rule
- result.Rules = append(result.Rules, root)
- if len(nodes) > 1 {
- result.Else[root] = make([]*Rule, len(nodes)-1)
- for i := 1; i < len(nodes); i++ {
- result.Else[root][i-1] = nodes[i].rule
- }
- }
- }
-
- result.EarlyExit = tr.values.Len() == 1 && tr.values.Slice()[0].IsGround()
-
- return result, nil
-}
-
-type ruleWalker struct {
- result *trieTraversalResult
-}
-
-func (r *ruleWalker) Do(x interface{}) trieWalker {
- tn := x.(*trieNode)
- r.result.Add(tn)
- return r
-}
-
-type valueMapper struct {
- Key string
- MapValue func(Value) Value
-}
-
-type refindex struct {
- Ref Ref
- Value Value
- Mapper *valueMapper
-}
-
-type refindices struct {
- isVirtual func(Ref) bool
- rules map[*Rule][]*refindex
- frequency *util.HashMap
- sorted []Ref
-}
-
-func newrefindices(isVirtual func(Ref) bool) *refindices {
- return &refindices{
- isVirtual: isVirtual,
- rules: map[*Rule][]*refindex{},
- frequency: util.NewHashMap(func(a, b util.T) bool {
- r1, r2 := a.(Ref), b.(Ref)
- return r1.Equal(r2)
- }, func(x util.T) int {
- return x.(Ref).Hash()
- }),
- }
-}
-
-// Update attempts to update the refindices for the given expression in the
-// given rule. If the expression cannot be indexed the update does not affect
-// the indices.
-func (i *refindices) Update(rule *Rule, expr *Expr) {
-
- if expr.Negated {
- return
- }
-
- if len(expr.With) > 0 {
- // NOTE(tsandall): In the future, we may need to consider expressions
- // that have with statements applied to them.
- return
- }
-
- op := expr.Operator()
-
- switch {
- case op.Equal(Equality.Ref()):
- i.updateEq(rule, expr)
-
- case op.Equal(Equal.Ref()) && len(expr.Operands()) == 2:
- // NOTE(tsandall): if equal() is called with more than two arguments the
- // output value is being captured in which case the indexer cannot
- // exclude the rule if the equal() call would return false (because the
- // false value must still be produced.)
- i.updateEq(rule, expr)
-
- case op.Equal(GlobMatch.Ref()) && len(expr.Operands()) == 3:
- // NOTE(sr): Same as with equal() above -- 4 operands means the output
- // of `glob.match` is captured and the rule can thus not be excluded.
- i.updateGlobMatch(rule, expr)
- }
-}
-
-// Sorted returns a sorted list of references that the indices were built from.
-// References that appear more frequently in the indexed rules are ordered
-// before less frequently appearing references.
-func (i *refindices) Sorted() []Ref {
-
- if i.sorted == nil {
- counts := make([]int, 0, i.frequency.Len())
- i.sorted = make([]Ref, 0, i.frequency.Len())
-
- i.frequency.Iter(func(k, v util.T) bool {
- counts = append(counts, v.(int))
- i.sorted = append(i.sorted, k.(Ref))
- return false
- })
-
- sort.Slice(i.sorted, func(a, b int) bool {
- if counts[a] > counts[b] {
- return true
- } else if counts[b] > counts[a] {
- return false
- }
- return i.sorted[a][0].Loc().Compare(i.sorted[b][0].Loc()) < 0
- })
- }
-
- return i.sorted
-}
-
-func (i *refindices) Indexed(rule *Rule) bool {
- return len(i.rules[rule]) > 0
-}
-
-func (i *refindices) Value(rule *Rule, ref Ref) Value {
- if index := i.index(rule, ref); index != nil {
- return index.Value
- }
- return nil
-}
-
-func (i *refindices) Mapper(rule *Rule, ref Ref) *valueMapper {
- if index := i.index(rule, ref); index != nil {
- return index.Mapper
- }
- return nil
-}
-
-func (i *refindices) updateEq(rule *Rule, expr *Expr) {
- a, b := expr.Operand(0), expr.Operand(1)
- args := rule.Head.Args
- if idx, ok := eqOperandsToRefAndValue(i.isVirtual, args, a, b); ok {
- i.insert(rule, idx)
- return
- }
- if idx, ok := eqOperandsToRefAndValue(i.isVirtual, args, b, a); ok {
- i.insert(rule, idx)
- return
- }
-}
-
-func (i *refindices) updateGlobMatch(rule *Rule, expr *Expr) {
- args := rule.Head.Args
-
- delim, ok := globDelimiterToString(expr.Operand(1))
- if !ok {
- return
- }
-
- if arr := globPatternToArray(expr.Operand(0), delim); arr != nil {
- // The 3rd operand of glob.match is the value to match. We assume the
- // 3rd operand was a reference that has been rewritten and bound to a
- // variable earlier in the query OR a function argument variable.
- match := expr.Operand(2)
- if _, ok := match.Value.(Var); ok {
- var ref Ref
- for _, other := range i.rules[rule] {
- if _, ok := other.Value.(Var); ok && other.Value.Compare(match.Value) == 0 {
- ref = other.Ref
- }
- }
- if ref == nil {
- for j, arg := range args {
- if arg.Equal(match) {
- ref = Ref{FunctionArgRootDocument, IntNumberTerm(j)}
- }
- }
- }
- if ref != nil {
- i.insert(rule, &refindex{
- Ref: ref,
- Value: arr.Value,
- Mapper: &valueMapper{
- Key: delim,
- MapValue: func(v Value) Value {
- if s, ok := v.(String); ok {
- return stringSliceToArray(splitStringEscaped(string(s), delim))
- }
- return v
- },
- },
- })
- }
- }
- }
-}
-
-func (i *refindices) insert(rule *Rule, index *refindex) {
-
- count, ok := i.frequency.Get(index.Ref)
- if !ok {
- count = 0
- }
-
- i.frequency.Put(index.Ref, count.(int)+1)
-
- for pos, other := range i.rules[rule] {
- if other.Ref.Equal(index.Ref) {
- i.rules[rule][pos] = index
- return
- }
- }
-
- i.rules[rule] = append(i.rules[rule], index)
-}
-
-func (i *refindices) index(rule *Rule, ref Ref) *refindex {
- for _, index := range i.rules[rule] {
- if index.Ref.Equal(ref) {
- return index
- }
- }
- return nil
-}
-
-type trieWalker interface {
- Do(x interface{}) trieWalker
-}
-
-type trieTraversalResult struct {
- unordered map[int][]*ruleNode
- ordering []int
- values Set
-}
-
-func newTrieTraversalResult() *trieTraversalResult {
- return &trieTraversalResult{
- unordered: map[int][]*ruleNode{},
- values: NewSet(),
- }
-}
-
-func (tr *trieTraversalResult) Add(t *trieNode) {
- for _, node := range t.rules {
- root := node.prio[0]
- nodes, ok := tr.unordered[root]
- if !ok {
- tr.ordering = append(tr.ordering, root)
- }
- tr.unordered[root] = append(nodes, node)
- }
- if t.values != nil {
- t.values.Foreach(func(v *Term) { tr.values.Add(v) })
- }
-}
-
-type trieNode struct {
- ref Ref
- values Set
- mappers []*valueMapper
- next *trieNode
- any *trieNode
- undefined *trieNode
- scalars *util.HashMap
- array *trieNode
- rules []*ruleNode
-}
-
-func (node *trieNode) String() string {
- var flags []string
- flags = append(flags, fmt.Sprintf("self:%p", node))
- if len(node.ref) > 0 {
- flags = append(flags, node.ref.String())
- }
- if node.next != nil {
- flags = append(flags, fmt.Sprintf("next:%p", node.next))
- }
- if node.any != nil {
- flags = append(flags, fmt.Sprintf("any:%p", node.any))
- }
- if node.undefined != nil {
- flags = append(flags, fmt.Sprintf("undefined:%p", node.undefined))
- }
- if node.array != nil {
- flags = append(flags, fmt.Sprintf("array:%p", node.array))
- }
- if node.scalars.Len() > 0 {
- buf := make([]string, 0, node.scalars.Len())
- node.scalars.Iter(func(k, v util.T) bool {
- key := k.(Value)
- val := v.(*trieNode)
- buf = append(buf, fmt.Sprintf("scalar(%v):%p", key, val))
- return false
- })
- sort.Strings(buf)
- flags = append(flags, strings.Join(buf, " "))
- }
- if len(node.rules) > 0 {
- flags = append(flags, fmt.Sprintf("%d rule(s)", len(node.rules)))
- }
- if len(node.mappers) > 0 {
- flags = append(flags, fmt.Sprintf("%d mapper(s)", len(node.mappers)))
- }
- if node.values != nil {
- if l := node.values.Len(); l > 0 {
- flags = append(flags, fmt.Sprintf("%d value(s)", l))
- }
- }
- return strings.Join(flags, " ")
-}
-
-func (node *trieNode) append(prio [2]int, rule *Rule) {
- node.rules = append(node.rules, &ruleNode{prio, rule})
-
- if node.values != nil && rule.Head.Value != nil {
- node.values.Add(rule.Head.Value)
- return
- }
-
- if node.values == nil && rule.Head.DocKind() == CompleteDoc {
- node.values = NewSet(rule.Head.Value)
- }
-}
-
-type ruleNode struct {
- prio [2]int
- rule *Rule
-}
-
-func newTrieNodeImpl() *trieNode {
- return &trieNode{
- scalars: util.NewHashMap(valueEq, valueHash),
- }
-}
-
-func (node *trieNode) Do(walker trieWalker) {
- next := walker.Do(node)
- if next == nil {
- return
- }
- if node.any != nil {
- node.any.Do(next)
- }
- if node.undefined != nil {
- node.undefined.Do(next)
- }
-
- node.scalars.Iter(func(_, v util.T) bool {
- child := v.(*trieNode)
- child.Do(next)
- return false
- })
-
- if node.array != nil {
- node.array.Do(next)
- }
- if node.next != nil {
- node.next.Do(next)
- }
-}
-
-func (node *trieNode) Insert(ref Ref, value Value, mapper *valueMapper) *trieNode {
-
- if node.next == nil {
- node.next = newTrieNodeImpl()
- node.next.ref = ref
- }
-
- if mapper != nil {
- node.next.addMapper(mapper)
- }
-
- return node.next.insertValue(value)
-}
-
-func (node *trieNode) Traverse(resolver ValueResolver, tr *trieTraversalResult) error {
-
- if node == nil {
- return nil
- }
-
- tr.Add(node)
-
- return node.next.traverse(resolver, tr)
-}
-
-func (node *trieNode) addMapper(mapper *valueMapper) {
- for i := range node.mappers {
- if node.mappers[i].Key == mapper.Key {
- return
- }
- }
- node.mappers = append(node.mappers, mapper)
-}
-
-func (node *trieNode) insertValue(value Value) *trieNode {
-
- switch value := value.(type) {
- case nil:
- if node.undefined == nil {
- node.undefined = newTrieNodeImpl()
- }
- return node.undefined
- case Var:
- if node.any == nil {
- node.any = newTrieNodeImpl()
- }
- return node.any
- case Null, Boolean, Number, String:
- child, ok := node.scalars.Get(value)
- if !ok {
- child = newTrieNodeImpl()
- node.scalars.Put(value, child)
- }
- return child.(*trieNode)
- case *Array:
- if node.array == nil {
- node.array = newTrieNodeImpl()
- }
- return node.array.insertArray(value)
- }
-
- panic("illegal value")
-}
-
-func (node *trieNode) insertArray(arr *Array) *trieNode {
-
- if arr.Len() == 0 {
- return node
- }
-
- switch head := arr.Elem(0).Value.(type) {
- case Var:
- if node.any == nil {
- node.any = newTrieNodeImpl()
- }
- return node.any.insertArray(arr.Slice(1, -1))
- case Null, Boolean, Number, String:
- child, ok := node.scalars.Get(head)
- if !ok {
- child = newTrieNodeImpl()
- node.scalars.Put(head, child)
- }
- return child.(*trieNode).insertArray(arr.Slice(1, -1))
- }
-
- panic("illegal value")
-}
-
-func (node *trieNode) traverse(resolver ValueResolver, tr *trieTraversalResult) error {
-
- if node == nil {
- return nil
- }
-
- v, err := resolver.Resolve(node.ref)
- if err != nil {
- if IsUnknownValueErr(err) {
- return node.traverseUnknown(resolver, tr)
- }
- return err
- }
-
- if node.undefined != nil {
- err = node.undefined.Traverse(resolver, tr)
- if err != nil {
- return err
- }
- }
-
- if v == nil {
- return nil
- }
-
- if node.any != nil {
- err = node.any.Traverse(resolver, tr)
- if err != nil {
- return err
- }
- }
-
- if err := node.traverseValue(resolver, tr, v); err != nil {
- return err
- }
-
- for i := range node.mappers {
- if err := node.traverseValue(resolver, tr, node.mappers[i].MapValue(v)); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (node *trieNode) traverseValue(resolver ValueResolver, tr *trieTraversalResult, value Value) error {
-
- switch value := value.(type) {
- case *Array:
- if node.array == nil {
- return nil
- }
- return node.array.traverseArray(resolver, tr, value)
-
- case Null, Boolean, Number, String:
- child, ok := node.scalars.Get(value)
- if !ok {
- return nil
- }
- return child.(*trieNode).Traverse(resolver, tr)
- }
-
- return nil
-}
-
-func (node *trieNode) traverseArray(resolver ValueResolver, tr *trieTraversalResult, arr *Array) error {
-
- if arr.Len() == 0 {
- return node.Traverse(resolver, tr)
- }
-
- if node.any != nil {
- err := node.any.traverseArray(resolver, tr, arr.Slice(1, -1))
- if err != nil {
- return err
- }
- }
-
- head := arr.Elem(0).Value
-
- if !IsScalar(head) {
- return nil
- }
-
- child, ok := node.scalars.Get(head)
- if !ok {
- return nil
- }
- return child.(*trieNode).traverseArray(resolver, tr, arr.Slice(1, -1))
-}
-
-func (node *trieNode) traverseUnknown(resolver ValueResolver, tr *trieTraversalResult) error {
-
- if node == nil {
- return nil
- }
-
- if err := node.Traverse(resolver, tr); err != nil {
- return err
- }
-
- if err := node.undefined.traverseUnknown(resolver, tr); err != nil {
- return err
- }
-
- if err := node.any.traverseUnknown(resolver, tr); err != nil {
- return err
- }
-
- if err := node.array.traverseUnknown(resolver, tr); err != nil {
- return err
- }
-
- var iterErr error
- node.scalars.Iter(func(_, v util.T) bool {
- child := v.(*trieNode)
- if iterErr = child.traverseUnknown(resolver, tr); iterErr != nil {
- return true
- }
- return false
- })
-
- return iterErr
-}
-
-// If term `a` is one of the function's operands, we store a Ref: `args[0]`
-// for the argument number. So for `f(x, y) { x = 10; y = 12 }`, we'll
-// bind `args[0]` and `args[1]` to this rule when called for (x=10) and
-// (y=12) respectively.
-func eqOperandsToRefAndValue(isVirtual func(Ref) bool, args []*Term, a, b *Term) (*refindex, bool) {
- switch v := a.Value.(type) {
- case Var:
- for i, arg := range args {
- if arg.Value.Compare(v) == 0 {
- if bval, ok := indexValue(b); ok {
- return &refindex{Ref: Ref{FunctionArgRootDocument, IntNumberTerm(i)}, Value: bval}, true
- }
- }
- }
- case Ref:
- if !RootDocumentNames.Contains(v[0]) {
- return nil, false
- }
- if isVirtual(v) {
- return nil, false
- }
- if v.IsNested() || !v.IsGround() {
- return nil, false
- }
- if bval, ok := indexValue(b); ok {
- return &refindex{Ref: v, Value: bval}, true
- }
- }
- return nil, false
-}
-
-func indexValue(b *Term) (Value, bool) {
- switch b := b.Value.(type) {
- case Null, Boolean, Number, String, Var:
- return b, true
- case *Array:
- stop := false
- first := true
- vis := NewGenericVisitor(func(x interface{}) bool {
- if first {
- first = false
- return false
- }
- switch x.(type) {
- // No nested structures or values that require evaluation (other than var).
- case *Array, Object, Set, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Ref:
- stop = true
- }
- return stop
- })
- vis.Walk(b)
- if !stop {
- return b, true
- }
- }
-
- return nil, false
-}
-
-func globDelimiterToString(delim *Term) (string, bool) {
-
- arr, ok := delim.Value.(*Array)
- if !ok {
- return "", false
- }
-
- var result string
-
- if arr.Len() == 0 {
- result = "."
- } else {
- for i := 0; i < arr.Len(); i++ {
- term := arr.Elem(i)
- s, ok := term.Value.(String)
- if !ok {
- return "", false
- }
- result += string(s)
- }
- }
-
- return result, true
-}
-
-func globPatternToArray(pattern *Term, delim string) *Term {
-
- s, ok := pattern.Value.(String)
- if !ok {
- return nil
- }
-
- parts := splitStringEscaped(string(s), delim)
- arr := make([]*Term, len(parts))
-
- for i := range parts {
- if parts[i] == "*" {
- arr[i] = VarTerm("$globwildcard")
- } else {
- var escaped bool
- for _, c := range parts[i] {
- if c == '\\' {
- escaped = !escaped
- continue
- }
- if !escaped {
- switch c {
- case '[', '?', '{', '*':
- // TODO(tsandall): super glob and character pattern
- // matching not supported yet.
- return nil
- }
- }
- escaped = false
- }
- arr[i] = StringTerm(parts[i])
- }
- }
-
- return NewTerm(NewArray(arr...))
-}
-
-// splits s on characters in delim except if delim characters have been escaped
-// with reverse solidus.
-func splitStringEscaped(s string, delim string) []string {
-
- var last, curr int
- var escaped bool
- var result []string
-
- for ; curr < len(s); curr++ {
- if s[curr] == '\\' || escaped {
- escaped = !escaped
- continue
- }
- if strings.ContainsRune(delim, rune(s[curr])) {
- result = append(result, s[last:curr])
- last = curr + 1
- }
- }
-
- result = append(result, s[last:])
-
- return result
-}
-
-func stringSliceToArray(s []string) *Array {
- arr := make([]*Term, len(s))
- for i, v := range s {
- arr[i] = StringTerm(v)
- }
- return NewArray(arr...)
+ return v1.NewIndexResult(kind)
}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/interning.go b/vendor/github.com/open-policy-agent/opa/ast/interning.go
new file mode 100644
index 0000000000..29231006aa
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/interning.go
@@ -0,0 +1,24 @@
+// Copyright 2024 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ v1 "github.com/open-policy-agent/opa/v1/ast"
+)
+
+func InternedBooleanTerm(b bool) *Term {
+ return v1.InternedTerm(b)
+}
+
+// InternedIntNumberTerm returns a term with the given integer value. The term is
+// cached between -1 to 512, and for values outside of that range, this function
+// is equivalent to ast.IntNumberTerm.
+func InternedIntNumberTerm(i int) *Term {
+ return v1.InternedTerm(i)
+}
+
+func HasInternedIntNumberTerm(i int) bool {
+ return v1.HasInternedIntNumberTerm(i)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/json/doc.go b/vendor/github.com/open-policy-agent/opa/ast/json/doc.go
new file mode 100644
index 0000000000..26aee9b994
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/json/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2024 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended.
+// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead.
+// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information.
+package json
diff --git a/vendor/github.com/open-policy-agent/opa/ast/json/json.go b/vendor/github.com/open-policy-agent/opa/ast/json/json.go
index 565017d58e..8a3a36bb9b 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/json/json.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/json/json.go
@@ -1,36 +1,15 @@
package json
+import v1 "github.com/open-policy-agent/opa/v1/ast/json"
+
// Options defines the options for JSON operations,
// currently only marshaling can be configured
-type Options struct {
- MarshalOptions MarshalOptions
-}
+type Options = v1.Options
// MarshalOptions defines the options for JSON marshaling,
// currently only toggling the marshaling of location information is supported
-type MarshalOptions struct {
- // IncludeLocation toggles the marshaling of location information
- IncludeLocation NodeToggle
- // IncludeLocationText additionally/optionally includes the text of the location
- IncludeLocationText bool
- // ExcludeLocationFile additionally/optionally excludes the file of the location
- // Note that this is inverted (i.e. not "include" as the default needs to remain false)
- ExcludeLocationFile bool
-}
+type MarshalOptions = v1.MarshalOptions
// NodeToggle is a generic struct to allow the toggling of
// settings for different ast node types
-type NodeToggle struct {
- Term bool
- Package bool
- Comment bool
- Import bool
- Rule bool
- Head bool
- Expr bool
- SomeDecl bool
- Every bool
- With bool
- Annotations bool
- AnnotationsRef bool
-}
+type NodeToggle = v1.NodeToggle
diff --git a/vendor/github.com/open-policy-agent/opa/ast/map.go b/vendor/github.com/open-policy-agent/opa/ast/map.go
index b0cc9eb60f..070ad3e5de 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/map.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/map.go
@@ -5,129 +5,14 @@
package ast
import (
- "encoding/json"
-
- "github.com/open-policy-agent/opa/util"
+ v1 "github.com/open-policy-agent/opa/v1/ast"
)
// ValueMap represents a key/value map between AST term values. Any type of term
// can be used as a key in the map.
-type ValueMap struct {
- hashMap *util.HashMap
-}
+type ValueMap = v1.ValueMap
// NewValueMap returns a new ValueMap.
func NewValueMap() *ValueMap {
- vs := &ValueMap{
- hashMap: util.NewHashMap(valueEq, valueHash),
- }
- return vs
-}
-
-// MarshalJSON provides a custom marshaller for the ValueMap which
-// will include the key, value, and value type.
-func (vs *ValueMap) MarshalJSON() ([]byte, error) {
- var tmp []map[string]interface{}
- vs.Iter(func(k Value, v Value) bool {
- tmp = append(tmp, map[string]interface{}{
- "name": k.String(),
- "type": TypeName(v),
- "value": v,
- })
- return false
- })
- return json.Marshal(tmp)
-}
-
-// Copy returns a shallow copy of the ValueMap.
-func (vs *ValueMap) Copy() *ValueMap {
- if vs == nil {
- return nil
- }
- cpy := NewValueMap()
- cpy.hashMap = vs.hashMap.Copy()
- return cpy
-}
-
-// Equal returns true if this ValueMap equals the other.
-func (vs *ValueMap) Equal(other *ValueMap) bool {
- if vs == nil {
- return other == nil || other.Len() == 0
- }
- if other == nil {
- return vs == nil || vs.Len() == 0
- }
- return vs.hashMap.Equal(other.hashMap)
-}
-
-// Len returns the number of elements in the map.
-func (vs *ValueMap) Len() int {
- if vs == nil {
- return 0
- }
- return vs.hashMap.Len()
-}
-
-// Get returns the value in the map for k.
-func (vs *ValueMap) Get(k Value) Value {
- if vs != nil {
- if v, ok := vs.hashMap.Get(k); ok {
- return v.(Value)
- }
- }
- return nil
-}
-
-// Hash returns a hash code for this ValueMap.
-func (vs *ValueMap) Hash() int {
- if vs == nil {
- return 0
- }
- return vs.hashMap.Hash()
-}
-
-// Iter calls the iter function for each key/value pair in the map. If the iter
-// function returns true, iteration stops.
-func (vs *ValueMap) Iter(iter func(Value, Value) bool) bool {
- if vs == nil {
- return false
- }
- return vs.hashMap.Iter(func(kt, vt util.T) bool {
- k := kt.(Value)
- v := vt.(Value)
- return iter(k, v)
- })
-}
-
-// Put inserts a key k into the map with value v.
-func (vs *ValueMap) Put(k, v Value) {
- if vs == nil {
- panic("put on nil value map")
- }
- vs.hashMap.Put(k, v)
-}
-
-// Delete removes a key k from the map.
-func (vs *ValueMap) Delete(k Value) {
- if vs == nil {
- return
- }
- vs.hashMap.Delete(k)
-}
-
-func (vs *ValueMap) String() string {
- if vs == nil {
- return "{}"
- }
- return vs.hashMap.String()
-}
-
-func valueHash(v util.T) int {
- return v.(Value).Hash()
-}
-
-func valueEq(a, b util.T) bool {
- av := a.(Value)
- bv := b.(Value)
- return av.Compare(bv) == 0
+ return v1.NewValueMap()
}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/marshal.go b/vendor/github.com/open-policy-agent/opa/ast/marshal.go
deleted file mode 100644
index 53fb112044..0000000000
--- a/vendor/github.com/open-policy-agent/opa/ast/marshal.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package ast
-
-import (
- astJSON "github.com/open-policy-agent/opa/ast/json"
-)
-
-// customJSON is an interface that can be implemented by AST nodes that
-// allows the parser to set options for JSON operations on that node.
-type customJSON interface {
- setJSONOptions(astJSON.Options)
-}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/parser.go b/vendor/github.com/open-policy-agent/opa/ast/parser.go
index 09ede2baec..45cd4da06e 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/parser.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/parser.go
@@ -1,2733 +1,49 @@
-// Copyright 2020 The OPA Authors. All rights reserved.
+// Copyright 2024 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.
package ast
import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "math/big"
- "net/url"
- "regexp"
- "sort"
- "strconv"
- "strings"
- "unicode/utf8"
-
- "gopkg.in/yaml.v3"
-
- "github.com/open-policy-agent/opa/ast/internal/scanner"
- "github.com/open-policy-agent/opa/ast/internal/tokens"
- astJSON "github.com/open-policy-agent/opa/ast/json"
- "github.com/open-policy-agent/opa/ast/location"
+ v1 "github.com/open-policy-agent/opa/v1/ast"
)
-var RegoV1CompatibleRef = Ref{VarTerm("rego"), StringTerm("v1")}
+var RegoV1CompatibleRef = v1.RegoV1CompatibleRef
// RegoVersion defines the Rego syntax requirements for a module.
-type RegoVersion int
+type RegoVersion = v1.RegoVersion
-const DefaultRegoVersion = RegoVersion(0)
+const DefaultRegoVersion = RegoV0
const (
+ RegoUndefined = v1.RegoUndefined
// RegoV0 is the default, original Rego syntax.
- RegoV0 RegoVersion = iota
+ RegoV0 = v1.RegoV0
// RegoV0CompatV1 requires modules to comply with both the RegoV0 and RegoV1 syntax (as when 'rego.v1' is imported in a module).
// Shortly, RegoV1 compatibility is required, but 'rego.v1' or 'future.keywords' must also be imported.
- RegoV0CompatV1
+ RegoV0CompatV1 = v1.RegoV0CompatV1
// RegoV1 is the Rego syntax enforced by OPA 1.0; e.g.:
// future.keywords part of default keyword set, and don't require imports;
// 'if' and 'contains' required in rule heads;
// (some) strict checks on by default.
- RegoV1
+ RegoV1 = v1.RegoV1
)
-func (v RegoVersion) Int() int {
- if v == RegoV1 {
- return 1
- }
- return 0
-}
-
-func (v RegoVersion) String() string {
- switch v {
- case RegoV0:
- return "v0"
- case RegoV1:
- return "v1"
- case RegoV0CompatV1:
- return "v0v1"
- default:
- return "unknown"
- }
-}
-
func RegoVersionFromInt(i int) RegoVersion {
- if i == 1 {
- return RegoV1
- }
- return RegoV0
-}
-
-// Note: This state is kept isolated from the parser so that we
-// can do efficient shallow copies of these values when doing a
-// save() and restore().
-type state struct {
- s *scanner.Scanner
- lastEnd int
- skippedNL bool
- tok tokens.Token
- tokEnd int
- lit string
- loc Location
- errors Errors
- hints []string
- comments []*Comment
- wildcard int
-}
-
-func (s *state) String() string {
- return fmt.Sprintf("", s.s, s.tok, s.lit, s.loc, len(s.errors), len(s.comments))
-}
-
-func (s *state) Loc() *location.Location {
- cpy := s.loc
- return &cpy
-}
-
-func (s *state) Text(offset, end int) []byte {
- bs := s.s.Bytes()
- if offset >= 0 && offset < len(bs) {
- if end >= offset && end <= len(bs) {
- return bs[offset:end]
- }
- }
- return nil
+ return v1.RegoVersionFromInt(i)
}
// Parser is used to parse Rego statements.
-type Parser struct {
- r io.Reader
- s *state
- po ParserOptions
- cache parsedTermCache
-}
-
-type parsedTermCacheItem struct {
- t *Term
- post *state // post is the post-state that's restored on a cache-hit
- offset int
- next *parsedTermCacheItem
-}
-
-type parsedTermCache struct {
- m *parsedTermCacheItem
-}
-
-func (c parsedTermCache) String() string {
- s := strings.Builder{}
- s.WriteRune('{')
- var e *parsedTermCacheItem
- for e = c.m; e != nil; e = e.next {
- s.WriteString(fmt.Sprintf("%v", e))
- }
- s.WriteRune('}')
- return s.String()
-}
-
-func (e *parsedTermCacheItem) String() string {
- return fmt.Sprintf("<%d:%v>", e.offset, e.t)
-}
+type Parser = v1.Parser
// ParserOptions defines the options for parsing Rego statements.
-type ParserOptions struct {
- Capabilities *Capabilities
- ProcessAnnotation bool
- AllFutureKeywords bool
- FutureKeywords []string
- SkipRules bool
- JSONOptions *astJSON.Options
- // RegoVersion is the version of Rego to parse for.
- RegoVersion RegoVersion
- unreleasedKeywords bool // TODO(sr): cleanup
-}
-
-// EffectiveRegoVersion returns the effective RegoVersion to use for parsing.
-// Deprecated: Use RegoVersion instead.
-func (po *ParserOptions) EffectiveRegoVersion() RegoVersion {
- return po.RegoVersion
-}
+type ParserOptions = v1.ParserOptions
// NewParser creates and initializes a Parser.
func NewParser() *Parser {
- p := &Parser{
- s: &state{},
- po: ParserOptions{},
- }
- return p
-}
-
-// WithFilename provides the filename for Location details
-// on parsed statements.
-func (p *Parser) WithFilename(filename string) *Parser {
- p.s.loc.File = filename
- return p
-}
-
-// WithReader provides the io.Reader that the parser will
-// use as its source.
-func (p *Parser) WithReader(r io.Reader) *Parser {
- p.r = r
- return p
-}
-
-// WithProcessAnnotation enables or disables the processing of
-// annotations by the Parser
-func (p *Parser) WithProcessAnnotation(processAnnotation bool) *Parser {
- p.po.ProcessAnnotation = processAnnotation
- return p
-}
-
-// WithFutureKeywords enables "future" keywords, i.e., keywords that can
-// be imported via
-//
-// import future.keywords.kw
-// import future.keywords.other
-//
-// but in a more direct way. The equivalent of this import would be
-//
-// WithFutureKeywords("kw", "other")
-func (p *Parser) WithFutureKeywords(kws ...string) *Parser {
- p.po.FutureKeywords = kws
- return p
-}
-
-// WithAllFutureKeywords enables all "future" keywords, i.e., the
-// ParserOption equivalent of
-//
-// import future.keywords
-func (p *Parser) WithAllFutureKeywords(yes bool) *Parser {
- p.po.AllFutureKeywords = yes
- return p
-}
-
-// withUnreleasedKeywords allows using keywords that haven't surfaced
-// as future keywords (see above) yet, but have tests that require
-// them to be parsed
-func (p *Parser) withUnreleasedKeywords(yes bool) *Parser {
- p.po.unreleasedKeywords = yes
- return p
-}
-
-// WithCapabilities sets the capabilities structure on the parser.
-func (p *Parser) WithCapabilities(c *Capabilities) *Parser {
- p.po.Capabilities = c
- return p
-}
-
-// WithSkipRules instructs the parser not to attempt to parse Rule statements.
-func (p *Parser) WithSkipRules(skip bool) *Parser {
- p.po.SkipRules = skip
- return p
-}
-
-// WithJSONOptions sets the Options which will be set on nodes to configure
-// their JSON marshaling behavior.
-func (p *Parser) WithJSONOptions(jsonOptions *astJSON.Options) *Parser {
- p.po.JSONOptions = jsonOptions
- return p
-}
-
-func (p *Parser) WithRegoVersion(version RegoVersion) *Parser {
- p.po.RegoVersion = version
- return p
-}
-
-func (p *Parser) parsedTermCacheLookup() (*Term, *state) {
- l := p.s.loc.Offset
- // stop comparing once the cached offsets are lower than l
- for h := p.cache.m; h != nil && h.offset >= l; h = h.next {
- if h.offset == l {
- return h.t, h.post
- }
- }
- return nil, nil
-}
-
-func (p *Parser) parsedTermCachePush(t *Term, s0 *state) {
- s1 := p.save()
- o0 := s0.loc.Offset
- entry := parsedTermCacheItem{t: t, post: s1, offset: o0}
-
- // find the first one whose offset is smaller than ours
- var e *parsedTermCacheItem
- for e = p.cache.m; e != nil; e = e.next {
- if e.offset < o0 {
- break
- }
- }
- entry.next = e
- p.cache.m = &entry
-}
-
-// futureParser returns a shallow copy of `p` with an empty
-// cache, and a scanner that knows all future keywords.
-// It's used to present hints in errors, when statements would
-// only parse successfully if some future keyword is enabled.
-func (p *Parser) futureParser() *Parser {
- q := *p
- q.s = p.save()
- q.s.s = p.s.s.WithKeywords(futureKeywords)
- q.cache = parsedTermCache{}
- return &q
-}
-
-// presentParser returns a shallow copy of `p` with an empty
-// cache, and a scanner that knows none of the future keywords.
-// It is used to successfully parse keyword imports, like
-//
-// import future.keywords.in
-//
-// even when the parser has already been informed about the
-// future keyword "in". This parser won't error out because
-// "in" is an identifier.
-func (p *Parser) presentParser() (*Parser, map[string]tokens.Token) {
- var cpy map[string]tokens.Token
- q := *p
- q.s = p.save()
- q.s.s, cpy = p.s.s.WithoutKeywords(futureKeywords)
- q.cache = parsedTermCache{}
- return &q, cpy
-}
-
-// Parse will read the Rego source and parse statements and
-// comments as they are found. Any errors encountered while
-// parsing will be accumulated and returned as a list of Errors.
-func (p *Parser) Parse() ([]Statement, []*Comment, Errors) {
-
- if p.po.Capabilities == nil {
- p.po.Capabilities = CapabilitiesForThisVersion()
- }
-
- allowedFutureKeywords := map[string]tokens.Token{}
-
- if p.po.RegoVersion == RegoV1 {
- // RegoV1 includes all future keywords in the default language definition
- for k, v := range futureKeywords {
- allowedFutureKeywords[k] = v
- }
-
- // For sake of error reporting, we still need to check that keywords in capabilities are known,
- for _, kw := range p.po.Capabilities.FutureKeywords {
- if _, ok := futureKeywords[kw]; !ok {
- return nil, nil, Errors{
- &Error{
- Code: ParseErr,
- Message: fmt.Sprintf("illegal capabilities: unknown keyword: %v", kw),
- Location: nil,
- },
- }
- }
- }
- // and that explicitly requested future keywords are known.
- for _, kw := range p.po.FutureKeywords {
- if _, ok := allowedFutureKeywords[kw]; !ok {
- return nil, nil, Errors{
- &Error{
- Code: ParseErr,
- Message: fmt.Sprintf("unknown future keyword: %v", kw),
- Location: nil,
- },
- }
- }
- }
- } else {
- for _, kw := range p.po.Capabilities.FutureKeywords {
- var ok bool
- allowedFutureKeywords[kw], ok = futureKeywords[kw]
- if !ok {
- return nil, nil, Errors{
- &Error{
- Code: ParseErr,
- Message: fmt.Sprintf("illegal capabilities: unknown keyword: %v", kw),
- Location: nil,
- },
- }
- }
- }
- }
-
- var err error
- p.s.s, err = scanner.New(p.r)
- if err != nil {
- return nil, nil, Errors{
- &Error{
- Code: ParseErr,
- Message: err.Error(),
- Location: nil,
- },
- }
- }
-
- selected := map[string]tokens.Token{}
- if p.po.AllFutureKeywords || p.po.RegoVersion == RegoV1 {
- for kw, tok := range allowedFutureKeywords {
- selected[kw] = tok
- }
- } else {
- for _, kw := range p.po.FutureKeywords {
- tok, ok := allowedFutureKeywords[kw]
- if !ok {
- return nil, nil, Errors{
- &Error{
- Code: ParseErr,
- Message: fmt.Sprintf("unknown future keyword: %v", kw),
- Location: nil,
- },
- }
- }
- selected[kw] = tok
- }
- }
- p.s.s = p.s.s.WithKeywords(selected)
-
- if p.po.RegoVersion == RegoV1 {
- for kw, tok := range allowedFutureKeywords {
- p.s.s.AddKeyword(kw, tok)
- }
- }
-
- // read the first token to initialize the parser
- p.scan()
-
- var stmts []Statement
-
- // Read from the scanner until the last token is reached or no statements
- // can be parsed. Attempt to parse package statements, import statements,
- // rule statements, and then body/query statements (in that order). If a
- // statement cannot be parsed, restore the parser state before trying the
- // next type of statement. If a statement can be parsed, continue from that
- // point trying to parse packages, imports, etc. in the same order.
- for p.s.tok != tokens.EOF {
-
- s := p.save()
-
- if pkg := p.parsePackage(); pkg != nil {
- stmts = append(stmts, pkg)
- continue
- } else if len(p.s.errors) > 0 {
- break
- }
-
- p.restore(s)
- s = p.save()
-
- if imp := p.parseImport(); imp != nil {
- if RegoRootDocument.Equal(imp.Path.Value.(Ref)[0]) {
- p.regoV1Import(imp)
- }
-
- if FutureRootDocument.Equal(imp.Path.Value.(Ref)[0]) {
- p.futureImport(imp, allowedFutureKeywords)
- }
-
- stmts = append(stmts, imp)
- continue
- } else if len(p.s.errors) > 0 {
- break
- }
-
- p.restore(s)
-
- if !p.po.SkipRules {
- s = p.save()
-
- if rules := p.parseRules(); rules != nil {
- for i := range rules {
- stmts = append(stmts, rules[i])
- }
- continue
- } else if len(p.s.errors) > 0 {
- break
- }
-
- p.restore(s)
- }
-
- if body := p.parseQuery(true, tokens.EOF); body != nil {
- stmts = append(stmts, body)
- continue
- }
-
- break
- }
-
- if p.po.ProcessAnnotation {
- stmts = p.parseAnnotations(stmts)
- }
-
- if p.po.JSONOptions != nil {
- for i := range stmts {
- vis := NewGenericVisitor(func(x interface{}) bool {
- if x, ok := x.(customJSON); ok {
- x.setJSONOptions(*p.po.JSONOptions)
- }
- return false
- })
-
- vis.Walk(stmts[i])
- }
- }
-
- return stmts, p.s.comments, p.s.errors
-}
-
-func (p *Parser) parseAnnotations(stmts []Statement) []Statement {
-
- annotStmts, errs := parseAnnotations(p.s.comments)
- for _, err := range errs {
- p.error(err.Location, err.Message)
- }
-
- for _, annotStmt := range annotStmts {
- stmts = append(stmts, annotStmt)
- }
-
- return stmts
-}
-
-func parseAnnotations(comments []*Comment) ([]*Annotations, Errors) {
-
- var hint = []byte("METADATA")
- var curr *metadataParser
- var blocks []*metadataParser
-
- for i := 0; i < len(comments); i++ {
- if curr != nil {
- if comments[i].Location.Row == comments[i-1].Location.Row+1 && comments[i].Location.Col == 1 {
- curr.Append(comments[i])
- continue
- }
- curr = nil
- }
- if bytes.HasPrefix(bytes.TrimSpace(comments[i].Text), hint) {
- curr = newMetadataParser(comments[i].Location)
- blocks = append(blocks, curr)
- }
- }
-
- var stmts []*Annotations
- var errs Errors
- for _, b := range blocks {
- a, err := b.Parse()
- if err != nil {
- errs = append(errs, &Error{
- Code: ParseErr,
- Message: err.Error(),
- Location: b.loc,
- })
- } else {
- stmts = append(stmts, a)
- }
- }
-
- return stmts, errs
-}
-
-func (p *Parser) parsePackage() *Package {
-
- var pkg Package
- pkg.SetLoc(p.s.Loc())
-
- if p.s.tok != tokens.Package {
- return nil
- }
-
- p.scan()
- if p.s.tok != tokens.Ident {
- p.illegalToken()
- return nil
- }
-
- term := p.parseTerm()
-
- if term != nil {
- switch v := term.Value.(type) {
- case Var:
- pkg.Path = Ref{
- DefaultRootDocument.Copy().SetLocation(term.Location),
- StringTerm(string(v)).SetLocation(term.Location),
- }
- case Ref:
- pkg.Path = make(Ref, len(v)+1)
- pkg.Path[0] = DefaultRootDocument.Copy().SetLocation(v[0].Location)
- first, ok := v[0].Value.(Var)
- if !ok {
- p.errorf(v[0].Location, "unexpected %v token: expecting var", TypeName(v[0].Value))
- return nil
- }
- pkg.Path[1] = StringTerm(string(first)).SetLocation(v[0].Location)
- for i := 2; i < len(pkg.Path); i++ {
- switch v[i-1].Value.(type) {
- case String:
- pkg.Path[i] = v[i-1]
- default:
- p.errorf(v[i-1].Location, "unexpected %v token: expecting string", TypeName(v[i-1].Value))
- return nil
- }
- }
- default:
- p.illegalToken()
- return nil
- }
- }
-
- if pkg.Path == nil {
- if len(p.s.errors) == 0 {
- p.error(p.s.Loc(), "expected path")
- }
- return nil
- }
-
- return &pkg
-}
-
-func (p *Parser) parseImport() *Import {
-
- var imp Import
- imp.SetLoc(p.s.Loc())
-
- if p.s.tok != tokens.Import {
- return nil
- }
-
- p.scan()
- if p.s.tok != tokens.Ident {
- p.error(p.s.Loc(), "expected ident")
- return nil
- }
- q, prev := p.presentParser()
- term := q.parseTerm()
- if term != nil {
- switch v := term.Value.(type) {
- case Var:
- imp.Path = RefTerm(term).SetLocation(term.Location)
- case Ref:
- for i := 1; i < len(v); i++ {
- if _, ok := v[i].Value.(String); !ok {
- p.errorf(v[i].Location, "unexpected %v token: expecting string", TypeName(v[i].Value))
- return nil
- }
- }
- imp.Path = term
- }
- }
- // keep advanced parser state, reset known keywords
- p.s = q.s
- p.s.s = q.s.s.WithKeywords(prev)
-
- if imp.Path == nil {
- p.error(p.s.Loc(), "expected path")
- return nil
- }
-
- path := imp.Path.Value.(Ref)
-
- switch {
- case RootDocumentNames.Contains(path[0]):
- case FutureRootDocument.Equal(path[0]):
- case RegoRootDocument.Equal(path[0]):
- default:
- p.hint("if this is unexpected, try updating OPA")
- p.errorf(imp.Path.Location, "unexpected import path, must begin with one of: %v, got: %v",
- RootDocumentNames.Union(NewSet(FutureRootDocument, RegoRootDocument)),
- path[0])
- return nil
- }
-
- if p.s.tok == tokens.As {
- p.scan()
-
- if p.s.tok != tokens.Ident {
- p.illegal("expected var")
- return nil
- }
-
- if alias := p.parseTerm(); alias != nil {
- v, ok := alias.Value.(Var)
- if ok {
- imp.Alias = v
- return &imp
- }
- }
- p.illegal("expected var")
- return nil
- }
-
- return &imp
-}
-
-func (p *Parser) parseRules() []*Rule {
-
- var rule Rule
- rule.SetLoc(p.s.Loc())
-
- if p.s.tok == tokens.Default {
- p.scan()
- rule.Default = true
- }
-
- if p.s.tok != tokens.Ident {
- return nil
- }
-
- usesContains := false
- if rule.Head, usesContains = p.parseHead(rule.Default); rule.Head == nil {
- return nil
- }
-
- if usesContains {
- rule.Head.keywords = append(rule.Head.keywords, tokens.Contains)
- }
-
- if rule.Default {
- if !p.validateDefaultRuleValue(&rule) {
- return nil
- }
-
- if len(rule.Head.Args) > 0 {
- if !p.validateDefaultRuleArgs(&rule) {
- return nil
- }
- }
-
- rule.Body = NewBody(NewExpr(BooleanTerm(true).SetLocation(rule.Location)).SetLocation(rule.Location))
- return []*Rule{&rule}
- }
-
- // back-compat with `p[x] { ... }``
- hasIf := p.s.tok == tokens.If
-
- // p[x] if ... becomes a single-value rule p[x]
- if hasIf && !usesContains && len(rule.Head.Ref()) == 2 {
- if !rule.Head.Ref()[1].IsGround() && len(rule.Head.Args) == 0 {
- rule.Head.Key = rule.Head.Ref()[1]
- }
-
- if rule.Head.Value == nil {
- rule.Head.generatedValue = true
- rule.Head.Value = BooleanTerm(true).SetLocation(rule.Head.Location)
- } else {
- // p[x] = y if becomes a single-value rule p[x] with value y, but needs name for compat
- v, ok := rule.Head.Ref()[0].Value.(Var)
- if !ok {
- return nil
- }
- rule.Head.Name = v
- }
- }
-
- // p[x] becomes a multi-value rule p
- if !hasIf && !usesContains &&
- len(rule.Head.Args) == 0 && // not a function
- len(rule.Head.Ref()) == 2 { // ref like 'p[x]'
- v, ok := rule.Head.Ref()[0].Value.(Var)
- if !ok {
- return nil
- }
- rule.Head.Name = v
- rule.Head.Key = rule.Head.Ref()[1]
- if rule.Head.Value == nil {
- rule.Head.SetRef(rule.Head.Ref()[:len(rule.Head.Ref())-1])
- }
- }
-
- switch {
- case hasIf:
- rule.Head.keywords = append(rule.Head.keywords, tokens.If)
- p.scan()
- s := p.save()
- if expr := p.parseLiteral(); expr != nil {
- // NOTE(sr): set literals are never false or undefined, so parsing this as
- // p if { true }
- // ^^^^^^^^ set of one element, `true`
- // isn't valid.
- isSetLiteral := false
- if t, ok := expr.Terms.(*Term); ok {
- _, isSetLiteral = t.Value.(Set)
- }
- // expr.Term is []*Term or Every
- if !isSetLiteral {
- rule.Body.Append(expr)
- break
- }
- }
-
- // parsing as literal didn't work out, expect '{ BODY }'
- p.restore(s)
- fallthrough
-
- case p.s.tok == tokens.LBrace:
- p.scan()
- if rule.Body = p.parseBody(tokens.RBrace); rule.Body == nil {
- return nil
- }
- p.scan()
-
- case usesContains:
- rule.Body = NewBody(NewExpr(BooleanTerm(true).SetLocation(rule.Location)).SetLocation(rule.Location))
- rule.generatedBody = true
- rule.Location = rule.Head.Location
-
- return []*Rule{&rule}
-
- default:
- return nil
- }
-
- if p.s.tok == tokens.Else {
- if r := rule.Head.Ref(); len(r) > 1 && !r.IsGround() {
- p.error(p.s.Loc(), "else keyword cannot be used on rules with variables in head")
- return nil
- }
- if rule.Head.Key != nil {
- p.error(p.s.Loc(), "else keyword cannot be used on multi-value rules")
- return nil
- }
-
- if rule.Else = p.parseElse(rule.Head); rule.Else == nil {
- return nil
- }
- }
-
- rule.Location.Text = p.s.Text(rule.Location.Offset, p.s.lastEnd)
-
- rules := []*Rule{&rule}
-
- for p.s.tok == tokens.LBrace {
-
- if rule.Else != nil {
- p.error(p.s.Loc(), "expected else keyword")
- return nil
- }
-
- loc := p.s.Loc()
-
- p.scan()
- var next Rule
-
- if next.Body = p.parseBody(tokens.RBrace); next.Body == nil {
- return nil
- }
- p.scan()
-
- loc.Text = p.s.Text(loc.Offset, p.s.lastEnd)
- next.SetLoc(loc)
-
- // Chained rule head's keep the original
- // rule's head AST but have their location
- // set to the rule body.
- next.Head = rule.Head.Copy()
- next.Head.keywords = rule.Head.keywords
- for i := range next.Head.Args {
- if v, ok := next.Head.Args[i].Value.(Var); ok && v.IsWildcard() {
- next.Head.Args[i].Value = Var(p.genwildcard())
- }
- }
- setLocRecursive(next.Head, loc)
-
- rules = append(rules, &next)
- }
-
- return rules
-}
-
-func (p *Parser) parseElse(head *Head) *Rule {
-
- var rule Rule
- rule.SetLoc(p.s.Loc())
-
- rule.Head = head.Copy()
- rule.Head.generatedValue = false
- for i := range rule.Head.Args {
- if v, ok := rule.Head.Args[i].Value.(Var); ok && v.IsWildcard() {
- rule.Head.Args[i].Value = Var(p.genwildcard())
- }
- }
- rule.Head.SetLoc(p.s.Loc())
-
- defer func() {
- rule.Location.Text = p.s.Text(rule.Location.Offset, p.s.lastEnd)
- }()
-
- p.scan()
-
- switch p.s.tok {
- case tokens.LBrace, tokens.If: // no value, but a body follows directly
- rule.Head.generatedValue = true
- rule.Head.Value = BooleanTerm(true)
- case tokens.Assign, tokens.Unify:
- rule.Head.Assign = tokens.Assign == p.s.tok
- p.scan()
- rule.Head.Value = p.parseTermInfixCall()
- if rule.Head.Value == nil {
- return nil
- }
- rule.Head.Location.Text = p.s.Text(rule.Head.Location.Offset, p.s.lastEnd)
- default:
- p.illegal("expected else value term or rule body")
- return nil
- }
-
- hasIf := p.s.tok == tokens.If
- hasLBrace := p.s.tok == tokens.LBrace
-
- if !hasIf && !hasLBrace {
- rule.Body = NewBody(NewExpr(BooleanTerm(true)))
- rule.generatedBody = true
- setLocRecursive(rule.Body, rule.Location)
- return &rule
- }
-
- if hasIf {
- rule.Head.keywords = append(rule.Head.keywords, tokens.If)
- p.scan()
- }
-
- if p.s.tok == tokens.LBrace {
- p.scan()
- if rule.Body = p.parseBody(tokens.RBrace); rule.Body == nil {
- return nil
- }
- p.scan()
- } else if p.s.tok != tokens.EOF {
- expr := p.parseLiteral()
- if expr == nil {
- return nil
- }
- rule.Body.Append(expr)
- setLocRecursive(rule.Body, rule.Location)
- } else {
- p.illegal("rule body expected")
- return nil
- }
-
- if p.s.tok == tokens.Else {
- if rule.Else = p.parseElse(head); rule.Else == nil {
- return nil
- }
- }
- return &rule
-}
-
-func (p *Parser) parseHead(defaultRule bool) (*Head, bool) {
- head := &Head{}
- loc := p.s.Loc()
- defer func() {
- if head != nil {
- head.SetLoc(loc)
- head.Location.Text = p.s.Text(head.Location.Offset, p.s.lastEnd)
- }
- }()
-
- term := p.parseVar()
- if term == nil {
- return nil, false
- }
-
- ref := p.parseTermFinish(term, true)
- if ref == nil {
- p.illegal("expected rule head name")
- return nil, false
- }
-
- switch x := ref.Value.(type) {
- case Var:
- // Modify the code to add the location to the head ref
- // and set the head ref's jsonOptions.
- head = VarHead(x, ref.Location, p.po.JSONOptions)
- case Ref:
- head = RefHead(x)
- case Call:
- op, args := x[0], x[1:]
- var ref Ref
- switch y := op.Value.(type) {
- case Var:
- ref = Ref{op}
- case Ref:
- if _, ok := y[0].Value.(Var); !ok {
- p.illegal("rule head ref %v invalid", y)
- return nil, false
- }
- ref = y
- }
- head = RefHead(ref)
- head.Args = append([]*Term{}, args...)
-
- default:
- return nil, false
- }
-
- name := head.Ref().String()
-
- switch p.s.tok {
- case tokens.Contains: // NOTE: no Value for `contains` heads, we return here
- // Catch error case of using 'contains' with a function definition rule head.
- if head.Args != nil {
- p.illegal("the contains keyword can only be used with multi-value rule definitions (e.g., %s contains { ... })", name)
- }
- p.scan()
- head.Key = p.parseTermInfixCall()
- if head.Key == nil {
- p.illegal("expected rule key term (e.g., %s contains { ... })", name)
- }
- return head, true
-
- case tokens.Unify:
- p.scan()
- head.Value = p.parseTermInfixCall()
- if head.Value == nil {
- // FIX HEAD.String()
- p.illegal("expected rule value term (e.g., %s[%s] = { ... })", name, head.Key)
- }
- case tokens.Assign:
- p.scan()
- head.Assign = true
- head.Value = p.parseTermInfixCall()
- if head.Value == nil {
- switch {
- case len(head.Args) > 0:
- p.illegal("expected function value term (e.g., %s(...) := { ... })", name)
- case head.Key != nil:
- p.illegal("expected partial rule value term (e.g., %s[...] := { ... })", name)
- case defaultRule:
- p.illegal("expected default rule value term (e.g., default %s := )", name)
- default:
- p.illegal("expected rule value term (e.g., %s := { ... })", name)
- }
- }
- }
-
- if head.Value == nil && head.Key == nil {
- if len(head.Ref()) != 2 || len(head.Args) > 0 {
- head.generatedValue = true
- head.Value = BooleanTerm(true).SetLocation(head.Location)
- }
- }
- return head, false
-}
-
-func (p *Parser) parseBody(end tokens.Token) Body {
- return p.parseQuery(false, end)
-}
-
-func (p *Parser) parseQuery(requireSemi bool, end tokens.Token) Body {
- body := Body{}
-
- if p.s.tok == end {
- p.error(p.s.Loc(), "found empty body")
- return nil
- }
-
- for {
- expr := p.parseLiteral()
- if expr == nil {
- return nil
- }
-
- body.Append(expr)
-
- if p.s.tok == tokens.Semicolon {
- p.scan()
- continue
- }
-
- if p.s.tok == end || requireSemi {
- return body
- }
-
- if !p.s.skippedNL {
- // If there was already an error then don't pile this one on
- if len(p.s.errors) == 0 {
- p.illegal(`expected \n or %s or %s`, tokens.Semicolon, end)
- }
- return nil
- }
- }
-}
-
-func (p *Parser) parseLiteral() (expr *Expr) {
-
- offset := p.s.loc.Offset
- loc := p.s.Loc()
-
- defer func() {
- if expr != nil {
- loc.Text = p.s.Text(offset, p.s.lastEnd)
- expr.SetLoc(loc)
- }
- }()
-
- var negated bool
- if p.s.tok == tokens.Not {
- p.scan()
- negated = true
- }
-
- switch p.s.tok {
- case tokens.Some:
- if negated {
- p.illegal("illegal negation of 'some'")
- return nil
- }
- return p.parseSome()
- case tokens.Every:
- if negated {
- p.illegal("illegal negation of 'every'")
- return nil
- }
- return p.parseEvery()
- default:
- s := p.save()
- expr := p.parseExpr()
- if expr != nil {
- expr.Negated = negated
- if p.s.tok == tokens.With {
- if expr.With = p.parseWith(); expr.With == nil {
- return nil
- }
- }
- // If we find a plain `every` identifier, attempt to parse an every expression,
- // add hint if it succeeds.
- if term, ok := expr.Terms.(*Term); ok && Var("every").Equal(term.Value) {
- var hint bool
- t := p.save()
- p.restore(s)
- if expr := p.futureParser().parseEvery(); expr != nil {
- _, hint = expr.Terms.(*Every)
- }
- p.restore(t)
- if hint {
- p.hint("`import future.keywords.every` for `every x in xs { ... }` expressions")
- }
- }
- return expr
- }
- return nil
- }
-}
-
-func (p *Parser) parseWith() []*With {
-
- withs := []*With{}
-
- for {
-
- with := With{
- Location: p.s.Loc(),
- }
- p.scan()
-
- if p.s.tok != tokens.Ident {
- p.illegal("expected ident")
- return nil
- }
-
- with.Target = p.parseTerm()
- if with.Target == nil {
- return nil
- }
-
- switch with.Target.Value.(type) {
- case Ref, Var:
- break
- default:
- p.illegal("expected with target path")
- }
-
- if p.s.tok != tokens.As {
- p.illegal("expected as keyword")
- return nil
- }
-
- p.scan()
-
- if with.Value = p.parseTermInfixCall(); with.Value == nil {
- return nil
- }
-
- with.Location.Text = p.s.Text(with.Location.Offset, p.s.lastEnd)
-
- withs = append(withs, &with)
-
- if p.s.tok != tokens.With {
- break
- }
- }
-
- return withs
-}
-
-func (p *Parser) parseSome() *Expr {
-
- decl := &SomeDecl{}
- decl.SetLoc(p.s.Loc())
-
- // Attempt to parse "some x in xs", which will end up in
- // SomeDecl{Symbols: ["member(x, xs)"]}
- s := p.save()
- p.scan()
- if term := p.parseTermInfixCall(); term != nil {
- if call, ok := term.Value.(Call); ok {
- switch call[0].String() {
- case Member.Name:
- if len(call) != 3 {
- p.illegal("illegal domain")
- return nil
- }
- case MemberWithKey.Name:
- if len(call) != 4 {
- p.illegal("illegal domain")
- return nil
- }
- default:
- p.illegal("expected `x in xs` or `x, y in xs` expression")
- return nil
- }
-
- decl.Symbols = []*Term{term}
- expr := NewExpr(decl).SetLocation(decl.Location)
- if p.s.tok == tokens.With {
- if expr.With = p.parseWith(); expr.With == nil {
- return nil
- }
- }
- return expr
- }
- }
-
- p.restore(s)
- s = p.save() // new copy for later
- var hint bool
- p.scan()
- if term := p.futureParser().parseTermInfixCall(); term != nil {
- if call, ok := term.Value.(Call); ok {
- switch call[0].String() {
- case Member.Name, MemberWithKey.Name:
- hint = true
- }
- }
- }
-
- // go on as before, it's `some x[...]` or illegal
- p.restore(s)
- if hint {
- p.hint("`import future.keywords.in` for `some x in xs` expressions")
- }
-
- for { // collecting var args
-
- p.scan()
-
- if p.s.tok != tokens.Ident {
- p.illegal("expected var")
- return nil
- }
-
- decl.Symbols = append(decl.Symbols, p.parseVar())
-
- p.scan()
-
- if p.s.tok != tokens.Comma {
- break
- }
- }
-
- return NewExpr(decl).SetLocation(decl.Location)
-}
-
-func (p *Parser) parseEvery() *Expr {
- qb := &Every{}
- qb.SetLoc(p.s.Loc())
-
- // TODO(sr): We'd get more accurate error messages if we didn't rely on
- // parseTermInfixCall here, but parsed "var [, var] in term" manually.
- p.scan()
- term := p.parseTermInfixCall()
- if term == nil {
- return nil
- }
- call, ok := term.Value.(Call)
- if !ok {
- p.illegal("expected `x[, y] in xs { ... }` expression")
- return nil
- }
- switch call[0].String() {
- case Member.Name: // x in xs
- if len(call) != 3 {
- p.illegal("illegal domain")
- return nil
- }
- qb.Value = call[1]
- qb.Domain = call[2]
- case MemberWithKey.Name: // k, v in xs
- if len(call) != 4 {
- p.illegal("illegal domain")
- return nil
- }
- qb.Key = call[1]
- qb.Value = call[2]
- qb.Domain = call[3]
- if _, ok := qb.Key.Value.(Var); !ok {
- p.illegal("expected key to be a variable")
- return nil
- }
- default:
- p.illegal("expected `x[, y] in xs { ... }` expression")
- return nil
- }
- if _, ok := qb.Value.Value.(Var); !ok {
- p.illegal("expected value to be a variable")
- return nil
- }
- if p.s.tok == tokens.LBrace { // every x in xs { ... }
- p.scan()
- body := p.parseBody(tokens.RBrace)
- if body == nil {
- return nil
- }
- p.scan()
- qb.Body = body
- expr := NewExpr(qb).SetLocation(qb.Location)
-
- if p.s.tok == tokens.With {
- if expr.With = p.parseWith(); expr.With == nil {
- return nil
- }
- }
- return expr
- }
-
- p.illegal("missing body")
- return nil
-}
-
-func (p *Parser) parseExpr() *Expr {
-
- lhs := p.parseTermInfixCall()
- if lhs == nil {
- return nil
- }
-
- if op := p.parseTermOp(tokens.Assign, tokens.Unify); op != nil {
- if rhs := p.parseTermInfixCall(); rhs != nil {
- return NewExpr([]*Term{op, lhs, rhs})
- }
- return nil
- }
-
- // NOTE(tsandall): the top-level call term is converted to an expr because
- // the evaluator does not support the call term type (nested calls are
- // rewritten by the compiler.)
- if call, ok := lhs.Value.(Call); ok {
- return NewExpr([]*Term(call))
- }
-
- return NewExpr(lhs)
-}
-
-// parseTermInfixCall consumes the next term from the input and returns it. If a
-// term cannot be parsed the return value is nil and error will be recorded. The
-// scanner will be advanced to the next token before returning.
-// By starting out with infix relations (==, !=, <, etc) and further calling the
-// other binary operators (|, &, arithmetics), it constitutes the binding
-// precedence.
-func (p *Parser) parseTermInfixCall() *Term {
- return p.parseTermIn(nil, true, p.s.loc.Offset)
-}
-
-func (p *Parser) parseTermInfixCallInList() *Term {
- return p.parseTermIn(nil, false, p.s.loc.Offset)
-}
-
-func (p *Parser) parseTermIn(lhs *Term, keyVal bool, offset int) *Term {
- // NOTE(sr): `in` is a bit special: besides `lhs in rhs`, it also
- // supports `key, val in rhs`, so it can have an optional second lhs.
- // `keyVal` triggers if we attempt to parse a second lhs argument (`mhs`).
- if lhs == nil {
- lhs = p.parseTermRelation(nil, offset)
- }
- if lhs != nil {
- if keyVal && p.s.tok == tokens.Comma { // second "lhs", or "middle hand side"
- s := p.save()
- p.scan()
- if mhs := p.parseTermRelation(nil, offset); mhs != nil {
- if op := p.parseTermOpName(MemberWithKey.Ref(), tokens.In); op != nil {
- if rhs := p.parseTermRelation(nil, p.s.loc.Offset); rhs != nil {
- call := p.setLoc(CallTerm(op, lhs, mhs, rhs), lhs.Location, offset, p.s.lastEnd)
- switch p.s.tok {
- case tokens.In:
- return p.parseTermIn(call, keyVal, offset)
- default:
- return call
- }
- }
- }
- }
- p.restore(s)
- }
- if op := p.parseTermOpName(Member.Ref(), tokens.In); op != nil {
- if rhs := p.parseTermRelation(nil, p.s.loc.Offset); rhs != nil {
- call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd)
- switch p.s.tok {
- case tokens.In:
- return p.parseTermIn(call, keyVal, offset)
- default:
- return call
- }
- }
- }
- }
- return lhs
-}
-
-func (p *Parser) parseTermRelation(lhs *Term, offset int) *Term {
- if lhs == nil {
- lhs = p.parseTermOr(nil, offset)
- }
- if lhs != nil {
- if op := p.parseTermOp(tokens.Equal, tokens.Neq, tokens.Lt, tokens.Gt, tokens.Lte, tokens.Gte); op != nil {
- if rhs := p.parseTermOr(nil, p.s.loc.Offset); rhs != nil {
- call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd)
- switch p.s.tok {
- case tokens.Equal, tokens.Neq, tokens.Lt, tokens.Gt, tokens.Lte, tokens.Gte:
- return p.parseTermRelation(call, offset)
- default:
- return call
- }
- }
- }
- }
- return lhs
-}
-
-func (p *Parser) parseTermOr(lhs *Term, offset int) *Term {
- if lhs == nil {
- lhs = p.parseTermAnd(nil, offset)
- }
- if lhs != nil {
- if op := p.parseTermOp(tokens.Or); op != nil {
- if rhs := p.parseTermAnd(nil, p.s.loc.Offset); rhs != nil {
- call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd)
- switch p.s.tok {
- case tokens.Or:
- return p.parseTermOr(call, offset)
- default:
- return call
- }
- }
- }
- return lhs
- }
- return nil
-}
-
-func (p *Parser) parseTermAnd(lhs *Term, offset int) *Term {
- if lhs == nil {
- lhs = p.parseTermArith(nil, offset)
- }
- if lhs != nil {
- if op := p.parseTermOp(tokens.And); op != nil {
- if rhs := p.parseTermArith(nil, p.s.loc.Offset); rhs != nil {
- call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd)
- switch p.s.tok {
- case tokens.And:
- return p.parseTermAnd(call, offset)
- default:
- return call
- }
- }
- }
- return lhs
- }
- return nil
-}
-
-func (p *Parser) parseTermArith(lhs *Term, offset int) *Term {
- if lhs == nil {
- lhs = p.parseTermFactor(nil, offset)
- }
- if lhs != nil {
- if op := p.parseTermOp(tokens.Add, tokens.Sub); op != nil {
- if rhs := p.parseTermFactor(nil, p.s.loc.Offset); rhs != nil {
- call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd)
- switch p.s.tok {
- case tokens.Add, tokens.Sub:
- return p.parseTermArith(call, offset)
- default:
- return call
- }
- }
- }
- }
- return lhs
-}
-
-func (p *Parser) parseTermFactor(lhs *Term, offset int) *Term {
- if lhs == nil {
- lhs = p.parseTerm()
- }
- if lhs != nil {
- if op := p.parseTermOp(tokens.Mul, tokens.Quo, tokens.Rem); op != nil {
- if rhs := p.parseTerm(); rhs != nil {
- call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd)
- switch p.s.tok {
- case tokens.Mul, tokens.Quo, tokens.Rem:
- return p.parseTermFactor(call, offset)
- default:
- return call
- }
- }
- }
- }
- return lhs
-}
-
-func (p *Parser) parseTerm() *Term {
- if term, s := p.parsedTermCacheLookup(); s != nil {
- p.restore(s)
- return term
- }
- s0 := p.save()
-
- var term *Term
- switch p.s.tok {
- case tokens.Null:
- term = NullTerm().SetLocation(p.s.Loc())
- case tokens.True:
- term = BooleanTerm(true).SetLocation(p.s.Loc())
- case tokens.False:
- term = BooleanTerm(false).SetLocation(p.s.Loc())
- case tokens.Sub, tokens.Dot, tokens.Number:
- term = p.parseNumber()
- case tokens.String:
- term = p.parseString()
- case tokens.Ident, tokens.Contains: // NOTE(sr): contains anywhere BUT in rule heads gets no special treatment
- term = p.parseVar()
- case tokens.LBrack:
- term = p.parseArray()
- case tokens.LBrace:
- term = p.parseSetOrObject()
- case tokens.LParen:
- offset := p.s.loc.Offset
- p.scan()
- if r := p.parseTermInfixCall(); r != nil {
- if p.s.tok == tokens.RParen {
- r.Location.Text = p.s.Text(offset, p.s.tokEnd)
- term = r
- } else {
- p.error(p.s.Loc(), "non-terminated expression")
- }
- }
- default:
- p.illegalToken()
- }
-
- term = p.parseTermFinish(term, false)
- p.parsedTermCachePush(term, s0)
- return term
-}
-
-func (p *Parser) parseTermFinish(head *Term, skipws bool) *Term {
- if head == nil {
- return nil
- }
- offset := p.s.loc.Offset
- p.doScan(skipws)
-
- switch p.s.tok {
- case tokens.LParen, tokens.Dot, tokens.LBrack:
- return p.parseRef(head, offset)
- case tokens.Whitespace:
- p.scan()
- fallthrough
- default:
- if _, ok := head.Value.(Var); ok && RootDocumentNames.Contains(head) {
- return RefTerm(head).SetLocation(head.Location)
- }
- return head
- }
-}
-
-func (p *Parser) parseNumber() *Term {
- var prefix string
- loc := p.s.Loc()
- if p.s.tok == tokens.Sub {
- prefix = "-"
- p.scan()
- switch p.s.tok {
- case tokens.Number, tokens.Dot:
- break
- default:
- p.illegal("expected number")
- return nil
- }
- }
- if p.s.tok == tokens.Dot {
- prefix += "."
- p.scan()
- if p.s.tok != tokens.Number {
- p.illegal("expected number")
- return nil
- }
- }
-
- // Check for multiple leading 0's, parsed by math/big.Float.Parse as decimal 0:
- // https://golang.org/pkg/math/big/#Float.Parse
- if ((len(prefix) != 0 && prefix[0] == '-') || len(prefix) == 0) &&
- len(p.s.lit) > 1 && p.s.lit[0] == '0' && p.s.lit[1] == '0' {
- p.illegal("expected number")
- return nil
- }
-
- // Ensure that the number is valid
- s := prefix + p.s.lit
- f, ok := new(big.Float).SetString(s)
- if !ok {
- p.illegal("invalid float")
- return nil
- }
-
- // Put limit on size of exponent to prevent non-linear cost of String()
- // function on big.Float from causing denial of service: https://github.com/golang/go/issues/11068
- //
- // n == sign * mantissa * 2^exp
- // 0.5 <= mantissa < 1.0
- //
- // The limit is arbitrary.
- exp := f.MantExp(nil)
- if exp > 1e5 || exp < -1e5 || f.IsInf() { // +/- inf, exp is 0
- p.error(p.s.Loc(), "number too big")
- return nil
- }
-
- // Note: Use the original string, do *not* round trip from
- // the big.Float as it can cause precision loss.
- r := NumberTerm(json.Number(s)).SetLocation(loc)
- return r
-}
-
-func (p *Parser) parseString() *Term {
- if p.s.lit[0] == '"' {
- var s string
- err := json.Unmarshal([]byte(p.s.lit), &s)
- if err != nil {
- p.errorf(p.s.Loc(), "illegal string literal: %s", p.s.lit)
- return nil
- }
- term := StringTerm(s).SetLocation(p.s.Loc())
- return term
- }
- return p.parseRawString()
-}
-
-func (p *Parser) parseRawString() *Term {
- if len(p.s.lit) < 2 {
- return nil
- }
- term := StringTerm(p.s.lit[1 : len(p.s.lit)-1]).SetLocation(p.s.Loc())
- return term
-}
-
-// this is the name to use for instantiating an empty set, e.g., `set()`.
-var setConstructor = RefTerm(VarTerm("set"))
-
-func (p *Parser) parseCall(operator *Term, offset int) (term *Term) {
-
- loc := operator.Location
- var end int
-
- defer func() {
- p.setLoc(term, loc, offset, end)
- }()
-
- p.scan() // steps over '('
-
- if p.s.tok == tokens.RParen { // no args, i.e. set() or any.func()
- end = p.s.tokEnd
- p.scanWS()
- if operator.Equal(setConstructor) {
- return SetTerm()
- }
- return CallTerm(operator)
- }
-
- if r := p.parseTermList(tokens.RParen, []*Term{operator}); r != nil {
- end = p.s.tokEnd
- p.scanWS()
- return CallTerm(r...)
- }
-
- return nil
-}
-
-func (p *Parser) parseRef(head *Term, offset int) (term *Term) {
-
- loc := head.Location
- var end int
-
- defer func() {
- p.setLoc(term, loc, offset, end)
- }()
-
- switch h := head.Value.(type) {
- case Var, *Array, Object, Set, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Call:
- // ok
- default:
- p.errorf(loc, "illegal ref (head cannot be %v)", TypeName(h))
- }
-
- ref := []*Term{head}
-
- for {
- switch p.s.tok {
- case tokens.Dot:
- p.scanWS()
- if p.s.tok != tokens.Ident {
- p.illegal("expected %v", tokens.Ident)
- return nil
- }
- ref = append(ref, StringTerm(p.s.lit).SetLocation(p.s.Loc()))
- p.scanWS()
- case tokens.LParen:
- term = p.parseCall(p.setLoc(RefTerm(ref...), loc, offset, p.s.loc.Offset), offset)
- if term != nil {
- switch p.s.tok {
- case tokens.Whitespace:
- p.scan()
- end = p.s.lastEnd
- return term
- case tokens.Dot, tokens.LBrack:
- term = p.parseRef(term, offset)
- }
- }
- end = p.s.tokEnd
- return term
- case tokens.LBrack:
- p.scan()
- if term := p.parseTermInfixCall(); term != nil {
- if p.s.tok != tokens.RBrack {
- p.illegal("expected %v", tokens.LBrack)
- return nil
- }
- ref = append(ref, term)
- p.scanWS()
- } else {
- return nil
- }
- case tokens.Whitespace:
- end = p.s.lastEnd
- p.scan()
- return RefTerm(ref...)
- default:
- end = p.s.lastEnd
- return RefTerm(ref...)
- }
- }
-}
-
-func (p *Parser) parseArray() (term *Term) {
-
- loc := p.s.Loc()
- offset := p.s.loc.Offset
-
- defer func() {
- p.setLoc(term, loc, offset, p.s.tokEnd)
- }()
-
- p.scan()
-
- if p.s.tok == tokens.RBrack {
- return ArrayTerm()
- }
-
- potentialComprehension := true
-
- // Skip leading commas, eg [, x, y]
- // Supported for backwards compatibility. In the future
- // we should make this a parse error.
- if p.s.tok == tokens.Comma {
- potentialComprehension = false
- p.scan()
- }
-
- s := p.save()
-
- // NOTE(tsandall): The parser cannot attempt a relational term here because
- // of ambiguity around comprehensions. For example, given:
- //
- // {1 | 1}
- //
- // Does this represent a set comprehension or a set containing binary OR
- // call? We resolve the ambiguity by prioritizing comprehensions.
- head := p.parseTerm()
-
- if head == nil {
- return nil
- }
-
- switch p.s.tok {
- case tokens.RBrack:
- return ArrayTerm(head)
- case tokens.Comma:
- p.scan()
- if terms := p.parseTermList(tokens.RBrack, []*Term{head}); terms != nil {
- return NewTerm(NewArray(terms...))
- }
- return nil
- case tokens.Or:
- if potentialComprehension {
- // Try to parse as if it is an array comprehension
- p.scan()
- if body := p.parseBody(tokens.RBrack); body != nil {
- return ArrayComprehensionTerm(head, body)
- }
- if p.s.tok != tokens.Comma {
- return nil
- }
- }
- // fall back to parsing as a normal array definition
- }
-
- p.restore(s)
-
- if terms := p.parseTermList(tokens.RBrack, nil); terms != nil {
- return NewTerm(NewArray(terms...))
- }
- return nil
-}
-
-func (p *Parser) parseSetOrObject() (term *Term) {
- loc := p.s.Loc()
- offset := p.s.loc.Offset
-
- defer func() {
- p.setLoc(term, loc, offset, p.s.tokEnd)
- }()
-
- p.scan()
-
- if p.s.tok == tokens.RBrace {
- return ObjectTerm()
- }
-
- potentialComprehension := true
-
- // Skip leading commas, eg {, x, y}
- // Supported for backwards compatibility. In the future
- // we should make this a parse error.
- if p.s.tok == tokens.Comma {
- potentialComprehension = false
- p.scan()
- }
-
- s := p.save()
-
- // Try parsing just a single term first to give comprehensions higher
- // priority to "or" calls in ambiguous situations. Eg: { a | b }
- // will be a set comprehension.
- //
- // Note: We don't know yet if it is a set or object being defined.
- head := p.parseTerm()
- if head == nil {
- return nil
- }
-
- switch p.s.tok {
- case tokens.Or:
- if potentialComprehension {
- return p.parseSet(s, head, potentialComprehension)
- }
- case tokens.RBrace, tokens.Comma:
- return p.parseSet(s, head, potentialComprehension)
- case tokens.Colon:
- return p.parseObject(head, potentialComprehension)
- }
-
- p.restore(s)
-
- head = p.parseTermInfixCallInList()
- if head == nil {
- return nil
- }
-
- switch p.s.tok {
- case tokens.RBrace, tokens.Comma:
- return p.parseSet(s, head, false)
- case tokens.Colon:
- // It still might be an object comprehension, eg { a+1: b | ... }
- return p.parseObject(head, potentialComprehension)
- }
-
- p.illegal("non-terminated set")
- return nil
-}
-
-func (p *Parser) parseSet(s *state, head *Term, potentialComprehension bool) *Term {
- switch p.s.tok {
- case tokens.RBrace:
- return SetTerm(head)
- case tokens.Comma:
- p.scan()
- if terms := p.parseTermList(tokens.RBrace, []*Term{head}); terms != nil {
- return SetTerm(terms...)
- }
- case tokens.Or:
- if potentialComprehension {
- // Try to parse as if it is a set comprehension
- p.scan()
- if body := p.parseBody(tokens.RBrace); body != nil {
- return SetComprehensionTerm(head, body)
- }
- if p.s.tok != tokens.Comma {
- return nil
- }
- }
- // Fall back to parsing as normal set definition
- p.restore(s)
- if terms := p.parseTermList(tokens.RBrace, nil); terms != nil {
- return SetTerm(terms...)
- }
- }
- return nil
-}
-
-func (p *Parser) parseObject(k *Term, potentialComprehension bool) *Term {
- // NOTE(tsandall): Assumption: this function is called after parsing the key
- // of the head element and then receiving a colon token from the scanner.
- // Advance beyond the colon and attempt to parse an object.
- if p.s.tok != tokens.Colon {
- panic("expected colon")
- }
- p.scan()
-
- s := p.save()
-
- // NOTE(sr): We first try to parse the value as a term (`v`), and see
- // if we can parse `{ x: v | ...}` as a comprehension.
- // However, if we encounter either a Comma or an RBace, it cannot be
- // parsed as a comprehension -- so we save double work further down
- // where `parseObjectFinish(k, v, false)` would only exercise the
- // same code paths once more.
- v := p.parseTerm()
- if v == nil {
- return nil
- }
-
- potentialRelation := true
- if potentialComprehension {
- switch p.s.tok {
- case tokens.RBrace, tokens.Comma:
- potentialRelation = false
- fallthrough
- case tokens.Or:
- if term := p.parseObjectFinish(k, v, true); term != nil {
- return term
- }
- }
- }
-
- p.restore(s)
-
- if potentialRelation {
- v := p.parseTermInfixCallInList()
- if v == nil {
- return nil
- }
-
- switch p.s.tok {
- case tokens.RBrace, tokens.Comma:
- return p.parseObjectFinish(k, v, false)
- }
- }
-
- p.illegal("non-terminated object")
- return nil
-}
-
-func (p *Parser) parseObjectFinish(key, val *Term, potentialComprehension bool) *Term {
- switch p.s.tok {
- case tokens.RBrace:
- return ObjectTerm([2]*Term{key, val})
- case tokens.Or:
- if potentialComprehension {
- p.scan()
- if body := p.parseBody(tokens.RBrace); body != nil {
- return ObjectComprehensionTerm(key, val, body)
- }
- } else {
- p.illegal("non-terminated object")
- }
- case tokens.Comma:
- p.scan()
- if r := p.parseTermPairList(tokens.RBrace, [][2]*Term{{key, val}}); r != nil {
- return ObjectTerm(r...)
- }
- }
- return nil
-}
-
-func (p *Parser) parseTermList(end tokens.Token, r []*Term) []*Term {
- if p.s.tok == end {
- return r
- }
- for {
- term := p.parseTermInfixCallInList()
- if term != nil {
- r = append(r, term)
- switch p.s.tok {
- case end:
- return r
- case tokens.Comma:
- p.scan()
- if p.s.tok == end {
- return r
- }
- continue
- default:
- p.illegal(fmt.Sprintf("expected %q or %q", tokens.Comma, end))
- return nil
- }
- }
- return nil
- }
-}
-
-func (p *Parser) parseTermPairList(end tokens.Token, r [][2]*Term) [][2]*Term {
- if p.s.tok == end {
- return r
- }
- for {
- key := p.parseTermInfixCallInList()
- if key != nil {
- switch p.s.tok {
- case tokens.Colon:
- p.scan()
- if val := p.parseTermInfixCallInList(); val != nil {
- r = append(r, [2]*Term{key, val})
- switch p.s.tok {
- case end:
- return r
- case tokens.Comma:
- p.scan()
- if p.s.tok == end {
- return r
- }
- continue
- default:
- p.illegal(fmt.Sprintf("expected %q or %q", tokens.Comma, end))
- return nil
- }
- }
- default:
- p.illegal(fmt.Sprintf("expected %q", tokens.Colon))
- return nil
- }
- }
- return nil
- }
-}
-
-func (p *Parser) parseTermOp(values ...tokens.Token) *Term {
- for i := range values {
- if p.s.tok == values[i] {
- r := RefTerm(VarTerm(fmt.Sprint(p.s.tok)).SetLocation(p.s.Loc())).SetLocation(p.s.Loc())
- p.scan()
- return r
- }
- }
- return nil
-}
-
-func (p *Parser) parseTermOpName(ref Ref, values ...tokens.Token) *Term {
- for i := range values {
- if p.s.tok == values[i] {
- for _, r := range ref {
- r.SetLocation(p.s.Loc())
- }
- t := RefTerm(ref...)
- t.SetLocation(p.s.Loc())
- p.scan()
- return t
- }
- }
- return nil
-}
-
-func (p *Parser) parseVar() *Term {
-
- s := p.s.lit
-
- term := VarTerm(s).SetLocation(p.s.Loc())
-
- // Update wildcard values with unique identifiers
- if term.Equal(Wildcard) {
- term.Value = Var(p.genwildcard())
- }
-
- return term
-}
-
-func (p *Parser) genwildcard() string {
- c := p.s.wildcard
- p.s.wildcard++
- return fmt.Sprintf("%v%d", WildcardPrefix, c)
-}
-
-func (p *Parser) error(loc *location.Location, reason string) {
- p.errorf(loc, reason)
-}
-
-func (p *Parser) errorf(loc *location.Location, f string, a ...interface{}) {
- msg := strings.Builder{}
- msg.WriteString(fmt.Sprintf(f, a...))
-
- switch len(p.s.hints) {
- case 0: // nothing to do
- case 1:
- msg.WriteString(" (hint: ")
- msg.WriteString(p.s.hints[0])
- msg.WriteRune(')')
- default:
- msg.WriteString(" (hints: ")
- for i, h := range p.s.hints {
- if i > 0 {
- msg.WriteString(", ")
- }
- msg.WriteString(h)
- }
- msg.WriteRune(')')
- }
-
- p.s.errors = append(p.s.errors, &Error{
- Code: ParseErr,
- Message: msg.String(),
- Location: loc,
- Details: newParserErrorDetail(p.s.s.Bytes(), loc.Offset),
- })
- p.s.hints = nil
-}
-
-func (p *Parser) hint(f string, a ...interface{}) {
- p.s.hints = append(p.s.hints, fmt.Sprintf(f, a...))
-}
-
-func (p *Parser) illegal(note string, a ...interface{}) {
- tok := p.s.tok.String()
-
- if p.s.tok == tokens.Illegal {
- p.errorf(p.s.Loc(), "illegal token")
- return
- }
-
- tokType := "token"
- if tokens.IsKeyword(p.s.tok) {
- tokType = "keyword"
- }
- if _, ok := futureKeywords[p.s.tok.String()]; ok {
- tokType = "keyword"
- }
-
- note = fmt.Sprintf(note, a...)
- if len(note) > 0 {
- p.errorf(p.s.Loc(), "unexpected %s %s: %s", tok, tokType, note)
- } else {
- p.errorf(p.s.Loc(), "unexpected %s %s", tok, tokType)
- }
-}
-
-func (p *Parser) illegalToken() {
- p.illegal("")
-}
-
-func (p *Parser) scan() {
- p.doScan(true)
-}
-
-func (p *Parser) scanWS() {
- p.doScan(false)
-}
-
-func (p *Parser) doScan(skipws bool) {
-
- // NOTE(tsandall): the last position is used to compute the "text" field for
- // complex AST nodes. Whitespace never affects the last position of an AST
- // node so do not update it when scanning.
- if p.s.tok != tokens.Whitespace {
- p.s.lastEnd = p.s.tokEnd
- p.s.skippedNL = false
- }
-
- var errs []scanner.Error
- for {
- var pos scanner.Position
- p.s.tok, pos, p.s.lit, errs = p.s.s.Scan()
-
- p.s.tokEnd = pos.End
- p.s.loc.Row = pos.Row
- p.s.loc.Col = pos.Col
- p.s.loc.Offset = pos.Offset
- p.s.loc.Text = p.s.Text(pos.Offset, pos.End)
- p.s.loc.Tabs = pos.Tabs
-
- for _, err := range errs {
- p.error(p.s.Loc(), err.Message)
- }
-
- if len(errs) > 0 {
- p.s.tok = tokens.Illegal
- }
-
- if p.s.tok == tokens.Whitespace {
- if p.s.lit == "\n" {
- p.s.skippedNL = true
- }
- if skipws {
- continue
- }
- }
-
- if p.s.tok != tokens.Comment {
- break
- }
-
- // For backwards compatibility leave a nil
- // Text value if there is no text rather than
- // an empty string.
- var commentText []byte
- if len(p.s.lit) > 1 {
- commentText = []byte(p.s.lit[1:])
- }
- comment := NewComment(commentText)
- comment.SetLoc(p.s.Loc())
- p.s.comments = append(p.s.comments, comment)
- }
-}
-
-func (p *Parser) save() *state {
- cpy := *p.s
- s := *cpy.s
- cpy.s = &s
- return &cpy
-}
-
-func (p *Parser) restore(s *state) {
- p.s = s
-}
-
-func setLocRecursive(x interface{}, loc *location.Location) {
- NewGenericVisitor(func(x interface{}) bool {
- if node, ok := x.(Node); ok {
- node.SetLoc(loc)
- }
- return false
- }).Walk(x)
-}
-
-func (p *Parser) setLoc(term *Term, loc *location.Location, offset, end int) *Term {
- if term != nil {
- cpy := *loc
- term.Location = &cpy
- term.Location.Text = p.s.Text(offset, end)
- }
- return term
-}
-
-func (p *Parser) validateDefaultRuleValue(rule *Rule) bool {
- if rule.Head.Value == nil {
- p.error(rule.Loc(), "illegal default rule (must have a value)")
- return false
- }
-
- valid := true
- vis := NewGenericVisitor(func(x interface{}) bool {
- switch x.(type) {
- case *ArrayComprehension, *ObjectComprehension, *SetComprehension: // skip closures
- return true
- case Ref, Var, Call:
- p.error(rule.Loc(), fmt.Sprintf("illegal default rule (value cannot contain %v)", TypeName(x)))
- valid = false
- return true
- }
- return false
- })
-
- vis.Walk(rule.Head.Value.Value)
- return valid
-}
-
-func (p *Parser) validateDefaultRuleArgs(rule *Rule) bool {
-
- valid := true
- vars := NewVarSet()
-
- vis := NewGenericVisitor(func(x interface{}) bool {
- switch x := x.(type) {
- case Var:
- if vars.Contains(x) {
- p.error(rule.Loc(), fmt.Sprintf("illegal default rule (arguments cannot be repeated %v)", x))
- valid = false
- return true
- }
- vars.Add(x)
-
- case *Term:
- switch v := x.Value.(type) {
- case Var: // do nothing
- default:
- p.error(rule.Loc(), fmt.Sprintf("illegal default rule (arguments cannot contain %v)", TypeName(v)))
- valid = false
- return true
- }
- }
-
- return false
- })
-
- vis.Walk(rule.Head.Args)
- return valid
-}
-
-// We explicitly use yaml unmarshalling, to accommodate for the '_' in 'related_resources',
-// which isn't handled properly by json for some reason.
-type rawAnnotation struct {
- Scope string `yaml:"scope"`
- Title string `yaml:"title"`
- Entrypoint bool `yaml:"entrypoint"`
- Description string `yaml:"description"`
- Organizations []string `yaml:"organizations"`
- RelatedResources []interface{} `yaml:"related_resources"`
- Authors []interface{} `yaml:"authors"`
- Schemas []map[string]any `yaml:"schemas"`
- Custom map[string]interface{} `yaml:"custom"`
-}
-
-type metadataParser struct {
- buf *bytes.Buffer
- comments []*Comment
- loc *location.Location
-}
-
-func newMetadataParser(loc *Location) *metadataParser {
- return &metadataParser{loc: loc, buf: bytes.NewBuffer(nil)}
-}
-
-func (b *metadataParser) Append(c *Comment) {
- b.buf.Write(bytes.TrimPrefix(c.Text, []byte(" ")))
- b.buf.WriteByte('\n')
- b.comments = append(b.comments, c)
-}
-
-var yamlLineErrRegex = regexp.MustCompile(`^yaml:(?: unmarshal errors:[\n\s]*)? line ([[:digit:]]+):`)
-
-func (b *metadataParser) Parse() (*Annotations, error) {
-
- var raw rawAnnotation
-
- if len(bytes.TrimSpace(b.buf.Bytes())) == 0 {
- return nil, fmt.Errorf("expected METADATA block, found whitespace")
- }
-
- if err := yaml.Unmarshal(b.buf.Bytes(), &raw); err != nil {
- var comment *Comment
- match := yamlLineErrRegex.FindStringSubmatch(err.Error())
- if len(match) == 2 {
- index, err2 := strconv.Atoi(match[1])
- if err2 == nil {
- if index >= len(b.comments) {
- comment = b.comments[len(b.comments)-1]
- } else {
- comment = b.comments[index]
- }
- b.loc = comment.Location
- }
- }
-
- if match == nil && len(b.comments) > 0 {
- b.loc = b.comments[0].Location
- }
-
- return nil, augmentYamlError(err, b.comments)
- }
-
- var result Annotations
- result.comments = b.comments
- result.Scope = raw.Scope
- result.Entrypoint = raw.Entrypoint
- result.Title = raw.Title
- result.Description = raw.Description
- result.Organizations = raw.Organizations
-
- for _, v := range raw.RelatedResources {
- rr, err := parseRelatedResource(v)
- if err != nil {
- return nil, fmt.Errorf("invalid related-resource definition %s: %w", v, err)
- }
- result.RelatedResources = append(result.RelatedResources, rr)
- }
-
- for _, pair := range raw.Schemas {
- k, v := unwrapPair(pair)
-
- var a SchemaAnnotation
- var err error
-
- a.Path, err = ParseRef(k)
- if err != nil {
- return nil, fmt.Errorf("invalid document reference")
- }
-
- switch v := v.(type) {
- case string:
- a.Schema, err = parseSchemaRef(v)
- if err != nil {
- return nil, err
- }
- case map[string]any:
- w, err := convertYAMLMapKeyTypes(v, nil)
- if err != nil {
- return nil, fmt.Errorf("invalid schema definition: %w", err)
- }
- a.Definition = &w
- default:
- return nil, fmt.Errorf("invalid schema declaration for path %q", k)
- }
-
- result.Schemas = append(result.Schemas, &a)
- }
-
- for _, v := range raw.Authors {
- author, err := parseAuthor(v)
- if err != nil {
- return nil, fmt.Errorf("invalid author definition %s: %w", v, err)
- }
- result.Authors = append(result.Authors, author)
- }
-
- result.Custom = make(map[string]interface{})
- for k, v := range raw.Custom {
- val, err := convertYAMLMapKeyTypes(v, nil)
- if err != nil {
- return nil, err
- }
- result.Custom[k] = val
- }
-
- result.Location = b.loc
-
- // recreate original text of entire metadata block for location text attribute
- sb := strings.Builder{}
- sb.WriteString("# METADATA\n")
-
- lines := bytes.Split(b.buf.Bytes(), []byte{'\n'})
-
- for _, line := range lines[:len(lines)-1] {
- sb.WriteString("# ")
- sb.Write(line)
- sb.WriteByte('\n')
- }
-
- result.Location.Text = []byte(strings.TrimSuffix(sb.String(), "\n"))
-
- return &result, nil
-}
-
-// augmentYamlError augments a YAML error with hints intended to help the user figure out the cause of an otherwise
-// cryptic error. These are hints, instead of proper errors, because they are educated guesses, and aren't guaranteed
-// to be correct.
-func augmentYamlError(err error, comments []*Comment) error {
- // Adding hints for when key/value ':' separator isn't suffixed with a legal YAML space symbol
- for _, comment := range comments {
- txt := string(comment.Text)
- parts := strings.Split(txt, ":")
- if len(parts) > 1 {
- parts = parts[1:]
- var invalidSpaces []string
- for partIndex, part := range parts {
- if len(part) == 0 && partIndex == len(parts)-1 {
- invalidSpaces = []string{}
- break
- }
-
- r, _ := utf8.DecodeRuneInString(part)
- if r == ' ' || r == '\t' {
- invalidSpaces = []string{}
- break
- }
-
- invalidSpaces = append(invalidSpaces, fmt.Sprintf("%+q", r))
- }
- if len(invalidSpaces) > 0 {
- err = fmt.Errorf(
- "%s\n Hint: on line %d, symbol(s) %v immediately following a key/value separator ':' is not a legal yaml space character",
- err.Error(), comment.Location.Row, invalidSpaces)
- }
- }
- }
- return err
-}
-
-func unwrapPair(pair map[string]interface{}) (string, interface{}) {
- for k, v := range pair {
- return k, v
- }
- return "", nil
-}
-
-var errInvalidSchemaRef = fmt.Errorf("invalid schema reference")
-
-// NOTE(tsandall): 'schema' is not registered as a root because it's not
-// supported by the compiler or evaluator today. Once we fix that, we can remove
-// this function.
-func parseSchemaRef(s string) (Ref, error) {
-
- term, err := ParseTerm(s)
- if err == nil {
- switch v := term.Value.(type) {
- case Var:
- if term.Equal(SchemaRootDocument) {
- return SchemaRootRef.Copy(), nil
- }
- case Ref:
- if v.HasPrefix(SchemaRootRef) {
- return v, nil
- }
- }
- }
-
- return nil, errInvalidSchemaRef
-}
-
-func parseRelatedResource(rr interface{}) (*RelatedResourceAnnotation, error) {
- rr, err := convertYAMLMapKeyTypes(rr, nil)
- if err != nil {
- return nil, err
- }
-
- switch rr := rr.(type) {
- case string:
- if len(rr) > 0 {
- u, err := url.Parse(rr)
- if err != nil {
- return nil, err
- }
- return &RelatedResourceAnnotation{Ref: *u}, nil
- }
- return nil, fmt.Errorf("ref URL may not be empty string")
- case map[string]interface{}:
- description := strings.TrimSpace(getSafeString(rr, "description"))
- ref := strings.TrimSpace(getSafeString(rr, "ref"))
- if len(ref) > 0 {
- u, err := url.Parse(ref)
- if err != nil {
- return nil, err
- }
- return &RelatedResourceAnnotation{Description: description, Ref: *u}, nil
- }
- return nil, fmt.Errorf("'ref' value required in object")
- }
-
- return nil, fmt.Errorf("invalid value type, must be string or map")
-}
-
-func parseAuthor(a interface{}) (*AuthorAnnotation, error) {
- a, err := convertYAMLMapKeyTypes(a, nil)
- if err != nil {
- return nil, err
- }
-
- switch a := a.(type) {
- case string:
- return parseAuthorString(a)
- case map[string]interface{}:
- name := strings.TrimSpace(getSafeString(a, "name"))
- email := strings.TrimSpace(getSafeString(a, "email"))
- if len(name) > 0 || len(email) > 0 {
- return &AuthorAnnotation{name, email}, nil
- }
- return nil, fmt.Errorf("'name' and/or 'email' values required in object")
- }
-
- return nil, fmt.Errorf("invalid value type, must be string or map")
-}
-
-func getSafeString(m map[string]interface{}, k string) string {
- if v, found := m[k]; found {
- if s, ok := v.(string); ok {
- return s
- }
- }
- return ""
-}
-
-const emailPrefix = "<"
-const emailSuffix = ">"
-
-// parseAuthor parses a string into an AuthorAnnotation. If the last word of the input string is enclosed within <>,
-// it is extracted as the author's email. The email may not contain whitelines, as it then will be interpreted as
-// multiple words.
-func parseAuthorString(s string) (*AuthorAnnotation, error) {
- parts := strings.Fields(s)
-
- if len(parts) == 0 {
- return nil, fmt.Errorf("author is an empty string")
- }
-
- namePartCount := len(parts)
- trailing := parts[namePartCount-1]
- var email string
- if len(trailing) >= len(emailPrefix)+len(emailSuffix) && strings.HasPrefix(trailing, emailPrefix) &&
- strings.HasSuffix(trailing, emailSuffix) {
- email = trailing[len(emailPrefix):]
- email = email[0 : len(email)-len(emailSuffix)]
- namePartCount = namePartCount - 1
- }
-
- name := strings.Join(parts[0:namePartCount], " ")
-
- return &AuthorAnnotation{Name: name, Email: email}, nil
-}
-
-func convertYAMLMapKeyTypes(x any, path []string) (any, error) {
- var err error
- switch x := x.(type) {
- case map[any]any:
- result := make(map[string]any, len(x))
- for k, v := range x {
- str, ok := k.(string)
- if !ok {
- return nil, fmt.Errorf("invalid map key type(s): %v", strings.Join(path, "/"))
- }
- result[str], err = convertYAMLMapKeyTypes(v, append(path, str))
- if err != nil {
- return nil, err
- }
- }
- return result, nil
- case []any:
- for i := range x {
- x[i], err = convertYAMLMapKeyTypes(x[i], append(path, fmt.Sprintf("%d", i)))
- if err != nil {
- return nil, err
- }
- }
- return x, nil
- default:
- return x, nil
- }
-}
-
-// futureKeywords is the source of truth for future keywords that will
-// eventually become standard keywords inside of Rego.
-var futureKeywords = map[string]tokens.Token{
- "in": tokens.In,
- "every": tokens.Every,
- "contains": tokens.Contains,
- "if": tokens.If,
+ return v1.NewParser().WithRegoVersion(DefaultRegoVersion)
}
func IsFutureKeyword(s string) bool {
- _, ok := futureKeywords[s]
- return ok
-}
-
-func (p *Parser) futureImport(imp *Import, allowedFutureKeywords map[string]tokens.Token) {
- path := imp.Path.Value.(Ref)
-
- if len(path) == 1 || !path[1].Equal(StringTerm("keywords")) {
- p.errorf(imp.Path.Location, "invalid import, must be `future.keywords`")
- return
- }
-
- if imp.Alias != "" {
- p.errorf(imp.Path.Location, "`future` imports cannot be aliased")
- return
- }
-
- if p.s.s.RegoV1Compatible() {
- p.errorf(imp.Path.Location, "the `%s` import implies `future.keywords`, these are therefore mutually exclusive", RegoV1CompatibleRef)
- return
- }
-
- kwds := make([]string, 0, len(allowedFutureKeywords))
- for k := range allowedFutureKeywords {
- kwds = append(kwds, k)
- }
-
- switch len(path) {
- case 2: // all keywords imported, nothing to do
- case 3: // one keyword imported
- kw, ok := path[2].Value.(String)
- if !ok {
- p.errorf(imp.Path.Location, "invalid import, must be `future.keywords.x`, e.g. `import future.keywords.in`")
- return
- }
- keyword := string(kw)
- _, ok = allowedFutureKeywords[keyword]
- if !ok {
- sort.Strings(kwds) // so the error message is stable
- p.errorf(imp.Path.Location, "unexpected keyword, must be one of %v", kwds)
- return
- }
-
- kwds = []string{keyword} // overwrite
- }
- for _, kw := range kwds {
- p.s.s.AddKeyword(kw, allowedFutureKeywords[kw])
- }
-}
-
-func (p *Parser) regoV1Import(imp *Import) {
- if !p.po.Capabilities.ContainsFeature(FeatureRegoV1Import) {
- p.errorf(imp.Path.Location, "invalid import, `%s` is not supported by current capabilities", RegoV1CompatibleRef)
- return
- }
-
- path := imp.Path.Value.(Ref)
-
- // v1 is only valid option
- if len(path) == 1 || !path[1].Equal(RegoV1CompatibleRef[1]) || len(path) > 2 {
- p.errorf(imp.Path.Location, "invalid import `%s`, must be `%s`", path, RegoV1CompatibleRef)
- return
- }
-
- if p.po.RegoVersion == RegoV1 {
- // We're parsing for Rego v1, where the 'rego.v1' import is a no-op.
- return
- }
-
- if imp.Alias != "" {
- p.errorf(imp.Path.Location, "`rego` imports cannot be aliased")
- return
- }
-
- // import all future keywords with the rego.v1 import
- kwds := make([]string, 0, len(futureKeywords))
- for k := range futureKeywords {
- kwds = append(kwds, k)
- }
-
- if p.s.s.HasKeyword(futureKeywords) && !p.s.s.RegoV1Compatible() {
- // We have imported future keywords, but they didn't come from another `rego.v1` import.
- p.errorf(imp.Path.Location, "the `%s` import implies `future.keywords`, these are therefore mutually exclusive", RegoV1CompatibleRef)
- return
- }
-
- p.s.s.SetRegoV1Compatible()
- for _, kw := range kwds {
- p.s.s.AddKeyword(kw, futureKeywords[kw])
- }
+ return v1.IsFutureKeywordForRegoVersion(s, RegoV0)
}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go b/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go
index 83c87e47b1..2d59616932 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go
@@ -1,24 +1,14 @@
-// Copyright 2016 The OPA Authors. All rights reserved.
+// Copyright 2024 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.
-// This file contains extra functions for parsing Rego.
-// Most of the parsing is handled by the code in parser.go,
-// however, there are additional utilities that are
-// helpful for dealing with Rego source inputs (e.g., REPL
-// statements, source files, etc.)
-
package ast
import (
- "bytes"
"errors"
"fmt"
- "strings"
- "unicode"
- "github.com/open-policy-agent/opa/ast/internal/tokens"
- astJSON "github.com/open-policy-agent/opa/ast/json"
+ v1 "github.com/open-policy-agent/opa/v1/ast"
)
// MustParseBody returns a parsed body.
@@ -30,11 +20,7 @@ func MustParseBody(input string) Body {
// MustParseBodyWithOpts returns a parsed body.
// If an error occurs during parsing, panic.
func MustParseBodyWithOpts(input string, opts ParserOptions) Body {
- parsed, err := ParseBodyWithOpts(input, opts)
- if err != nil {
- panic(err)
- }
- return parsed
+ return v1.MustParseBodyWithOpts(input, setDefaultRegoVersion(opts))
}
// MustParseExpr returns a parsed expression.
@@ -66,11 +52,7 @@ func MustParseModule(input string) *Module {
// MustParseModuleWithOpts returns a parsed module.
// If an error occurs during parsing, panic.
func MustParseModuleWithOpts(input string, opts ParserOptions) *Module {
- parsed, err := ParseModuleWithOpts("", input, opts)
- if err != nil {
- panic(err)
- }
- return parsed
+ return v1.MustParseModuleWithOpts(input, setDefaultRegoVersion(opts))
}
// MustParsePackage returns a Package.
@@ -104,11 +86,7 @@ func MustParseStatement(input string) Statement {
}
func MustParseStatementWithOpts(input string, popts ParserOptions) Statement {
- parsed, err := ParseStatementWithOpts(input, popts)
- if err != nil {
- panic(err)
- }
- return parsed
+ return v1.MustParseStatementWithOpts(input, setDefaultRegoVersion(popts))
}
// MustParseRef returns a parsed reference.
@@ -134,11 +112,7 @@ func MustParseRule(input string) *Rule {
// MustParseRuleWithOpts returns a parsed rule.
// If an error occurs during parsing, panic.
func MustParseRuleWithOpts(input string, opts ParserOptions) *Rule {
- parsed, err := ParseRuleWithOpts(input, opts)
- if err != nil {
- panic(err)
- }
- return parsed
+ return v1.MustParseRuleWithOpts(input, setDefaultRegoVersion(opts))
}
// MustParseTerm returns a parsed term.
@@ -154,331 +128,59 @@ func MustParseTerm(input string) *Term {
// ParseRuleFromBody returns a rule if the body can be interpreted as a rule
// definition. Otherwise, an error is returned.
func ParseRuleFromBody(module *Module, body Body) (*Rule, error) {
-
- if len(body) != 1 {
- return nil, fmt.Errorf("multiple expressions cannot be used for rule head")
- }
-
- return ParseRuleFromExpr(module, body[0])
+ return v1.ParseRuleFromBody(module, body)
}
// ParseRuleFromExpr returns a rule if the expression can be interpreted as a
// rule definition.
func ParseRuleFromExpr(module *Module, expr *Expr) (*Rule, error) {
-
- if len(expr.With) > 0 {
- return nil, fmt.Errorf("expressions using with keyword cannot be used for rule head")
- }
-
- if expr.Negated {
- return nil, fmt.Errorf("negated expressions cannot be used for rule head")
- }
-
- if _, ok := expr.Terms.(*SomeDecl); ok {
- return nil, errors.New("'some' declarations cannot be used for rule head")
- }
-
- if term, ok := expr.Terms.(*Term); ok {
- switch v := term.Value.(type) {
- case Ref:
- if len(v) > 2 { // 2+ dots
- return ParseCompleteDocRuleWithDotsFromTerm(module, term)
- }
- return ParsePartialSetDocRuleFromTerm(module, term)
- default:
- return nil, fmt.Errorf("%v cannot be used for rule name", TypeName(v))
- }
- }
-
- if _, ok := expr.Terms.([]*Term); !ok {
- // This is a defensive check in case other kinds of expression terms are
- // introduced in the future.
- return nil, errors.New("expression cannot be used for rule head")
- }
-
- if expr.IsEquality() {
- return parseCompleteRuleFromEq(module, expr)
- } else if expr.IsAssignment() {
- rule, err := parseCompleteRuleFromEq(module, expr)
- if err != nil {
- return nil, err
- }
- rule.Head.Assign = true
- return rule, nil
- }
-
- if _, ok := BuiltinMap[expr.Operator().String()]; ok {
- return nil, fmt.Errorf("rule name conflicts with built-in function")
- }
-
- return ParseRuleFromCallExpr(module, expr.Terms.([]*Term))
-}
-
-func parseCompleteRuleFromEq(module *Module, expr *Expr) (rule *Rule, err error) {
-
- // ensure the rule location is set to the expr location
- // the helper functions called below try to set the location based
- // on the terms they've been provided but that is not as accurate.
- defer func() {
- if rule != nil {
- rule.Location = expr.Location
- rule.Head.Location = expr.Location
- }
- }()
-
- lhs, rhs := expr.Operand(0), expr.Operand(1)
- if lhs == nil || rhs == nil {
- return nil, errors.New("assignment requires two operands")
- }
-
- rule, err = ParseRuleFromCallEqExpr(module, lhs, rhs)
- if err == nil {
- return rule, nil
- }
-
- rule, err = ParsePartialObjectDocRuleFromEqExpr(module, lhs, rhs)
- if err == nil {
- return rule, nil
- }
-
- return ParseCompleteDocRuleFromEqExpr(module, lhs, rhs)
+ return v1.ParseRuleFromExpr(module, expr)
}
// ParseCompleteDocRuleFromAssignmentExpr returns a rule if the expression can
// be interpreted as a complete document definition declared with the assignment
// operator.
func ParseCompleteDocRuleFromAssignmentExpr(module *Module, lhs, rhs *Term) (*Rule, error) {
-
- rule, err := ParseCompleteDocRuleFromEqExpr(module, lhs, rhs)
- if err != nil {
- return nil, err
- }
-
- rule.Head.Assign = true
-
- return rule, nil
+ return v1.ParseCompleteDocRuleFromAssignmentExpr(module, lhs, rhs)
}
// ParseCompleteDocRuleFromEqExpr returns a rule if the expression can be
// interpreted as a complete document definition.
func ParseCompleteDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) {
- var head *Head
-
- if v, ok := lhs.Value.(Var); ok {
- // Modify the code to add the location to the head ref
- // and set the head ref's jsonOptions.
- head = VarHead(v, lhs.Location, &lhs.jsonOptions)
- } else if r, ok := lhs.Value.(Ref); ok { // groundness ?
- if _, ok := r[0].Value.(Var); !ok {
- return nil, fmt.Errorf("invalid rule head: %v", r)
- }
- head = RefHead(r)
- if len(r) > 1 && !r[len(r)-1].IsGround() {
- return nil, fmt.Errorf("ref not ground")
- }
- } else {
- return nil, fmt.Errorf("%v cannot be used for rule name", TypeName(lhs.Value))
- }
- head.Value = rhs
- head.Location = lhs.Location
- head.setJSONOptions(lhs.jsonOptions)
-
- body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location))
- setJSONOptions(body, &rhs.jsonOptions)
-
- return &Rule{
- Location: lhs.Location,
- Head: head,
- Body: body,
- Module: module,
- jsonOptions: lhs.jsonOptions,
- generatedBody: true,
- }, nil
+ return v1.ParseCompleteDocRuleFromEqExpr(module, lhs, rhs)
}
func ParseCompleteDocRuleWithDotsFromTerm(module *Module, term *Term) (*Rule, error) {
- ref, ok := term.Value.(Ref)
- if !ok {
- return nil, fmt.Errorf("%v cannot be used for rule name", TypeName(term.Value))
- }
-
- if _, ok := ref[0].Value.(Var); !ok {
- return nil, fmt.Errorf("invalid rule head: %v", ref)
- }
- head := RefHead(ref, BooleanTerm(true).SetLocation(term.Location))
- head.generatedValue = true
- head.Location = term.Location
- head.jsonOptions = term.jsonOptions
-
- body := NewBody(NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location))
- setJSONOptions(body, &term.jsonOptions)
-
- return &Rule{
- Location: term.Location,
- Head: head,
- Body: body,
- Module: module,
-
- jsonOptions: term.jsonOptions,
- }, nil
+ return v1.ParseCompleteDocRuleWithDotsFromTerm(module, term)
}
// ParsePartialObjectDocRuleFromEqExpr returns a rule if the expression can be
// interpreted as a partial object document definition.
func ParsePartialObjectDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) {
- ref, ok := lhs.Value.(Ref)
- if !ok {
- return nil, fmt.Errorf("%v cannot be used as rule name", TypeName(lhs.Value))
- }
-
- if _, ok := ref[0].Value.(Var); !ok {
- return nil, fmt.Errorf("invalid rule head: %v", ref)
- }
-
- head := RefHead(ref, rhs)
- if len(ref) == 2 { // backcompat for naked `foo.bar = "baz"` statements
- head.Name = ref[0].Value.(Var)
- head.Key = ref[1]
- }
- head.Location = rhs.Location
- head.jsonOptions = rhs.jsonOptions
-
- body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location))
- setJSONOptions(body, &rhs.jsonOptions)
-
- rule := &Rule{
- Location: rhs.Location,
- Head: head,
- Body: body,
- Module: module,
- jsonOptions: rhs.jsonOptions,
- }
-
- return rule, nil
+ return v1.ParsePartialObjectDocRuleFromEqExpr(module, lhs, rhs)
}
// ParsePartialSetDocRuleFromTerm returns a rule if the term can be interpreted
// as a partial set document definition.
func ParsePartialSetDocRuleFromTerm(module *Module, term *Term) (*Rule, error) {
-
- ref, ok := term.Value.(Ref)
- if !ok || len(ref) == 1 {
- return nil, fmt.Errorf("%vs cannot be used for rule head", TypeName(term.Value))
- }
- if _, ok := ref[0].Value.(Var); !ok {
- return nil, fmt.Errorf("invalid rule head: %v", ref)
- }
-
- head := RefHead(ref)
- if len(ref) == 2 {
- v, ok := ref[0].Value.(Var)
- if !ok {
- return nil, fmt.Errorf("%vs cannot be used for rule head", TypeName(term.Value))
- }
- // Modify the code to add the location to the head ref
- // and set the head ref's jsonOptions.
- head = VarHead(v, ref[0].Location, &ref[0].jsonOptions)
- head.Key = ref[1]
- }
- head.Location = term.Location
- head.jsonOptions = term.jsonOptions
-
- body := NewBody(NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location))
- setJSONOptions(body, &term.jsonOptions)
-
- rule := &Rule{
- Location: term.Location,
- Head: head,
- Body: body,
- Module: module,
- jsonOptions: term.jsonOptions,
- }
-
- return rule, nil
+ return v1.ParsePartialSetDocRuleFromTerm(module, term)
}
// ParseRuleFromCallEqExpr returns a rule if the term can be interpreted as a
// function definition (e.g., f(x) = y => f(x) = y { true }).
func ParseRuleFromCallEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) {
-
- call, ok := lhs.Value.(Call)
- if !ok {
- return nil, fmt.Errorf("must be call")
- }
-
- ref, ok := call[0].Value.(Ref)
- if !ok {
- return nil, fmt.Errorf("%vs cannot be used in function signature", TypeName(call[0].Value))
- }
- if _, ok := ref[0].Value.(Var); !ok {
- return nil, fmt.Errorf("invalid rule head: %v", ref)
- }
-
- head := RefHead(ref, rhs)
- head.Location = lhs.Location
- head.Args = Args(call[1:])
- head.jsonOptions = lhs.jsonOptions
-
- body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location))
- setJSONOptions(body, &rhs.jsonOptions)
-
- rule := &Rule{
- Location: lhs.Location,
- Head: head,
- Body: body,
- Module: module,
- jsonOptions: lhs.jsonOptions,
- }
-
- return rule, nil
+ return v1.ParseRuleFromCallEqExpr(module, lhs, rhs)
}
// ParseRuleFromCallExpr returns a rule if the terms can be interpreted as a
// function returning true or some value (e.g., f(x) => f(x) = true { true }).
func ParseRuleFromCallExpr(module *Module, terms []*Term) (*Rule, error) {
-
- if len(terms) <= 1 {
- return nil, fmt.Errorf("rule argument list must take at least one argument")
- }
-
- loc := terms[0].Location
- ref := terms[0].Value.(Ref)
- if _, ok := ref[0].Value.(Var); !ok {
- return nil, fmt.Errorf("invalid rule head: %v", ref)
- }
- head := RefHead(ref, BooleanTerm(true).SetLocation(loc))
- head.Location = loc
- head.Args = terms[1:]
- head.jsonOptions = terms[0].jsonOptions
-
- body := NewBody(NewExpr(BooleanTerm(true).SetLocation(loc)).SetLocation(loc))
- setJSONOptions(body, &terms[0].jsonOptions)
-
- rule := &Rule{
- Location: loc,
- Head: head,
- Module: module,
- Body: body,
- jsonOptions: terms[0].jsonOptions,
- }
- return rule, nil
+ return v1.ParseRuleFromCallExpr(module, terms)
}
// ParseImports returns a slice of Import objects.
func ParseImports(input string) ([]*Import, error) {
- stmts, _, err := ParseStatements("", input)
- if err != nil {
- return nil, err
- }
- result := []*Import{}
- for _, stmt := range stmts {
- if imp, ok := stmt.(*Import); ok {
- result = append(result, imp)
- } else {
- return nil, fmt.Errorf("expected import but got %T", stmt)
- }
- }
- return result, nil
+ return v1.ParseImports(input)
}
// ParseModule returns a parsed Module object.
@@ -492,11 +194,7 @@ func ParseModule(filename, input string) (*Module, error) {
// For details on Module objects and their fields, see policy.go.
// Empty input will return nil, nil.
func ParseModuleWithOpts(filename, input string, popts ParserOptions) (*Module, error) {
- stmts, comments, err := ParseStatementsWithOpts(filename, input, popts)
- if err != nil {
- return nil, err
- }
- return parseModule(filename, stmts, comments, popts.RegoVersion)
+ return v1.ParseModuleWithOpts(filename, input, setDefaultRegoVersion(popts))
}
// ParseBody returns exactly one body.
@@ -508,28 +206,7 @@ func ParseBody(input string) (Body, error) {
// ParseBodyWithOpts returns exactly one body. It does _not_ set SkipRules: true on its own,
// but respects whatever ParserOptions it's been given.
func ParseBodyWithOpts(input string, popts ParserOptions) (Body, error) {
-
- stmts, _, err := ParseStatementsWithOpts("", input, popts)
- if err != nil {
- return nil, err
- }
-
- result := Body{}
-
- for _, stmt := range stmts {
- switch stmt := stmt.(type) {
- case Body:
- for i := range stmt {
- result.Append(stmt[i])
- }
- case *Comment:
- // skip
- default:
- return nil, fmt.Errorf("expected body but got %T", stmt)
- }
- }
-
- return result, nil
+ return v1.ParseBodyWithOpts(input, setDefaultRegoVersion(popts))
}
// ParseExpr returns exactly one expression.
@@ -548,15 +225,7 @@ func ParseExpr(input string) (*Expr, error) {
// ParsePackage returns exactly one Package.
// If multiple statements are parsed, an error is returned.
func ParsePackage(input string) (*Package, error) {
- stmt, err := ParseStatement(input)
- if err != nil {
- return nil, err
- }
- pkg, ok := stmt.(*Package)
- if !ok {
- return nil, fmt.Errorf("expected package but got %T", stmt)
- }
- return pkg, nil
+ return v1.ParsePackage(input)
}
// ParseTerm returns exactly one term.
@@ -592,18 +261,7 @@ func ParseRef(input string) (Ref, error) {
// ParseRuleWithOpts returns exactly one rule.
// If multiple rules are parsed, an error is returned.
func ParseRuleWithOpts(input string, opts ParserOptions) (*Rule, error) {
- stmts, _, err := ParseStatementsWithOpts("", input, opts)
- if err != nil {
- return nil, err
- }
- if len(stmts) != 1 {
- return nil, fmt.Errorf("expected exactly one statement (rule), got %v = %T, %T", stmts, stmts[0], stmts[1])
- }
- rule, ok := stmts[0].(*Rule)
- if !ok {
- return nil, fmt.Errorf("expected rule but got %T", stmts[0])
- }
- return rule, nil
+ return v1.ParseRuleWithOpts(input, setDefaultRegoVersion(opts))
}
// ParseRule returns exactly one rule.
@@ -622,20 +280,13 @@ func ParseStatement(input string) (Statement, error) {
return nil, err
}
if len(stmts) != 1 {
- return nil, fmt.Errorf("expected exactly one statement")
+ return nil, errors.New("expected exactly one statement")
}
return stmts[0], nil
}
func ParseStatementWithOpts(input string, popts ParserOptions) (Statement, error) {
- stmts, _, err := ParseStatementsWithOpts("", input, popts)
- if err != nil {
- return nil, err
- }
- if len(stmts) != 1 {
- return nil, fmt.Errorf("expected exactly one statement")
- }
- return stmts[0], nil
+ return v1.ParseStatementWithOpts(input, setDefaultRegoVersion(popts))
}
// ParseStatements is deprecated. Use ParseStatementWithOpts instead.
@@ -646,204 +297,15 @@ func ParseStatements(filename, input string) ([]Statement, []*Comment, error) {
// ParseStatementsWithOpts returns a slice of parsed statements. This is the
// default return value from the parser.
func ParseStatementsWithOpts(filename, input string, popts ParserOptions) ([]Statement, []*Comment, error) {
-
- parser := NewParser().
- WithFilename(filename).
- WithReader(bytes.NewBufferString(input)).
- WithProcessAnnotation(popts.ProcessAnnotation).
- WithFutureKeywords(popts.FutureKeywords...).
- WithAllFutureKeywords(popts.AllFutureKeywords).
- WithCapabilities(popts.Capabilities).
- WithSkipRules(popts.SkipRules).
- WithJSONOptions(popts.JSONOptions).
- WithRegoVersion(popts.RegoVersion).
- withUnreleasedKeywords(popts.unreleasedKeywords)
-
- stmts, comments, errs := parser.Parse()
-
- if len(errs) > 0 {
- return nil, nil, errs
- }
-
- return stmts, comments, nil
-}
-
-func parseModule(filename string, stmts []Statement, comments []*Comment, regoCompatibilityMode RegoVersion) (*Module, error) {
-
- if len(stmts) == 0 {
- return nil, NewError(ParseErr, &Location{File: filename}, "empty module")
- }
-
- var errs Errors
-
- pkg, ok := stmts[0].(*Package)
- if !ok {
- loc := stmts[0].Loc()
- errs = append(errs, NewError(ParseErr, loc, "package expected"))
- }
-
- mod := &Module{
- Package: pkg,
- stmts: stmts,
- }
-
- // The comments slice only holds comments that were not their own statements.
- mod.Comments = append(mod.Comments, comments...)
- mod.regoVersion = regoCompatibilityMode
-
- for i, stmt := range stmts[1:] {
- switch stmt := stmt.(type) {
- case *Import:
- mod.Imports = append(mod.Imports, stmt)
- if mod.regoVersion == RegoV0 && Compare(stmt.Path.Value, RegoV1CompatibleRef) == 0 {
- mod.regoVersion = RegoV0CompatV1
- }
- case *Rule:
- setRuleModule(stmt, mod)
- mod.Rules = append(mod.Rules, stmt)
- case Body:
- rule, err := ParseRuleFromBody(mod, stmt)
- if err != nil {
- errs = append(errs, NewError(ParseErr, stmt[0].Location, err.Error()))
- continue
- }
- rule.generatedBody = true
- mod.Rules = append(mod.Rules, rule)
-
- // NOTE(tsandall): the statement should now be interpreted as a
- // rule so update the statement list. This is important for the
- // logic below that associates annotations with statements.
- stmts[i+1] = rule
- case *Package:
- errs = append(errs, NewError(ParseErr, stmt.Loc(), "unexpected package"))
- case *Annotations:
- mod.Annotations = append(mod.Annotations, stmt)
- case *Comment:
- // Ignore comments, they're handled above.
- default:
- panic("illegal value") // Indicates grammar is out-of-sync with code.
- }
- }
-
- if mod.regoVersion == RegoV0CompatV1 || mod.regoVersion == RegoV1 {
- for _, rule := range mod.Rules {
- for r := rule; r != nil; r = r.Else {
- errs = append(errs, CheckRegoV1(r)...)
- }
- }
- }
-
- if len(errs) > 0 {
- return nil, errs
- }
-
- errs = append(errs, attachAnnotationsNodes(mod)...)
-
- if len(errs) > 0 {
- return nil, errs
- }
-
- attachRuleAnnotations(mod)
-
- return mod, nil
-}
-
-func ruleDeclarationHasKeyword(rule *Rule, keyword tokens.Token) bool {
- for _, kw := range rule.Head.keywords {
- if kw == keyword {
- return true
- }
- }
- return false
-}
-
-func newScopeAttachmentErr(a *Annotations, want string) *Error {
- var have string
- if a.node != nil {
- have = fmt.Sprintf(" (have %v)", TypeName(a.node))
- }
- return NewError(ParseErr, a.Loc(), "annotation scope '%v' must be applied to %v%v", a.Scope, want, have)
-}
-
-func setRuleModule(rule *Rule, module *Module) {
- rule.Module = module
- if rule.Else != nil {
- setRuleModule(rule.Else, module)
- }
-}
-
-func setJSONOptions(x interface{}, jsonOptions *astJSON.Options) {
- vis := NewGenericVisitor(func(x interface{}) bool {
- if x, ok := x.(customJSON); ok {
- x.setJSONOptions(*jsonOptions)
- }
- return false
- })
- vis.Walk(x)
+ return v1.ParseStatementsWithOpts(filename, input, setDefaultRegoVersion(popts))
}
// ParserErrorDetail holds additional details for parser errors.
-type ParserErrorDetail struct {
- Line string `json:"line"`
- Idx int `json:"idx"`
-}
-
-func newParserErrorDetail(bs []byte, offset int) *ParserErrorDetail {
-
- // Find first non-space character at or before offset position.
- if offset >= len(bs) {
- offset = len(bs) - 1
- } else if offset < 0 {
- offset = 0
- }
-
- for offset > 0 && unicode.IsSpace(rune(bs[offset])) {
- offset--
- }
-
- // Find beginning of line containing offset.
- begin := offset
-
- for begin > 0 && !isNewLineChar(bs[begin]) {
- begin--
- }
+type ParserErrorDetail = v1.ParserErrorDetail
- if isNewLineChar(bs[begin]) {
- begin++
+func setDefaultRegoVersion(opts ParserOptions) ParserOptions {
+ if opts.RegoVersion == RegoUndefined {
+ opts.RegoVersion = DefaultRegoVersion
}
-
- // Find end of line containing offset.
- end := offset
-
- for end < len(bs) && !isNewLineChar(bs[end]) {
- end++
- }
-
- if begin > end {
- begin = end
- }
-
- // Extract line and compute index of offset byte in line.
- line := bs[begin:end]
- index := offset - begin
-
- return &ParserErrorDetail{
- Line: string(line),
- Idx: index,
- }
-}
-
-// Lines returns the pretty formatted line output for the error details.
-func (d ParserErrorDetail) Lines() []string {
- line := strings.TrimLeft(d.Line, "\t") // remove leading tabs
- tabCount := len(d.Line) - len(line)
- indent := d.Idx - tabCount
- if indent < 0 {
- indent = 0
- }
- return []string{line, strings.Repeat(" ", indent) + "^"}
-}
-
-func isNewLineChar(b byte) bool {
- return b == '\r' || b == '\n'
+ return opts
}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/policy.go b/vendor/github.com/open-policy-agent/opa/ast/policy.go
index 43e9bba4a3..5055e8f23f 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/policy.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/policy.go
@@ -1,196 +1,113 @@
-// Copyright 2016 The OPA Authors. All rights reserved.
+// Copyright 2024 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.
package ast
import (
- "bytes"
- "encoding/json"
- "fmt"
- "math/rand"
- "strings"
- "time"
-
- "github.com/open-policy-agent/opa/ast/internal/tokens"
astJSON "github.com/open-policy-agent/opa/ast/json"
- "github.com/open-policy-agent/opa/util"
+ v1 "github.com/open-policy-agent/opa/v1/ast"
)
-// Initialize seed for term hashing. This is intentionally placed before the
-// root document sets are constructed to ensure they use the same hash seed as
-// subsequent lookups. If the hash seeds are out of sync, lookups will fail.
-var hashSeed = rand.New(rand.NewSource(time.Now().UnixNano()))
-var hashSeed0 = (uint64(hashSeed.Uint32()) << 32) | uint64(hashSeed.Uint32())
-
// DefaultRootDocument is the default root document.
//
// All package directives inside source files are implicitly prefixed with the
// DefaultRootDocument value.
-var DefaultRootDocument = VarTerm("data")
+var DefaultRootDocument = v1.DefaultRootDocument
// InputRootDocument names the document containing query arguments.
-var InputRootDocument = VarTerm("input")
+var InputRootDocument = v1.InputRootDocument
// SchemaRootDocument names the document containing external data schemas.
-var SchemaRootDocument = VarTerm("schema")
+var SchemaRootDocument = v1.SchemaRootDocument
// FunctionArgRootDocument names the document containing function arguments.
// It's only for internal usage, for referencing function arguments between
// the index and topdown.
-var FunctionArgRootDocument = VarTerm("args")
+var FunctionArgRootDocument = v1.FunctionArgRootDocument
// FutureRootDocument names the document containing new, to-become-default,
// features.
-var FutureRootDocument = VarTerm("future")
+var FutureRootDocument = v1.FutureRootDocument
// RegoRootDocument names the document containing new, to-become-default,
// features in a future versioned release.
-var RegoRootDocument = VarTerm("rego")
+var RegoRootDocument = v1.RegoRootDocument
// RootDocumentNames contains the names of top-level documents that can be
// referred to in modules and queries.
//
// Note, the schema document is not currently implemented in the evaluator so it
// is not registered as a root document name (yet).
-var RootDocumentNames = NewSet(
- DefaultRootDocument,
- InputRootDocument,
-)
+var RootDocumentNames = v1.RootDocumentNames
// DefaultRootRef is a reference to the root of the default document.
//
// All refs to data in the policy engine's storage layer are prefixed with this ref.
-var DefaultRootRef = Ref{DefaultRootDocument}
+var DefaultRootRef = v1.DefaultRootRef
// InputRootRef is a reference to the root of the input document.
//
// All refs to query arguments are prefixed with this ref.
-var InputRootRef = Ref{InputRootDocument}
+var InputRootRef = v1.InputRootRef
// SchemaRootRef is a reference to the root of the schema document.
//
// All refs to schema documents are prefixed with this ref. Note, the schema
// document is not currently implemented in the evaluator so it is not
// registered as a root document ref (yet).
-var SchemaRootRef = Ref{SchemaRootDocument}
+var SchemaRootRef = v1.SchemaRootRef
// RootDocumentRefs contains the prefixes of top-level documents that all
// non-local references start with.
-var RootDocumentRefs = NewSet(
- NewTerm(DefaultRootRef),
- NewTerm(InputRootRef),
-)
+var RootDocumentRefs = v1.RootDocumentRefs
// SystemDocumentKey is the name of the top-level key that identifies the system
// document.
-var SystemDocumentKey = String("system")
+const SystemDocumentKey = v1.SystemDocumentKey
// ReservedVars is the set of names that refer to implicitly ground vars.
-var ReservedVars = NewVarSet(
- DefaultRootDocument.Value.(Var),
- InputRootDocument.Value.(Var),
-)
+var ReservedVars = v1.ReservedVars
// Wildcard represents the wildcard variable as defined in the language.
-var Wildcard = &Term{Value: Var("_")}
+var Wildcard = v1.Wildcard
// WildcardPrefix is the special character that all wildcard variables are
// prefixed with when the statement they are contained in is parsed.
-var WildcardPrefix = "$"
+const WildcardPrefix = v1.WildcardPrefix
// Keywords contains strings that map to language keywords.
-var Keywords = KeywordsForRegoVersion(DefaultRegoVersion)
+var Keywords = v1.Keywords
-var KeywordsV0 = [...]string{
- "not",
- "package",
- "import",
- "as",
- "default",
- "else",
- "with",
- "null",
- "true",
- "false",
- "some",
-}
+var KeywordsV0 = v1.KeywordsV0
-var KeywordsV1 = [...]string{
- "not",
- "package",
- "import",
- "as",
- "default",
- "else",
- "with",
- "null",
- "true",
- "false",
- "some",
- "if",
- "contains",
- "in",
- "every",
-}
+var KeywordsV1 = v1.KeywordsV1
func KeywordsForRegoVersion(v RegoVersion) []string {
- switch v {
- case RegoV0:
- return KeywordsV0[:]
- case RegoV1, RegoV0CompatV1:
- return KeywordsV1[:]
- }
- return nil
+ return v1.KeywordsForRegoVersion(v)
}
// IsKeyword returns true if s is a language keyword.
func IsKeyword(s string) bool {
- return IsInKeywords(s, Keywords)
+ return v1.IsKeyword(s)
}
func IsInKeywords(s string, keywords []string) bool {
- for _, x := range keywords {
- if x == s {
- return true
- }
- }
- return false
+ return v1.IsInKeywords(s, keywords)
}
// IsKeywordInRegoVersion returns true if s is a language keyword.
func IsKeywordInRegoVersion(s string, regoVersion RegoVersion) bool {
- switch regoVersion {
- case RegoV0:
- for _, x := range KeywordsV0 {
- if x == s {
- return true
- }
- }
- case RegoV1, RegoV0CompatV1:
- for _, x := range KeywordsV1 {
- if x == s {
- return true
- }
- }
- }
-
- return false
+ return v1.IsKeywordInRegoVersion(s, regoVersion)
}
type (
// Node represents a node in an AST. Nodes may be statements in a policy module
// or elements of an ad-hoc query, expression, etc.
- Node interface {
- fmt.Stringer
- Loc() *Location
- SetLoc(*Location)
- }
+ Node = v1.Node
// Statement represents a single statement in a policy module.
- Statement interface {
- Node
- }
+ Statement = v1.Statement
)
type (
@@ -198,1894 +115,121 @@ type (
// Module represents a collection of policies (defined by rules)
// within a namespace (defined by the package) and optional
// dependencies on external documents (defined by imports).
- Module struct {
- Package *Package `json:"package"`
- Imports []*Import `json:"imports,omitempty"`
- Annotations []*Annotations `json:"annotations,omitempty"`
- Rules []*Rule `json:"rules,omitempty"`
- Comments []*Comment `json:"comments,omitempty"`
- stmts []Statement
- regoVersion RegoVersion
- }
+ Module = v1.Module
// Comment contains the raw text from the comment in the definition.
- Comment struct {
- // TODO: these fields have inconsistent JSON keys with other structs in this package.
- Text []byte
- Location *Location
-
- jsonOptions astJSON.Options
- }
+ Comment = v1.Comment
// Package represents the namespace of the documents produced
// by rules inside the module.
- Package struct {
- Path Ref `json:"path"`
- Location *Location `json:"location,omitempty"`
-
- jsonOptions astJSON.Options
- }
+ Package = v1.Package
// Import represents a dependency on a document outside of the policy
// namespace. Imports are optional.
- Import struct {
- Path *Term `json:"path"`
- Alias Var `json:"alias,omitempty"`
- Location *Location `json:"location,omitempty"`
-
- jsonOptions astJSON.Options
- }
+ Import = v1.Import
// Rule represents a rule as defined in the language. Rules define the
// content of documents that represent policy decisions.
- Rule struct {
- Default bool `json:"default,omitempty"`
- Head *Head `json:"head"`
- Body Body `json:"body"`
- Else *Rule `json:"else,omitempty"`
- Location *Location `json:"location,omitempty"`
- Annotations []*Annotations `json:"annotations,omitempty"`
-
- // Module is a pointer to the module containing this rule. If the rule
- // was NOT created while parsing/constructing a module, this should be
- // left unset. The pointer is not included in any standard operations
- // on the rule (e.g., printing, comparison, visiting, etc.)
- Module *Module `json:"-"`
-
- generatedBody bool
- jsonOptions astJSON.Options
- }
+ Rule = v1.Rule
// Head represents the head of a rule.
- Head struct {
- Name Var `json:"name,omitempty"`
- Reference Ref `json:"ref,omitempty"`
- Args Args `json:"args,omitempty"`
- Key *Term `json:"key,omitempty"`
- Value *Term `json:"value,omitempty"`
- Assign bool `json:"assign,omitempty"`
- Location *Location `json:"location,omitempty"`
-
- keywords []tokens.Token
- generatedValue bool
- jsonOptions astJSON.Options
- }
+ Head = v1.Head
// Args represents zero or more arguments to a rule.
- Args []*Term
+ Args = v1.Args
// Body represents one or more expressions contained inside a rule or user
// function.
- Body []*Expr
+ Body = v1.Body
// Expr represents a single expression contained inside the body of a rule.
- Expr struct {
- With []*With `json:"with,omitempty"`
- Terms interface{} `json:"terms"`
- Index int `json:"index"`
- Generated bool `json:"generated,omitempty"`
- Negated bool `json:"negated,omitempty"`
- Location *Location `json:"location,omitempty"`
-
- jsonOptions astJSON.Options
- generatedFrom *Expr
- generates []*Expr
- }
+ Expr = v1.Expr
// SomeDecl represents a variable declaration statement. The symbols are variables.
- SomeDecl struct {
- Symbols []*Term `json:"symbols"`
- Location *Location `json:"location,omitempty"`
+ SomeDecl = v1.SomeDecl
- jsonOptions astJSON.Options
- }
-
- Every struct {
- Key *Term `json:"key"`
- Value *Term `json:"value"`
- Domain *Term `json:"domain"`
- Body Body `json:"body"`
- Location *Location `json:"location,omitempty"`
-
- jsonOptions astJSON.Options
- }
+ Every = v1.Every
// With represents a modifier on an expression.
- With struct {
- Target *Term `json:"target"`
- Value *Term `json:"value"`
- Location *Location `json:"location,omitempty"`
-
- jsonOptions astJSON.Options
- }
+ With = v1.With
)
-// Compare returns an integer indicating whether mod is less than, equal to,
-// or greater than other.
-func (mod *Module) Compare(other *Module) int {
- if mod == nil {
- if other == nil {
- return 0
- }
- return -1
- } else if other == nil {
- return 1
- }
- if cmp := mod.Package.Compare(other.Package); cmp != 0 {
- return cmp
- }
- if cmp := importsCompare(mod.Imports, other.Imports); cmp != 0 {
- return cmp
- }
- if cmp := annotationsCompare(mod.Annotations, other.Annotations); cmp != 0 {
- return cmp
- }
- return rulesCompare(mod.Rules, other.Rules)
-}
-
-// Copy returns a deep copy of mod.
-func (mod *Module) Copy() *Module {
- cpy := *mod
- cpy.Rules = make([]*Rule, len(mod.Rules))
-
- nodes := make(map[Node]Node, len(mod.Rules)+len(mod.Imports)+1 /* package */)
-
- for i := range mod.Rules {
- cpy.Rules[i] = mod.Rules[i].Copy()
- cpy.Rules[i].Module = &cpy
- nodes[mod.Rules[i]] = cpy.Rules[i]
- }
-
- cpy.Imports = make([]*Import, len(mod.Imports))
- for i := range mod.Imports {
- cpy.Imports[i] = mod.Imports[i].Copy()
- nodes[mod.Imports[i]] = cpy.Imports[i]
- }
-
- cpy.Package = mod.Package.Copy()
- nodes[mod.Package] = cpy.Package
-
- cpy.Annotations = make([]*Annotations, len(mod.Annotations))
- for i, a := range mod.Annotations {
- cpy.Annotations[i] = a.Copy(nodes[a.node])
- }
-
- cpy.Comments = make([]*Comment, len(mod.Comments))
- for i := range mod.Comments {
- cpy.Comments[i] = mod.Comments[i].Copy()
- }
-
- cpy.stmts = make([]Statement, len(mod.stmts))
- for i := range mod.stmts {
- cpy.stmts[i] = nodes[mod.stmts[i]]
- }
-
- return &cpy
-}
-
-// Equal returns true if mod equals other.
-func (mod *Module) Equal(other *Module) bool {
- return mod.Compare(other) == 0
-}
-
-func (mod *Module) String() string {
- byNode := map[Node][]*Annotations{}
- for _, a := range mod.Annotations {
- byNode[a.node] = append(byNode[a.node], a)
- }
-
- appendAnnotationStrings := func(buf []string, node Node) []string {
- if as, ok := byNode[node]; ok {
- for i := range as {
- buf = append(buf, "# METADATA")
- buf = append(buf, "# "+as[i].String())
- }
- }
- return buf
- }
-
- buf := []string{}
- buf = appendAnnotationStrings(buf, mod.Package)
- buf = append(buf, mod.Package.String())
-
- if len(mod.Imports) > 0 {
- buf = append(buf, "")
- for _, imp := range mod.Imports {
- buf = appendAnnotationStrings(buf, imp)
- buf = append(buf, imp.String())
- }
- }
- if len(mod.Rules) > 0 {
- buf = append(buf, "")
- for _, rule := range mod.Rules {
- buf = appendAnnotationStrings(buf, rule)
- buf = append(buf, rule.stringWithOpts(toStringOpts{regoVersion: mod.regoVersion}))
- }
- }
- return strings.Join(buf, "\n")
-}
-
-// RuleSet returns a RuleSet containing named rules in the mod.
-func (mod *Module) RuleSet(name Var) RuleSet {
- rs := NewRuleSet()
- for _, rule := range mod.Rules {
- if rule.Head.Name.Equal(name) {
- rs.Add(rule)
- }
- }
- return rs
-}
-
-// UnmarshalJSON parses bs and stores the result in mod. The rules in the module
-// will have their module pointer set to mod.
-func (mod *Module) UnmarshalJSON(bs []byte) error {
-
- // Declare a new type and use a type conversion to avoid recursively calling
- // Module#UnmarshalJSON.
- type module Module
-
- if err := util.UnmarshalJSON(bs, (*module)(mod)); err != nil {
- return err
- }
-
- WalkRules(mod, func(rule *Rule) bool {
- rule.Module = mod
- return false
- })
-
- return nil
-}
-
-func (mod *Module) regoV1Compatible() bool {
- return mod.regoVersion == RegoV1 || mod.regoVersion == RegoV0CompatV1
-}
-
-func (mod *Module) RegoVersion() RegoVersion {
- return mod.regoVersion
-}
-
-// SetRegoVersion sets the RegoVersion for the module.
-// Note: Setting a rego-version that does not match the module's rego-version might have unintended consequences.
-func (mod *Module) SetRegoVersion(v RegoVersion) {
- mod.regoVersion = v
-}
-
// NewComment returns a new Comment object.
func NewComment(text []byte) *Comment {
- return &Comment{
- Text: text,
- }
-}
-
-// Loc returns the location of the comment in the definition.
-func (c *Comment) Loc() *Location {
- if c == nil {
- return nil
- }
- return c.Location
-}
-
-// SetLoc sets the location on c.
-func (c *Comment) SetLoc(loc *Location) {
- c.Location = loc
-}
-
-func (c *Comment) String() string {
- return "#" + string(c.Text)
-}
-
-// Copy returns a deep copy of c.
-func (c *Comment) Copy() *Comment {
- cpy := *c
- cpy.Text = make([]byte, len(c.Text))
- copy(cpy.Text, c.Text)
- return &cpy
-}
-
-// Equal returns true if this comment equals the other comment.
-// Unlike other equality checks on AST nodes, comment equality
-// depends on location.
-func (c *Comment) Equal(other *Comment) bool {
- return c.Location.Equal(other.Location) && bytes.Equal(c.Text, other.Text)
-}
-
-func (c *Comment) setJSONOptions(opts astJSON.Options) {
- // Note: this is not used for location since Comments use default JSON marshaling
- // behavior with struct field names in JSON.
- c.jsonOptions = opts
- if c.Location != nil {
- c.Location.JSONOptions = opts
- }
-}
-
-// Compare returns an integer indicating whether pkg is less than, equal to,
-// or greater than other.
-func (pkg *Package) Compare(other *Package) int {
- return Compare(pkg.Path, other.Path)
-}
-
-// Copy returns a deep copy of pkg.
-func (pkg *Package) Copy() *Package {
- cpy := *pkg
- cpy.Path = pkg.Path.Copy()
- return &cpy
-}
-
-// Equal returns true if pkg is equal to other.
-func (pkg *Package) Equal(other *Package) bool {
- return pkg.Compare(other) == 0
-}
-
-// Loc returns the location of the Package in the definition.
-func (pkg *Package) Loc() *Location {
- if pkg == nil {
- return nil
- }
- return pkg.Location
-}
-
-// SetLoc sets the location on pkg.
-func (pkg *Package) SetLoc(loc *Location) {
- pkg.Location = loc
-}
-
-func (pkg *Package) String() string {
- if pkg == nil {
- return ""
- } else if len(pkg.Path) <= 1 {
- return fmt.Sprintf("package ", pkg.Path)
- }
- // Omit head as all packages have the DefaultRootDocument prepended at parse time.
- path := make(Ref, len(pkg.Path)-1)
- path[0] = VarTerm(string(pkg.Path[1].Value.(String)))
- copy(path[1:], pkg.Path[2:])
- return fmt.Sprintf("package %v", path)
-}
-
-func (pkg *Package) setJSONOptions(opts astJSON.Options) {
- pkg.jsonOptions = opts
- if pkg.Location != nil {
- pkg.Location.JSONOptions = opts
- }
-}
-
-func (pkg *Package) MarshalJSON() ([]byte, error) {
- data := map[string]interface{}{
- "path": pkg.Path,
- }
-
- if pkg.jsonOptions.MarshalOptions.IncludeLocation.Package {
- if pkg.Location != nil {
- data["location"] = pkg.Location
- }
- }
-
- return json.Marshal(data)
+ return v1.NewComment(text)
}
// IsValidImportPath returns an error indicating if the import path is invalid.
// If the import path is valid, err is nil.
func IsValidImportPath(v Value) (err error) {
- switch v := v.(type) {
- case Var:
- if !v.Equal(DefaultRootDocument.Value) && !v.Equal(InputRootDocument.Value) {
- return fmt.Errorf("invalid path %v: path must begin with input or data", v)
- }
- case Ref:
- if err := IsValidImportPath(v[0].Value); err != nil {
- return fmt.Errorf("invalid path %v: path must begin with input or data", v)
- }
- for _, e := range v[1:] {
- if _, ok := e.Value.(String); !ok {
- return fmt.Errorf("invalid path %v: path elements must be strings", v)
- }
- }
- default:
- return fmt.Errorf("invalid path %v: path must be ref or var", v)
- }
- return nil
-}
-
-// Compare returns an integer indicating whether imp is less than, equal to,
-// or greater than other.
-func (imp *Import) Compare(other *Import) int {
- if imp == nil {
- if other == nil {
- return 0
- }
- return -1
- } else if other == nil {
- return 1
- }
- if cmp := Compare(imp.Path, other.Path); cmp != 0 {
- return cmp
- }
- return Compare(imp.Alias, other.Alias)
-}
-
-// Copy returns a deep copy of imp.
-func (imp *Import) Copy() *Import {
- cpy := *imp
- cpy.Path = imp.Path.Copy()
- return &cpy
-}
-
-// Equal returns true if imp is equal to other.
-func (imp *Import) Equal(other *Import) bool {
- return imp.Compare(other) == 0
-}
-
-// Loc returns the location of the Import in the definition.
-func (imp *Import) Loc() *Location {
- if imp == nil {
- return nil
- }
- return imp.Location
-}
-
-// SetLoc sets the location on imp.
-func (imp *Import) SetLoc(loc *Location) {
- imp.Location = loc
-}
-
-// Name returns the variable that is used to refer to the imported virtual
-// document. This is the alias if defined otherwise the last element in the
-// path.
-func (imp *Import) Name() Var {
- if len(imp.Alias) != 0 {
- return imp.Alias
- }
- switch v := imp.Path.Value.(type) {
- case Var:
- return v
- case Ref:
- if len(v) == 1 {
- return v[0].Value.(Var)
- }
- return Var(v[len(v)-1].Value.(String))
- }
- panic("illegal import")
-}
-
-func (imp *Import) String() string {
- buf := []string{"import", imp.Path.String()}
- if len(imp.Alias) > 0 {
- buf = append(buf, "as "+imp.Alias.String())
- }
- return strings.Join(buf, " ")
-}
-
-func (imp *Import) setJSONOptions(opts astJSON.Options) {
- imp.jsonOptions = opts
- if imp.Location != nil {
- imp.Location.JSONOptions = opts
- }
-}
-
-func (imp *Import) MarshalJSON() ([]byte, error) {
- data := map[string]interface{}{
- "path": imp.Path,
- }
-
- if len(imp.Alias) != 0 {
- data["alias"] = imp.Alias
- }
-
- if imp.jsonOptions.MarshalOptions.IncludeLocation.Import {
- if imp.Location != nil {
- data["location"] = imp.Location
- }
- }
-
- return json.Marshal(data)
-}
-
-// Compare returns an integer indicating whether rule is less than, equal to,
-// or greater than other.
-func (rule *Rule) Compare(other *Rule) int {
- if rule == nil {
- if other == nil {
- return 0
- }
- return -1
- } else if other == nil {
- return 1
- }
- if cmp := rule.Head.Compare(other.Head); cmp != 0 {
- return cmp
- }
- if cmp := util.Compare(rule.Default, other.Default); cmp != 0 {
- return cmp
- }
- if cmp := rule.Body.Compare(other.Body); cmp != 0 {
- return cmp
- }
-
- if cmp := annotationsCompare(rule.Annotations, other.Annotations); cmp != 0 {
- return cmp
- }
-
- return rule.Else.Compare(other.Else)
-}
-
-// Copy returns a deep copy of rule.
-func (rule *Rule) Copy() *Rule {
- cpy := *rule
- cpy.Head = rule.Head.Copy()
- cpy.Body = rule.Body.Copy()
-
- cpy.Annotations = make([]*Annotations, len(rule.Annotations))
- for i, a := range rule.Annotations {
- cpy.Annotations[i] = a.Copy(&cpy)
- }
-
- if cpy.Else != nil {
- cpy.Else = rule.Else.Copy()
- }
- return &cpy
-}
-
-// Equal returns true if rule is equal to other.
-func (rule *Rule) Equal(other *Rule) bool {
- return rule.Compare(other) == 0
-}
-
-// Loc returns the location of the Rule in the definition.
-func (rule *Rule) Loc() *Location {
- if rule == nil {
- return nil
- }
- return rule.Location
-}
-
-// SetLoc sets the location on rule.
-func (rule *Rule) SetLoc(loc *Location) {
- rule.Location = loc
-}
-
-// Path returns a ref referring to the document produced by this rule. If rule
-// is not contained in a module, this function panics.
-// Deprecated: Poor handling of ref rules. Use `(*Rule).Ref()` instead.
-func (rule *Rule) Path() Ref {
- if rule.Module == nil {
- panic("assertion failed")
- }
- return rule.Module.Package.Path.Extend(rule.Head.Ref().GroundPrefix())
-}
-
-// Ref returns a ref referring to the document produced by this rule. If rule
-// is not contained in a module, this function panics. The returned ref may
-// contain variables in the last position.
-func (rule *Rule) Ref() Ref {
- if rule.Module == nil {
- panic("assertion failed")
- }
- return rule.Module.Package.Path.Extend(rule.Head.Ref())
-}
-
-func (rule *Rule) String() string {
- return rule.stringWithOpts(toStringOpts{})
-}
-
-type toStringOpts struct {
- regoVersion RegoVersion
-}
-
-func (rule *Rule) stringWithOpts(opts toStringOpts) string {
- buf := []string{}
- if rule.Default {
- buf = append(buf, "default")
- }
- buf = append(buf, rule.Head.stringWithOpts(opts))
- if !rule.Default {
- switch opts.regoVersion {
- case RegoV1, RegoV0CompatV1:
- buf = append(buf, "if")
- }
- buf = append(buf, "{")
- buf = append(buf, rule.Body.String())
- buf = append(buf, "}")
- }
- if rule.Else != nil {
- buf = append(buf, rule.Else.elseString(opts))
- }
- return strings.Join(buf, " ")
-}
-
-func (rule *Rule) isFunction() bool {
- return len(rule.Head.Args) > 0
-}
-
-func (rule *Rule) setJSONOptions(opts astJSON.Options) {
- rule.jsonOptions = opts
- if rule.Location != nil {
- rule.Location.JSONOptions = opts
- }
-}
-
-func (rule *Rule) MarshalJSON() ([]byte, error) {
- data := map[string]interface{}{
- "head": rule.Head,
- "body": rule.Body,
- }
-
- if rule.Default {
- data["default"] = true
- }
-
- if rule.Else != nil {
- data["else"] = rule.Else
- }
-
- if rule.jsonOptions.MarshalOptions.IncludeLocation.Rule {
- if rule.Location != nil {
- data["location"] = rule.Location
- }
- }
-
- if len(rule.Annotations) != 0 {
- data["annotations"] = rule.Annotations
- }
-
- return json.Marshal(data)
-}
-
-func (rule *Rule) elseString(opts toStringOpts) string {
- var buf []string
-
- buf = append(buf, "else")
-
- value := rule.Head.Value
- if value != nil {
- buf = append(buf, "=")
- buf = append(buf, value.String())
- }
-
- switch opts.regoVersion {
- case RegoV1, RegoV0CompatV1:
- buf = append(buf, "if")
- }
-
- buf = append(buf, "{")
- buf = append(buf, rule.Body.String())
- buf = append(buf, "}")
-
- if rule.Else != nil {
- buf = append(buf, rule.Else.elseString(opts))
- }
-
- return strings.Join(buf, " ")
+ return v1.IsValidImportPath(v)
}
// NewHead returns a new Head object. If args are provided, the first will be
// used for the key and the second will be used for the value.
func NewHead(name Var, args ...*Term) *Head {
- head := &Head{
- Name: name, // backcompat
- Reference: []*Term{NewTerm(name)},
- }
- if len(args) == 0 {
- return head
- }
- head.Key = args[0]
- if len(args) == 1 {
- return head
- }
- head.Value = args[1]
- if head.Key != nil && head.Value != nil {
- head.Reference = head.Reference.Append(args[0])
- }
- return head
+ return v1.NewHead(name, args...)
}
// VarHead creates a head object, initializes its Name, Location, and Options,
// and returns the new head.
func VarHead(name Var, location *Location, jsonOpts *astJSON.Options) *Head {
- h := NewHead(name)
- h.Reference[0].Location = location
- if jsonOpts != nil {
- h.Reference[0].setJSONOptions(*jsonOpts)
- }
- return h
+ return v1.VarHead(name, location, jsonOpts)
}
// RefHead returns a new Head object with the passed Ref. If args are provided,
// the first will be used for the value.
func RefHead(ref Ref, args ...*Term) *Head {
- head := &Head{}
- head.SetRef(ref)
- if len(ref) < 2 {
- head.Name = ref[0].Value.(Var)
- }
- if len(args) >= 1 {
- head.Value = args[0]
- }
- return head
+ return v1.RefHead(ref, args...)
}
// DocKind represents the collection of document types that can be produced by rules.
-type DocKind int
+type DocKind = v1.DocKind
const (
// CompleteDoc represents a document that is completely defined by the rule.
- CompleteDoc = iota
+ CompleteDoc = v1.CompleteDoc
// PartialSetDoc represents a set document that is partially defined by the rule.
- PartialSetDoc
+ PartialSetDoc = v1.PartialSetDoc
// PartialObjectDoc represents an object document that is partially defined by the rule.
- PartialObjectDoc
-) // TODO(sr): Deprecate?
-
-// DocKind returns the type of document produced by this rule.
-func (head *Head) DocKind() DocKind {
- if head.Key != nil {
- if head.Value != nil {
- return PartialObjectDoc
- }
- return PartialSetDoc
- }
- return CompleteDoc
-}
+ PartialObjectDoc = v1.PartialObjectDoc
+)
-type RuleKind int
+type RuleKind = v1.RuleKind
const (
- SingleValue = iota
- MultiValue
+ SingleValue = v1.SingleValue
+ MultiValue = v1.MultiValue
)
-// RuleKind returns the type of rule this is
-func (head *Head) RuleKind() RuleKind {
- // NOTE(sr): This is bit verbose, since the key is irrelevant for single vs
- // multi value, but as good a spot as to assert the invariant.
- switch {
- case head.Value != nil:
- return SingleValue
- case head.Key != nil:
- return MultiValue
- default:
- panic("unreachable")
- }
-}
-
-// Ref returns the Ref of the rule. If it doesn't have one, it's filled in
-// via the Head's Name.
-func (head *Head) Ref() Ref {
- if len(head.Reference) > 0 {
- return head.Reference
- }
- return Ref{&Term{Value: head.Name}}
-}
-
-// SetRef can be used to set a rule head's Reference
-func (head *Head) SetRef(r Ref) {
- head.Reference = r
-}
-
-// Compare returns an integer indicating whether head is less than, equal to,
-// or greater than other.
-func (head *Head) Compare(other *Head) int {
- if head == nil {
- if other == nil {
- return 0
- }
- return -1
- } else if other == nil {
- return 1
- }
- if head.Assign && !other.Assign {
- return -1
- } else if !head.Assign && other.Assign {
- return 1
- }
- if cmp := Compare(head.Args, other.Args); cmp != 0 {
- return cmp
- }
- if cmp := Compare(head.Reference, other.Reference); cmp != 0 {
- return cmp
- }
- if cmp := Compare(head.Name, other.Name); cmp != 0 {
- return cmp
- }
- if cmp := Compare(head.Key, other.Key); cmp != 0 {
- return cmp
- }
- return Compare(head.Value, other.Value)
-}
-
-// Copy returns a deep copy of head.
-func (head *Head) Copy() *Head {
- cpy := *head
- cpy.Reference = head.Reference.Copy()
- cpy.Args = head.Args.Copy()
- cpy.Key = head.Key.Copy()
- cpy.Value = head.Value.Copy()
- cpy.keywords = nil
- return &cpy
-}
-
-// Equal returns true if this head equals other.
-func (head *Head) Equal(other *Head) bool {
- return head.Compare(other) == 0
-}
-
-func (head *Head) String() string {
- return head.stringWithOpts(toStringOpts{})
-}
-
-func (head *Head) stringWithOpts(opts toStringOpts) string {
- buf := strings.Builder{}
- buf.WriteString(head.Ref().String())
- containsAdded := false
-
- switch {
- case len(head.Args) != 0:
- buf.WriteString(head.Args.String())
- case len(head.Reference) == 1 && head.Key != nil:
- switch opts.regoVersion {
- case RegoV0:
- buf.WriteRune('[')
- buf.WriteString(head.Key.String())
- buf.WriteRune(']')
- default:
- containsAdded = true
- buf.WriteString(" contains ")
- buf.WriteString(head.Key.String())
- }
- }
- if head.Value != nil {
- if head.Assign {
- buf.WriteString(" := ")
- } else {
- buf.WriteString(" = ")
- }
- buf.WriteString(head.Value.String())
- } else if !containsAdded && head.Name == "" && head.Key != nil {
- buf.WriteString(" contains ")
- buf.WriteString(head.Key.String())
- }
- return buf.String()
-}
-
-func (head *Head) setJSONOptions(opts astJSON.Options) {
- head.jsonOptions = opts
- if head.Location != nil {
- head.Location.JSONOptions = opts
- }
-}
-
-func (head *Head) MarshalJSON() ([]byte, error) {
- var loc *Location
- includeLoc := head.jsonOptions.MarshalOptions.IncludeLocation
- if includeLoc.Head {
- if head.Location != nil {
- loc = head.Location
- }
-
- for _, term := range head.Reference {
- if term.Location != nil {
- term.jsonOptions.MarshalOptions.IncludeLocation.Term = includeLoc.Term
- }
- }
- }
-
- // NOTE(sr): we do this to override the rendering of `head.Reference`.
- // It's still what'll be used via the default means of encoding/json
- // for unmarshaling a json object into a Head struct!
- type h Head
- return json.Marshal(struct {
- h
- Ref Ref `json:"ref"`
- Location *Location `json:"location,omitempty"`
- }{
- h: h(*head),
- Ref: head.Ref(),
- Location: loc,
- })
-}
-
-// Vars returns a set of vars found in the head.
-func (head *Head) Vars() VarSet {
- vis := &VarVisitor{vars: VarSet{}}
- // TODO: improve test coverage for this.
- if head.Args != nil {
- vis.Walk(head.Args)
- }
- if head.Key != nil {
- vis.Walk(head.Key)
- }
- if head.Value != nil {
- vis.Walk(head.Value)
- }
- if len(head.Reference) > 0 {
- vis.Walk(head.Reference[1:])
- }
- return vis.vars
-}
-
-// Loc returns the Location of head.
-func (head *Head) Loc() *Location {
- if head == nil {
- return nil
- }
- return head.Location
-}
-
-// SetLoc sets the location on head.
-func (head *Head) SetLoc(loc *Location) {
- head.Location = loc
-}
-
-func (head *Head) HasDynamicRef() bool {
- pos := head.Reference.Dynamic()
- // Ref is dynamic if it has one non-constant term that isn't the first or last term or if it's a partial set rule.
- return pos > 0 && (pos < len(head.Reference)-1 || head.RuleKind() == MultiValue)
-}
-
-// Copy returns a deep copy of a.
-func (a Args) Copy() Args {
- cpy := Args{}
- for _, t := range a {
- cpy = append(cpy, t.Copy())
- }
- return cpy
-}
-
-func (a Args) String() string {
- buf := make([]string, 0, len(a))
- for _, t := range a {
- buf = append(buf, t.String())
- }
- return "(" + strings.Join(buf, ", ") + ")"
-}
-
-// Loc returns the Location of a.
-func (a Args) Loc() *Location {
- if len(a) == 0 {
- return nil
- }
- return a[0].Location
-}
-
-// SetLoc sets the location on a.
-func (a Args) SetLoc(loc *Location) {
- if len(a) != 0 {
- a[0].SetLocation(loc)
- }
-}
-
-// Vars returns a set of vars that appear in a.
-func (a Args) Vars() VarSet {
- vis := &VarVisitor{vars: VarSet{}}
- vis.Walk(a)
- return vis.vars
-}
-
// NewBody returns a new Body containing the given expressions. The indices of
// the immediate expressions will be reset.
func NewBody(exprs ...*Expr) Body {
- for i, expr := range exprs {
- expr.Index = i
- }
- return Body(exprs)
-}
-
-// MarshalJSON returns JSON encoded bytes representing body.
-func (body Body) MarshalJSON() ([]byte, error) {
- // Serialize empty Body to empty array. This handles both the empty case and the
- // nil case (whereas by default the result would be null if body was nil.)
- if len(body) == 0 {
- return []byte(`[]`), nil
- }
- ret, err := json.Marshal([]*Expr(body))
- return ret, err
-}
-
-// Append adds the expr to the body and updates the expr's index accordingly.
-func (body *Body) Append(expr *Expr) {
- n := len(*body)
- expr.Index = n
- *body = append(*body, expr)
-}
-
-// Set sets the expr in the body at the specified position and updates the
-// expr's index accordingly.
-func (body Body) Set(expr *Expr, pos int) {
- body[pos] = expr
- expr.Index = pos
-}
-
-// Compare returns an integer indicating whether body is less than, equal to,
-// or greater than other.
-//
-// If body is a subset of other, it is considered less than (and vice versa).
-func (body Body) Compare(other Body) int {
- minLen := len(body)
- if len(other) < minLen {
- minLen = len(other)
- }
- for i := 0; i < minLen; i++ {
- if cmp := body[i].Compare(other[i]); cmp != 0 {
- return cmp
- }
- }
- if len(body) < len(other) {
- return -1
- }
- if len(other) < len(body) {
- return 1
- }
- return 0
-}
-
-// Copy returns a deep copy of body.
-func (body Body) Copy() Body {
- cpy := make(Body, len(body))
- for i := range body {
- cpy[i] = body[i].Copy()
- }
- return cpy
-}
-
-// Contains returns true if this body contains the given expression.
-func (body Body) Contains(x *Expr) bool {
- for _, e := range body {
- if e.Equal(x) {
- return true
- }
- }
- return false
-}
-
-// Equal returns true if this Body is equal to the other Body.
-func (body Body) Equal(other Body) bool {
- return body.Compare(other) == 0
-}
-
-// Hash returns the hash code for the Body.
-func (body Body) Hash() int {
- s := 0
- for _, e := range body {
- s += e.Hash()
- }
- return s
-}
-
-// IsGround returns true if all of the expressions in the Body are ground.
-func (body Body) IsGround() bool {
- for _, e := range body {
- if !e.IsGround() {
- return false
- }
- }
- return true
-}
-
-// Loc returns the location of the Body in the definition.
-func (body Body) Loc() *Location {
- if len(body) == 0 {
- return nil
- }
- return body[0].Location
-}
-
-// SetLoc sets the location on body.
-func (body Body) SetLoc(loc *Location) {
- if len(body) != 0 {
- body[0].SetLocation(loc)
- }
-}
-
-func (body Body) String() string {
- buf := make([]string, 0, len(body))
- for _, v := range body {
- buf = append(buf, v.String())
- }
- return strings.Join(buf, "; ")
-}
-
-// Vars returns a VarSet containing variables in body. The params can be set to
-// control which vars are included.
-func (body Body) Vars(params VarVisitorParams) VarSet {
- vis := NewVarVisitor().WithParams(params)
- vis.Walk(body)
- return vis.Vars()
+ return v1.NewBody(exprs...)
}
// NewExpr returns a new Expr object.
-func NewExpr(terms interface{}) *Expr {
- switch terms.(type) {
- case *SomeDecl, *Every, *Term, []*Term: // ok
- default:
- panic("unreachable")
- }
- return &Expr{
- Negated: false,
- Terms: terms,
- Index: 0,
- With: nil,
- }
-}
-
-// Complement returns a copy of this expression with the negation flag flipped.
-func (expr *Expr) Complement() *Expr {
- cpy := *expr
- cpy.Negated = !cpy.Negated
- return &cpy
-}
-
-// Equal returns true if this Expr equals the other Expr.
-func (expr *Expr) Equal(other *Expr) bool {
- return expr.Compare(other) == 0
-}
-
-// Compare returns an integer indicating whether expr is less than, equal to,
-// or greater than other.
-//
-// Expressions are compared as follows:
-//
-// 1. Declarations are always less than other expressions.
-// 2. Preceding expression (by Index) is always less than the other expression.
-// 3. Non-negated expressions are always less than negated expressions.
-// 4. Single term expressions are always less than built-in expressions.
-//
-// Otherwise, the expression terms are compared normally. If both expressions
-// have the same terms, the modifiers are compared.
-func (expr *Expr) Compare(other *Expr) int {
-
- if expr == nil {
- if other == nil {
- return 0
- }
- return -1
- } else if other == nil {
- return 1
- }
-
- o1 := expr.sortOrder()
- o2 := other.sortOrder()
- if o1 < o2 {
- return -1
- } else if o2 < o1 {
- return 1
- }
-
- switch {
- case expr.Index < other.Index:
- return -1
- case expr.Index > other.Index:
- return 1
- }
-
- switch {
- case expr.Negated && !other.Negated:
- return 1
- case !expr.Negated && other.Negated:
- return -1
- }
-
- switch t := expr.Terms.(type) {
- case *Term:
- if cmp := Compare(t.Value, other.Terms.(*Term).Value); cmp != 0 {
- return cmp
- }
- case []*Term:
- if cmp := termSliceCompare(t, other.Terms.([]*Term)); cmp != 0 {
- return cmp
- }
- case *SomeDecl:
- if cmp := Compare(t, other.Terms.(*SomeDecl)); cmp != 0 {
- return cmp
- }
- case *Every:
- if cmp := Compare(t, other.Terms.(*Every)); cmp != 0 {
- return cmp
- }
- }
-
- return withSliceCompare(expr.With, other.With)
-}
-
-func (expr *Expr) sortOrder() int {
- switch expr.Terms.(type) {
- case *SomeDecl:
- return 0
- case *Term:
- return 1
- case []*Term:
- return 2
- case *Every:
- return 3
- }
- return -1
-}
-
-// CopyWithoutTerms returns a deep copy of expr without its Terms
-func (expr *Expr) CopyWithoutTerms() *Expr {
- cpy := *expr
-
- cpy.With = make([]*With, len(expr.With))
- for i := range expr.With {
- cpy.With[i] = expr.With[i].Copy()
- }
-
- return &cpy
-}
-
-// Copy returns a deep copy of expr.
-func (expr *Expr) Copy() *Expr {
-
- cpy := expr.CopyWithoutTerms()
-
- switch ts := expr.Terms.(type) {
- case *SomeDecl:
- cpy.Terms = ts.Copy()
- case []*Term:
- cpyTs := make([]*Term, len(ts))
- for i := range ts {
- cpyTs[i] = ts[i].Copy()
- }
- cpy.Terms = cpyTs
- case *Term:
- cpy.Terms = ts.Copy()
- case *Every:
- cpy.Terms = ts.Copy()
- }
-
- return cpy
-}
-
-// Hash returns the hash code of the Expr.
-func (expr *Expr) Hash() int {
- s := expr.Index
- switch ts := expr.Terms.(type) {
- case *SomeDecl:
- s += ts.Hash()
- case []*Term:
- for _, t := range ts {
- s += t.Value.Hash()
- }
- case *Term:
- s += ts.Value.Hash()
- }
- if expr.Negated {
- s++
- }
- for _, w := range expr.With {
- s += w.Hash()
- }
- return s
-}
-
-// IncludeWith returns a copy of expr with the with modifier appended.
-func (expr *Expr) IncludeWith(target *Term, value *Term) *Expr {
- cpy := *expr
- cpy.With = append(cpy.With, &With{Target: target, Value: value})
- return &cpy
-}
-
-// NoWith returns a copy of expr where the with modifier has been removed.
-func (expr *Expr) NoWith() *Expr {
- cpy := *expr
- cpy.With = nil
- return &cpy
-}
-
-// IsEquality returns true if this is an equality expression.
-func (expr *Expr) IsEquality() bool {
- return isGlobalBuiltin(expr, Var(Equality.Name))
-}
-
-// IsAssignment returns true if this an assignment expression.
-func (expr *Expr) IsAssignment() bool {
- return isGlobalBuiltin(expr, Var(Assign.Name))
-}
-
-// IsCall returns true if this expression calls a function.
-func (expr *Expr) IsCall() bool {
- _, ok := expr.Terms.([]*Term)
- return ok
-}
-
-// IsEvery returns true if this expression is an 'every' expression.
-func (expr *Expr) IsEvery() bool {
- _, ok := expr.Terms.(*Every)
- return ok
-}
-
-// IsSome returns true if this expression is a 'some' expression.
-func (expr *Expr) IsSome() bool {
- _, ok := expr.Terms.(*SomeDecl)
- return ok
-}
-
-// Operator returns the name of the function or built-in this expression refers
-// to. If this expression is not a function call, returns nil.
-func (expr *Expr) Operator() Ref {
- op := expr.OperatorTerm()
- if op == nil {
- return nil
- }
- return op.Value.(Ref)
-}
-
-// OperatorTerm returns the name of the function or built-in this expression
-// refers to. If this expression is not a function call, returns nil.
-func (expr *Expr) OperatorTerm() *Term {
- terms, ok := expr.Terms.([]*Term)
- if !ok || len(terms) == 0 {
- return nil
- }
- return terms[0]
-}
-
-// Operand returns the term at the zero-based pos. If the expr does not include
-// at least pos+1 terms, this function returns nil.
-func (expr *Expr) Operand(pos int) *Term {
- terms, ok := expr.Terms.([]*Term)
- if !ok {
- return nil
- }
- idx := pos + 1
- if idx < len(terms) {
- return terms[idx]
- }
- return nil
-}
-
-// Operands returns the built-in function operands.
-func (expr *Expr) Operands() []*Term {
- terms, ok := expr.Terms.([]*Term)
- if !ok {
- return nil
- }
- return terms[1:]
-}
-
-// IsGround returns true if all of the expression terms are ground.
-func (expr *Expr) IsGround() bool {
- switch ts := expr.Terms.(type) {
- case []*Term:
- for _, t := range ts[1:] {
- if !t.IsGround() {
- return false
- }
- }
- case *Term:
- return ts.IsGround()
- }
- return true
-}
-
-// SetOperator sets the expr's operator and returns the expr itself. If expr is
-// not a call expr, this function will panic.
-func (expr *Expr) SetOperator(term *Term) *Expr {
- expr.Terms.([]*Term)[0] = term
- return expr
-}
-
-// SetLocation sets the expr's location and returns the expr itself.
-func (expr *Expr) SetLocation(loc *Location) *Expr {
- expr.Location = loc
- return expr
-}
-
-// Loc returns the Location of expr.
-func (expr *Expr) Loc() *Location {
- if expr == nil {
- return nil
- }
- return expr.Location
-}
-
-// SetLoc sets the location on expr.
-func (expr *Expr) SetLoc(loc *Location) {
- expr.SetLocation(loc)
-}
-
-func (expr *Expr) String() string {
- buf := make([]string, 0, 2+len(expr.With))
- if expr.Negated {
- buf = append(buf, "not")
- }
- switch t := expr.Terms.(type) {
- case []*Term:
- if expr.IsEquality() && validEqAssignArgCount(expr) {
- buf = append(buf, fmt.Sprintf("%v %v %v", t[1], Equality.Infix, t[2]))
- } else {
- buf = append(buf, Call(t).String())
- }
- case fmt.Stringer:
- buf = append(buf, t.String())
- }
-
- for i := range expr.With {
- buf = append(buf, expr.With[i].String())
- }
-
- return strings.Join(buf, " ")
-}
-
-func (expr *Expr) setJSONOptions(opts astJSON.Options) {
- expr.jsonOptions = opts
- if expr.Location != nil {
- expr.Location.JSONOptions = opts
- }
-}
-
-func (expr *Expr) MarshalJSON() ([]byte, error) {
- data := map[string]interface{}{
- "terms": expr.Terms,
- "index": expr.Index,
- }
-
- if len(expr.With) > 0 {
- data["with"] = expr.With
- }
-
- if expr.Generated {
- data["generated"] = true
- }
-
- if expr.Negated {
- data["negated"] = true
- }
-
- if expr.jsonOptions.MarshalOptions.IncludeLocation.Expr {
- if expr.Location != nil {
- data["location"] = expr.Location
- }
- }
-
- return json.Marshal(data)
-}
-
-// UnmarshalJSON parses the byte array and stores the result in expr.
-func (expr *Expr) UnmarshalJSON(bs []byte) error {
- v := map[string]interface{}{}
- if err := util.UnmarshalJSON(bs, &v); err != nil {
- return err
- }
- return unmarshalExpr(expr, v)
-}
-
-// Vars returns a VarSet containing variables in expr. The params can be set to
-// control which vars are included.
-func (expr *Expr) Vars(params VarVisitorParams) VarSet {
- vis := NewVarVisitor().WithParams(params)
- vis.Walk(expr)
- return vis.Vars()
+func NewExpr(terms any) *Expr {
+ return v1.NewExpr(terms)
}
// NewBuiltinExpr creates a new Expr object with the supplied terms.
// The builtin operator must be the first term.
func NewBuiltinExpr(terms ...*Term) *Expr {
- return &Expr{Terms: terms}
-}
-
-func (expr *Expr) CogeneratedExprs() []*Expr {
- visited := map[*Expr]struct{}{}
- visitCogeneratedExprs(expr, func(e *Expr) bool {
- if expr.Equal(e) {
- return true
- }
- if _, ok := visited[e]; ok {
- return true
- }
- visited[e] = struct{}{}
- return false
- })
-
- result := make([]*Expr, 0, len(visited))
- for e := range visited {
- result = append(result, e)
- }
- return result
-}
-
-func (expr *Expr) BaseCogeneratedExpr() *Expr {
- if expr.generatedFrom == nil {
- return expr
- }
- return expr.generatedFrom.BaseCogeneratedExpr()
-}
-
-func visitCogeneratedExprs(expr *Expr, f func(*Expr) bool) {
- if parent := expr.generatedFrom; parent != nil {
- if stop := f(parent); !stop {
- visitCogeneratedExprs(parent, f)
- }
- }
- for _, child := range expr.generates {
- if stop := f(child); !stop {
- visitCogeneratedExprs(child, f)
- }
- }
-}
-
-func (d *SomeDecl) String() string {
- if call, ok := d.Symbols[0].Value.(Call); ok {
- if len(call) == 4 {
- return "some " + call[1].String() + ", " + call[2].String() + " in " + call[3].String()
- }
- return "some " + call[1].String() + " in " + call[2].String()
- }
- buf := make([]string, len(d.Symbols))
- for i := range buf {
- buf[i] = d.Symbols[i].String()
- }
- return "some " + strings.Join(buf, ", ")
-}
-
-// SetLoc sets the Location on d.
-func (d *SomeDecl) SetLoc(loc *Location) {
- d.Location = loc
-}
-
-// Loc returns the Location of d.
-func (d *SomeDecl) Loc() *Location {
- return d.Location
-}
-
-// Copy returns a deep copy of d.
-func (d *SomeDecl) Copy() *SomeDecl {
- cpy := *d
- cpy.Symbols = termSliceCopy(d.Symbols)
- return &cpy
-}
-
-// Compare returns an integer indicating whether d is less than, equal to, or
-// greater than other.
-func (d *SomeDecl) Compare(other *SomeDecl) int {
- return termSliceCompare(d.Symbols, other.Symbols)
-}
-
-// Hash returns a hash code of d.
-func (d *SomeDecl) Hash() int {
- return termSliceHash(d.Symbols)
-}
-
-func (d *SomeDecl) setJSONOptions(opts astJSON.Options) {
- d.jsonOptions = opts
- if d.Location != nil {
- d.Location.JSONOptions = opts
- }
-}
-
-func (d *SomeDecl) MarshalJSON() ([]byte, error) {
- data := map[string]interface{}{
- "symbols": d.Symbols,
- }
-
- if d.jsonOptions.MarshalOptions.IncludeLocation.SomeDecl {
- if d.Location != nil {
- data["location"] = d.Location
- }
- }
-
- return json.Marshal(data)
-}
-
-func (q *Every) String() string {
- if q.Key != nil {
- return fmt.Sprintf("every %s, %s in %s { %s }",
- q.Key,
- q.Value,
- q.Domain,
- q.Body)
- }
- return fmt.Sprintf("every %s in %s { %s }",
- q.Value,
- q.Domain,
- q.Body)
-}
-
-func (q *Every) Loc() *Location {
- return q.Location
-}
-
-func (q *Every) SetLoc(l *Location) {
- q.Location = l
-}
-
-// Copy returns a deep copy of d.
-func (q *Every) Copy() *Every {
- cpy := *q
- cpy.Key = q.Key.Copy()
- cpy.Value = q.Value.Copy()
- cpy.Domain = q.Domain.Copy()
- cpy.Body = q.Body.Copy()
- return &cpy
-}
-
-func (q *Every) Compare(other *Every) int {
- for _, terms := range [][2]*Term{
- {q.Key, other.Key},
- {q.Value, other.Value},
- {q.Domain, other.Domain},
- } {
- if d := Compare(terms[0], terms[1]); d != 0 {
- return d
- }
- }
- return q.Body.Compare(other.Body)
-}
-
-// KeyValueVars returns the key and val arguments of an `every`
-// expression, if they are non-nil and not wildcards.
-func (q *Every) KeyValueVars() VarSet {
- vis := &VarVisitor{vars: VarSet{}}
- if q.Key != nil {
- vis.Walk(q.Key)
- }
- vis.Walk(q.Value)
- return vis.vars
-}
-
-func (q *Every) setJSONOptions(opts astJSON.Options) {
- q.jsonOptions = opts
- if q.Location != nil {
- q.Location.JSONOptions = opts
- }
-}
-
-func (q *Every) MarshalJSON() ([]byte, error) {
- data := map[string]interface{}{
- "key": q.Key,
- "value": q.Value,
- "domain": q.Domain,
- "body": q.Body,
- }
-
- if q.jsonOptions.MarshalOptions.IncludeLocation.Every {
- if q.Location != nil {
- data["location"] = q.Location
- }
- }
-
- return json.Marshal(data)
-}
-
-func (w *With) String() string {
- return "with " + w.Target.String() + " as " + w.Value.String()
-}
-
-// Equal returns true if this With is equals the other With.
-func (w *With) Equal(other *With) bool {
- return Compare(w, other) == 0
-}
-
-// Compare returns an integer indicating whether w is less than, equal to, or
-// greater than other.
-func (w *With) Compare(other *With) int {
- if w == nil {
- if other == nil {
- return 0
- }
- return -1
- } else if other == nil {
- return 1
- }
- if cmp := Compare(w.Target, other.Target); cmp != 0 {
- return cmp
- }
- return Compare(w.Value, other.Value)
-}
-
-// Copy returns a deep copy of w.
-func (w *With) Copy() *With {
- cpy := *w
- cpy.Value = w.Value.Copy()
- cpy.Target = w.Target.Copy()
- return &cpy
-}
-
-// Hash returns the hash code of the With.
-func (w With) Hash() int {
- return w.Target.Hash() + w.Value.Hash()
-}
-
-// SetLocation sets the location on w.
-func (w *With) SetLocation(loc *Location) *With {
- w.Location = loc
- return w
-}
-
-// Loc returns the Location of w.
-func (w *With) Loc() *Location {
- if w == nil {
- return nil
- }
- return w.Location
-}
-
-// SetLoc sets the location on w.
-func (w *With) SetLoc(loc *Location) {
- w.Location = loc
-}
-
-func (w *With) setJSONOptions(opts astJSON.Options) {
- w.jsonOptions = opts
- if w.Location != nil {
- w.Location.JSONOptions = opts
- }
-}
-
-func (w *With) MarshalJSON() ([]byte, error) {
- data := map[string]interface{}{
- "target": w.Target,
- "value": w.Value,
- }
-
- if w.jsonOptions.MarshalOptions.IncludeLocation.With {
- if w.Location != nil {
- data["location"] = w.Location
- }
- }
-
- return json.Marshal(data)
+ return v1.NewBuiltinExpr(terms...)
}
// Copy returns a deep copy of the AST node x. If x is not an AST node, x is returned unmodified.
-func Copy(x interface{}) interface{} {
- switch x := x.(type) {
- case *Module:
- return x.Copy()
- case *Package:
- return x.Copy()
- case *Import:
- return x.Copy()
- case *Rule:
- return x.Copy()
- case *Head:
- return x.Copy()
- case Args:
- return x.Copy()
- case Body:
- return x.Copy()
- case *Expr:
- return x.Copy()
- case *With:
- return x.Copy()
- case *SomeDecl:
- return x.Copy()
- case *Every:
- return x.Copy()
- case *Term:
- return x.Copy()
- case *ArrayComprehension:
- return x.Copy()
- case *SetComprehension:
- return x.Copy()
- case *ObjectComprehension:
- return x.Copy()
- case Set:
- return x.Copy()
- case *object:
- return x.Copy()
- case *Array:
- return x.Copy()
- case Ref:
- return x.Copy()
- case Call:
- return x.Copy()
- case *Comment:
- return x.Copy()
- }
- return x
+func Copy(x any) any {
+ return v1.Copy(x)
}
// RuleSet represents a collection of rules that produce a virtual document.
-type RuleSet []*Rule
+type RuleSet = v1.RuleSet
// NewRuleSet returns a new RuleSet containing the given rules.
func NewRuleSet(rules ...*Rule) RuleSet {
- rs := make(RuleSet, 0, len(rules))
- for _, rule := range rules {
- rs.Add(rule)
- }
- return rs
-}
-
-// Add inserts the rule into rs.
-func (rs *RuleSet) Add(rule *Rule) {
- for _, exist := range *rs {
- if exist.Equal(rule) {
- return
- }
- }
- *rs = append(*rs, rule)
-}
-
-// Contains returns true if rs contains rule.
-func (rs RuleSet) Contains(rule *Rule) bool {
- for i := range rs {
- if rs[i].Equal(rule) {
- return true
- }
- }
- return false
-}
-
-// Diff returns a new RuleSet containing rules in rs that are not in other.
-func (rs RuleSet) Diff(other RuleSet) RuleSet {
- result := NewRuleSet()
- for i := range rs {
- if !other.Contains(rs[i]) {
- result.Add(rs[i])
- }
- }
- return result
-}
-
-// Equal returns true if rs equals other.
-func (rs RuleSet) Equal(other RuleSet) bool {
- return len(rs.Diff(other)) == 0 && len(other.Diff(rs)) == 0
-}
-
-// Merge returns a ruleset containing the union of rules from rs an other.
-func (rs RuleSet) Merge(other RuleSet) RuleSet {
- result := NewRuleSet()
- for i := range rs {
- result.Add(rs[i])
- }
- for i := range other {
- result.Add(other[i])
- }
- return result
-}
-
-func (rs RuleSet) String() string {
- buf := make([]string, 0, len(rs))
- for _, rule := range rs {
- buf = append(buf, rule.String())
- }
- return "{" + strings.Join(buf, ", ") + "}"
-}
-
-// Returns true if the equality or assignment expression referred to by expr
-// has a valid number of arguments.
-func validEqAssignArgCount(expr *Expr) bool {
- return len(expr.Operands()) == 2
-}
-
-// this function checks if the expr refers to a non-namespaced (global) built-in
-// function like eq, gt, plus, etc.
-func isGlobalBuiltin(expr *Expr, name Var) bool {
- terms, ok := expr.Terms.([]*Term)
- if !ok {
- return false
- }
-
- // NOTE(tsandall): do not use Term#Equal or Value#Compare to avoid
- // allocation here.
- ref, ok := terms[0].Value.(Ref)
- if !ok || len(ref) != 1 {
- return false
- }
- if head, ok := ref[0].Value.(Var); ok {
- return head.Equal(name)
- }
- return false
+ return v1.NewRuleSet(rules...)
}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/pretty.go b/vendor/github.com/open-policy-agent/opa/ast/pretty.go
index b4f05ad501..84e42f9aec 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/pretty.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/pretty.go
@@ -5,78 +5,14 @@
package ast
import (
- "fmt"
"io"
- "strings"
+
+ v1 "github.com/open-policy-agent/opa/v1/ast"
)
// Pretty writes a pretty representation of the AST rooted at x to w.
//
// This is function is intended for debug purposes when inspecting ASTs.
-func Pretty(w io.Writer, x interface{}) {
- pp := &prettyPrinter{
- depth: -1,
- w: w,
- }
- NewBeforeAfterVisitor(pp.Before, pp.After).Walk(x)
-}
-
-type prettyPrinter struct {
- depth int
- w io.Writer
-}
-
-func (pp *prettyPrinter) Before(x interface{}) bool {
- switch x.(type) {
- case *Term:
- default:
- pp.depth++
- }
-
- switch x := x.(type) {
- case *Term:
- return false
- case Args:
- if len(x) == 0 {
- return false
- }
- pp.writeType(x)
- case *Expr:
- extras := []string{}
- if x.Negated {
- extras = append(extras, "negated")
- }
- extras = append(extras, fmt.Sprintf("index=%d", x.Index))
- pp.writeIndent("%v %v", TypeName(x), strings.Join(extras, " "))
- case Null, Boolean, Number, String, Var:
- pp.writeValue(x)
- default:
- pp.writeType(x)
- }
- return false
-}
-
-func (pp *prettyPrinter) After(x interface{}) {
- switch x.(type) {
- case *Term:
- default:
- pp.depth--
- }
-}
-
-func (pp *prettyPrinter) writeValue(x interface{}) {
- pp.writeIndent(fmt.Sprint(x))
-}
-
-func (pp *prettyPrinter) writeType(x interface{}) {
- pp.writeIndent(TypeName(x))
-}
-
-func (pp *prettyPrinter) writeIndent(f string, a ...interface{}) {
- pad := strings.Repeat(" ", pp.depth)
- pp.write(pad+f, a...)
-}
-
-func (pp *prettyPrinter) write(f string, a ...interface{}) {
- fmt.Fprintf(pp.w, f+"\n", a...)
+func Pretty(w io.Writer, x any) {
+ v1.Pretty(w, x)
}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/schema.go b/vendor/github.com/open-policy-agent/opa/ast/schema.go
index 8c96ac624e..979958a3c0 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/schema.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/schema.go
@@ -5,59 +5,13 @@
package ast
import (
- "fmt"
-
- "github.com/open-policy-agent/opa/types"
- "github.com/open-policy-agent/opa/util"
+ v1 "github.com/open-policy-agent/opa/v1/ast"
)
// SchemaSet holds a map from a path to a schema.
-type SchemaSet struct {
- m *util.HashMap
-}
+type SchemaSet = v1.SchemaSet
// NewSchemaSet returns an empty SchemaSet.
func NewSchemaSet() *SchemaSet {
-
- eqFunc := func(a, b util.T) bool {
- return a.(Ref).Equal(b.(Ref))
- }
-
- hashFunc := func(x util.T) int { return x.(Ref).Hash() }
-
- return &SchemaSet{
- m: util.NewHashMap(eqFunc, hashFunc),
- }
-}
-
-// Put inserts a raw schema into the set.
-func (ss *SchemaSet) Put(path Ref, raw interface{}) {
- ss.m.Put(path, raw)
-}
-
-// Get returns the raw schema identified by the path.
-func (ss *SchemaSet) Get(path Ref) interface{} {
- if ss == nil {
- return nil
- }
- x, ok := ss.m.Get(path)
- if !ok {
- return nil
- }
- return x
-}
-
-func loadSchema(raw interface{}, allowNet []string) (types.Type, error) {
-
- jsonSchema, err := compileSchema(raw, allowNet)
- if err != nil {
- return nil, err
- }
-
- tpe, err := newSchemaParser().parseSchema(jsonSchema.RootSchema)
- if err != nil {
- return nil, fmt.Errorf("type checking: %w", err)
- }
-
- return tpe, nil
+ return v1.NewSchemaSet()
}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/strings.go b/vendor/github.com/open-policy-agent/opa/ast/strings.go
index e489f6977c..c2c81de8b7 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/strings.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/strings.go
@@ -5,14 +5,10 @@
package ast
import (
- "reflect"
- "strings"
+ v1 "github.com/open-policy-agent/opa/v1/ast"
)
// TypeName returns a human readable name for the AST element type.
-func TypeName(x interface{}) string {
- if _, ok := x.(*lazyObj); ok {
- return "object"
- }
- return strings.ToLower(reflect.Indirect(reflect.ValueOf(x)).Type().Name())
+func TypeName(x any) string {
+ return v1.TypeName(x)
}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/term.go b/vendor/github.com/open-policy-agent/opa/ast/term.go
index ce8ee4853d..202355070f 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/term.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/term.go
@@ -1,40 +1,22 @@
-// Copyright 2016 The OPA Authors. All rights reserved.
+// Copyright 2024 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.
-// nolint: deadcode // Public API.
package ast
import (
- "bytes"
"encoding/json"
- "errors"
- "fmt"
"io"
- "math"
- "math/big"
- "net/url"
- "regexp"
- "sort"
- "strconv"
- "strings"
- "sync"
-
- "github.com/OneOfOne/xxhash"
-
- astJSON "github.com/open-policy-agent/opa/ast/json"
- "github.com/open-policy-agent/opa/ast/location"
- "github.com/open-policy-agent/opa/util"
-)
-var errFindNotFound = fmt.Errorf("find: not found")
+ v1 "github.com/open-policy-agent/opa/v1/ast"
+)
// Location records a position in source code.
-type Location = location.Location
+type Location = v1.Location
// NewLocation returns a new Location object.
func NewLocation(text []byte, file string, row int, col int) *Location {
- return location.NewLocation(text, file, row, col)
+ return v1.NewLocation(text, file, row, col)
}
// Value declares the common interface for all Term values. Every kind of Term value
@@ -45,3230 +27,280 @@ func NewLocation(text []byte, file string, row int, col int) *Location {
// - Variables, References
// - Array, Set, and Object Comprehensions
// - Calls
-type Value interface {
- Compare(other Value) int // Compare returns <0, 0, or >0 if this Value is less than, equal to, or greater than other, respectively.
- Find(path Ref) (Value, error) // Find returns value referred to by path or an error if path is not found.
- Hash() int // Returns hash code of the value.
- IsGround() bool // IsGround returns true if this value is not a variable or contains no variables.
- String() string // String returns a human readable string representation of the value.
-}
+type Value = v1.Value
// InterfaceToValue converts a native Go value x to a Value.
-func InterfaceToValue(x interface{}) (Value, error) {
- switch x := x.(type) {
- case nil:
- return Null{}, nil
- case bool:
- return Boolean(x), nil
- case json.Number:
- return Number(x), nil
- case int64:
- return int64Number(x), nil
- case uint64:
- return uint64Number(x), nil
- case float64:
- return floatNumber(x), nil
- case int:
- return intNumber(x), nil
- case string:
- return String(x), nil
- case []interface{}:
- r := make([]*Term, len(x))
- for i, e := range x {
- e, err := InterfaceToValue(e)
- if err != nil {
- return nil, err
- }
- r[i] = &Term{Value: e}
- }
- return NewArray(r...), nil
- case map[string]interface{}:
- r := newobject(len(x))
- for k, v := range x {
- k, err := InterfaceToValue(k)
- if err != nil {
- return nil, err
- }
- v, err := InterfaceToValue(v)
- if err != nil {
- return nil, err
- }
- r.Insert(NewTerm(k), NewTerm(v))
- }
- return r, nil
- case map[string]string:
- r := newobject(len(x))
- for k, v := range x {
- k, err := InterfaceToValue(k)
- if err != nil {
- return nil, err
- }
- v, err := InterfaceToValue(v)
- if err != nil {
- return nil, err
- }
- r.Insert(NewTerm(k), NewTerm(v))
- }
- return r, nil
- default:
- ptr := util.Reference(x)
- if err := util.RoundTrip(ptr); err != nil {
- return nil, fmt.Errorf("ast: interface conversion: %w", err)
- }
- return InterfaceToValue(*ptr)
- }
+func InterfaceToValue(x any) (Value, error) {
+ return v1.InterfaceToValue(x)
}
// ValueFromReader returns an AST value from a JSON serialized value in the reader.
func ValueFromReader(r io.Reader) (Value, error) {
- var x interface{}
- if err := util.NewJSONDecoder(r).Decode(&x); err != nil {
- return nil, err
- }
- return InterfaceToValue(x)
+ return v1.ValueFromReader(r)
}
// As converts v into a Go native type referred to by x.
-func As(v Value, x interface{}) error {
- return util.NewJSONDecoder(bytes.NewBufferString(v.String())).Decode(x)
+func As(v Value, x any) error {
+ return v1.As(v, x)
}
// Resolver defines the interface for resolving references to native Go values.
-type Resolver interface {
- Resolve(Ref) (interface{}, error)
-}
+type Resolver = v1.Resolver
// ValueResolver defines the interface for resolving references to AST values.
-type ValueResolver interface {
- Resolve(Ref) (Value, error)
-}
+type ValueResolver = v1.ValueResolver
// UnknownValueErr indicates a ValueResolver was unable to resolve a reference
// because the reference refers to an unknown value.
-type UnknownValueErr struct{}
-
-func (UnknownValueErr) Error() string {
- return "unknown value"
-}
+type UnknownValueErr = v1.UnknownValueErr
// IsUnknownValueErr returns true if the err is an UnknownValueErr.
func IsUnknownValueErr(err error) bool {
- _, ok := err.(UnknownValueErr)
- return ok
-}
-
-type illegalResolver struct{}
-
-func (illegalResolver) Resolve(ref Ref) (interface{}, error) {
- return nil, fmt.Errorf("illegal value: %v", ref)
+ return v1.IsUnknownValueErr(err)
}
// ValueToInterface returns the Go representation of an AST value. The AST
// value should not contain any values that require evaluation (e.g., vars,
// comprehensions, etc.)
-func ValueToInterface(v Value, resolver Resolver) (interface{}, error) {
- return valueToInterface(v, resolver, JSONOpt{})
-}
-
-func valueToInterface(v Value, resolver Resolver, opt JSONOpt) (interface{}, error) {
- switch v := v.(type) {
- case Null:
- return nil, nil
- case Boolean:
- return bool(v), nil
- case Number:
- return json.Number(v), nil
- case String:
- return string(v), nil
- case *Array:
- buf := []interface{}{}
- for i := 0; i < v.Len(); i++ {
- x1, err := valueToInterface(v.Elem(i).Value, resolver, opt)
- if err != nil {
- return nil, err
- }
- buf = append(buf, x1)
- }
- return buf, nil
- case *object:
- buf := make(map[string]interface{}, v.Len())
- err := v.Iter(func(k, v *Term) error {
- ki, err := valueToInterface(k.Value, resolver, opt)
- if err != nil {
- return err
- }
- var str string
- var ok bool
- if str, ok = ki.(string); !ok {
- var buf bytes.Buffer
- if err := json.NewEncoder(&buf).Encode(ki); err != nil {
- return err
- }
- str = strings.TrimSpace(buf.String())
- }
- vi, err := valueToInterface(v.Value, resolver, opt)
- if err != nil {
- return err
- }
- buf[str] = vi
- return nil
- })
- if err != nil {
- return nil, err
- }
- return buf, nil
- case *lazyObj:
- if opt.CopyMaps {
- return valueToInterface(v.force(), resolver, opt)
- }
- return v.native, nil
- case Set:
- buf := []interface{}{}
- iter := func(x *Term) error {
- x1, err := valueToInterface(x.Value, resolver, opt)
- if err != nil {
- return err
- }
- buf = append(buf, x1)
- return nil
- }
- var err error
- if opt.SortSets {
- err = v.Sorted().Iter(iter)
- } else {
- err = v.Iter(iter)
- }
- if err != nil {
- return nil, err
- }
- return buf, nil
- case Ref:
- return resolver.Resolve(v)
- default:
- return nil, fmt.Errorf("%v requires evaluation", TypeName(v))
- }
+func ValueToInterface(v Value, resolver Resolver) (any, error) {
+ return v1.ValueToInterface(v, resolver)
}
// JSON returns the JSON representation of v. The value must not contain any
// refs or terms that require evaluation (e.g., vars, comprehensions, etc.)
-func JSON(v Value) (interface{}, error) {
- return JSONWithOpt(v, JSONOpt{})
+func JSON(v Value) (any, error) {
+ return v1.JSON(v)
}
// JSONOpt defines parameters for AST to JSON conversion.
-type JSONOpt struct {
- SortSets bool // sort sets before serializing (this makes conversion more expensive)
- CopyMaps bool // enforces copying of map[string]interface{} read from the store
-}
+type JSONOpt = v1.JSONOpt
// JSONWithOpt returns the JSON representation of v. The value must not contain any
// refs or terms that require evaluation (e.g., vars, comprehensions, etc.)
-func JSONWithOpt(v Value, opt JSONOpt) (interface{}, error) {
- return valueToInterface(v, illegalResolver{}, opt)
+func JSONWithOpt(v Value, opt JSONOpt) (any, error) {
+ return v1.JSONWithOpt(v, opt)
}
// MustJSON returns the JSON representation of v. The value must not contain any
// refs or terms that require evaluation (e.g., vars, comprehensions, etc.) If
// the conversion fails, this function will panic. This function is mostly for
// test purposes.
-func MustJSON(v Value) interface{} {
- r, err := JSON(v)
- if err != nil {
- panic(err)
- }
- return r
+func MustJSON(v Value) any {
+ return v1.MustJSON(v)
}
// MustInterfaceToValue converts a native Go value x to a Value. If the
// conversion fails, this function will panic. This function is mostly for test
// purposes.
-func MustInterfaceToValue(x interface{}) Value {
- v, err := InterfaceToValue(x)
- if err != nil {
- panic(err)
- }
- return v
+func MustInterfaceToValue(x any) Value {
+ return v1.MustInterfaceToValue(x)
}
// Term is an argument to a function.
-type Term struct {
- Value Value `json:"value"` // the value of the Term as represented in Go
- Location *Location `json:"location,omitempty"` // the location of the Term in the source
-
- jsonOptions astJSON.Options
-}
+type Term = v1.Term
// NewTerm returns a new Term object.
func NewTerm(v Value) *Term {
- return &Term{
- Value: v,
- }
-}
-
-// SetLocation updates the term's Location and returns the term itself.
-func (term *Term) SetLocation(loc *Location) *Term {
- term.Location = loc
- return term
-}
-
-// Loc returns the Location of term.
-func (term *Term) Loc() *Location {
- if term == nil {
- return nil
- }
- return term.Location
-}
-
-// SetLoc sets the location on term.
-func (term *Term) SetLoc(loc *Location) {
- term.SetLocation(loc)
-}
-
-// Copy returns a deep copy of term.
-func (term *Term) Copy() *Term {
-
- if term == nil {
- return nil
- }
-
- cpy := *term
-
- switch v := term.Value.(type) {
- case Null, Boolean, Number, String, Var:
- cpy.Value = v
- case Ref:
- cpy.Value = v.Copy()
- case *Array:
- cpy.Value = v.Copy()
- case Set:
- cpy.Value = v.Copy()
- case *object:
- cpy.Value = v.Copy()
- case *ArrayComprehension:
- cpy.Value = v.Copy()
- case *ObjectComprehension:
- cpy.Value = v.Copy()
- case *SetComprehension:
- cpy.Value = v.Copy()
- case Call:
- cpy.Value = v.Copy()
- }
-
- return &cpy
-}
-
-// Equal returns true if this term equals the other term. Equality is
-// defined for each kind of term.
-func (term *Term) Equal(other *Term) bool {
- if term == nil && other != nil {
- return false
- }
- if term != nil && other == nil {
- return false
- }
- if term == other {
- return true
- }
-
- // TODO(tsandall): This early-exit avoids allocations for types that have
- // Equal() functions that just use == underneath. We should revisit the
- // other types and implement Equal() functions that do not require
- // allocations.
- switch v := term.Value.(type) {
- case Null:
- return v.Equal(other.Value)
- case Boolean:
- return v.Equal(other.Value)
- case Number:
- return v.Equal(other.Value)
- case String:
- return v.Equal(other.Value)
- case Var:
- return v.Equal(other.Value)
- }
-
- return term.Value.Compare(other.Value) == 0
-}
-
-// Get returns a value referred to by name from the term.
-func (term *Term) Get(name *Term) *Term {
- switch v := term.Value.(type) {
- case *object:
- return v.Get(name)
- case *Array:
- return v.Get(name)
- case interface {
- Get(*Term) *Term
- }:
- return v.Get(name)
- case Set:
- if v.Contains(name) {
- return name
- }
- }
- return nil
-}
-
-// Hash returns the hash code of the Term's Value. Its Location
-// is ignored.
-func (term *Term) Hash() int {
- return term.Value.Hash()
-}
-
-// IsGround returns true if this term's Value is ground.
-func (term *Term) IsGround() bool {
- return term.Value.IsGround()
-}
-
-func (term *Term) setJSONOptions(opts astJSON.Options) {
- term.jsonOptions = opts
- if term.Location != nil {
- term.Location.JSONOptions = opts
- }
-}
-
-// MarshalJSON returns the JSON encoding of the term.
-//
-// Specialized marshalling logic is required to include a type hint for Value.
-func (term *Term) MarshalJSON() ([]byte, error) {
- d := map[string]interface{}{
- "type": TypeName(term.Value),
- "value": term.Value,
- }
- if term.jsonOptions.MarshalOptions.IncludeLocation.Term {
- if term.Location != nil {
- d["location"] = term.Location
- }
- }
- return json.Marshal(d)
-}
-
-func (term *Term) String() string {
- return term.Value.String()
-}
-
-// UnmarshalJSON parses the byte array and stores the result in term.
-// Specialized unmarshalling is required to handle Value and Location.
-func (term *Term) UnmarshalJSON(bs []byte) error {
- v := map[string]interface{}{}
- if err := util.UnmarshalJSON(bs, &v); err != nil {
- return err
- }
- val, err := unmarshalValue(v)
- if err != nil {
- return err
- }
- term.Value = val
-
- if loc, ok := v["location"].(map[string]interface{}); ok {
- term.Location = &Location{}
- err := unmarshalLocation(term.Location, loc)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// Vars returns a VarSet with variables contained in this term.
-func (term *Term) Vars() VarSet {
- vis := &VarVisitor{vars: VarSet{}}
- vis.Walk(term)
- return vis.vars
+ return v1.NewTerm(v)
}
// IsConstant returns true if the AST value is constant.
func IsConstant(v Value) bool {
- found := false
- vis := GenericVisitor{
- func(x interface{}) bool {
- switch x.(type) {
- case Var, Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Call:
- found = true
- return true
- }
- return false
- },
- }
- vis.Walk(v)
- return !found
+ return v1.IsConstant(v)
}
// IsComprehension returns true if the supplied value is a comprehension.
func IsComprehension(x Value) bool {
- switch x.(type) {
- case *ArrayComprehension, *ObjectComprehension, *SetComprehension:
- return true
- }
- return false
+ return v1.IsComprehension(x)
}
// ContainsRefs returns true if the Value v contains refs.
-func ContainsRefs(v interface{}) bool {
- found := false
- WalkRefs(v, func(Ref) bool {
- found = true
- return found
- })
- return found
+func ContainsRefs(v any) bool {
+ return v1.ContainsRefs(v)
}
// ContainsComprehensions returns true if the Value v contains comprehensions.
-func ContainsComprehensions(v interface{}) bool {
- found := false
- WalkClosures(v, func(x interface{}) bool {
- switch x.(type) {
- case *ArrayComprehension, *ObjectComprehension, *SetComprehension:
- found = true
- return found
- }
- return found
- })
- return found
+func ContainsComprehensions(v any) bool {
+ return v1.ContainsComprehensions(v)
}
// ContainsClosures returns true if the Value v contains closures.
-func ContainsClosures(v interface{}) bool {
- found := false
- WalkClosures(v, func(x interface{}) bool {
- switch x.(type) {
- case *ArrayComprehension, *ObjectComprehension, *SetComprehension, *Every:
- found = true
- return found
- }
- return found
- })
- return found
+func ContainsClosures(v any) bool {
+ return v1.ContainsClosures(v)
}
// IsScalar returns true if the AST value is a scalar.
func IsScalar(v Value) bool {
- switch v.(type) {
- case String:
- return true
- case Number:
- return true
- case Boolean:
- return true
- case Null:
- return true
- }
- return false
+ return v1.IsScalar(v)
}
// Null represents the null value defined by JSON.
-type Null struct{}
+type Null = v1.Null
// NullTerm creates a new Term with a Null value.
func NullTerm() *Term {
- return &Term{Value: Null{}}
-}
-
-// Equal returns true if the other term Value is also Null.
-func (null Null) Equal(other Value) bool {
- switch other.(type) {
- case Null:
- return true
- default:
- return false
- }
-}
-
-// Compare compares null to other, return <0, 0, or >0 if it is less than, equal to,
-// or greater than other.
-func (null Null) Compare(other Value) int {
- return Compare(null, other)
-}
-
-// Find returns the current value or a not found error.
-func (null Null) Find(path Ref) (Value, error) {
- if len(path) == 0 {
- return null, nil
- }
- return nil, errFindNotFound
-}
-
-// Hash returns the hash code for the Value.
-func (null Null) Hash() int {
- return 0
-}
-
-// IsGround always returns true.
-func (Null) IsGround() bool {
- return true
-}
-
-func (null Null) String() string {
- return "null"
+ return v1.NullTerm()
}
// Boolean represents a boolean value defined by JSON.
-type Boolean bool
+type Boolean = v1.Boolean
// BooleanTerm creates a new Term with a Boolean value.
func BooleanTerm(b bool) *Term {
- return &Term{Value: Boolean(b)}
-}
-
-// Equal returns true if the other Value is a Boolean and is equal.
-func (bol Boolean) Equal(other Value) bool {
- switch other := other.(type) {
- case Boolean:
- return bol == other
- default:
- return false
- }
-}
-
-// Compare compares bol to other, return <0, 0, or >0 if it is less than, equal to,
-// or greater than other.
-func (bol Boolean) Compare(other Value) int {
- return Compare(bol, other)
-}
-
-// Find returns the current value or a not found error.
-func (bol Boolean) Find(path Ref) (Value, error) {
- if len(path) == 0 {
- return bol, nil
- }
- return nil, errFindNotFound
-}
-
-// Hash returns the hash code for the Value.
-func (bol Boolean) Hash() int {
- if bol {
- return 1
- }
- return 0
-}
-
-// IsGround always returns true.
-func (Boolean) IsGround() bool {
- return true
-}
-
-func (bol Boolean) String() string {
- return strconv.FormatBool(bool(bol))
+ return v1.BooleanTerm(b)
}
// Number represents a numeric value as defined by JSON.
-type Number json.Number
+type Number = v1.Number
// NumberTerm creates a new Term with a Number value.
func NumberTerm(n json.Number) *Term {
- return &Term{Value: Number(n)}
+ return v1.NumberTerm(n)
}
// IntNumberTerm creates a new Term with an integer Number value.
func IntNumberTerm(i int) *Term {
- return &Term{Value: Number(strconv.Itoa(i))}
+ return v1.IntNumberTerm(i)
}
// UIntNumberTerm creates a new Term with an unsigned integer Number value.
func UIntNumberTerm(u uint64) *Term {
- return &Term{Value: uint64Number(u)}
+ return v1.UIntNumberTerm(u)
}
// FloatNumberTerm creates a new Term with a floating point Number value.
func FloatNumberTerm(f float64) *Term {
- s := strconv.FormatFloat(f, 'g', -1, 64)
- return &Term{Value: Number(s)}
-}
-
-// Equal returns true if the other Value is a Number and is equal.
-func (num Number) Equal(other Value) bool {
- switch other := other.(type) {
- case Number:
- return Compare(num, other) == 0
- default:
- return false
- }
-}
-
-// Compare compares num to other, return <0, 0, or >0 if it is less than, equal to,
-// or greater than other.
-func (num Number) Compare(other Value) int {
- return Compare(num, other)
-}
-
-// Find returns the current value or a not found error.
-func (num Number) Find(path Ref) (Value, error) {
- if len(path) == 0 {
- return num, nil
- }
- return nil, errFindNotFound
-}
-
-// Hash returns the hash code for the Value.
-func (num Number) Hash() int {
- f, err := json.Number(num).Float64()
- if err != nil {
- bs := []byte(num)
- h := xxhash.Checksum64(bs)
- return int(h)
- }
- return int(f)
-}
-
-// Int returns the int representation of num if possible.
-func (num Number) Int() (int, bool) {
- i64, ok := num.Int64()
- return int(i64), ok
-}
-
-// Int64 returns the int64 representation of num if possible.
-func (num Number) Int64() (int64, bool) {
- i, err := json.Number(num).Int64()
- if err != nil {
- return 0, false
- }
- return i, true
-}
-
-// Float64 returns the float64 representation of num if possible.
-func (num Number) Float64() (float64, bool) {
- f, err := json.Number(num).Float64()
- if err != nil {
- return 0, false
- }
- return f, true
-}
-
-// IsGround always returns true.
-func (Number) IsGround() bool {
- return true
-}
-
-// MarshalJSON returns JSON encoded bytes representing num.
-func (num Number) MarshalJSON() ([]byte, error) {
- return json.Marshal(json.Number(num))
-}
-
-func (num Number) String() string {
- return string(num)
-}
-
-func intNumber(i int) Number {
- return Number(strconv.Itoa(i))
-}
-
-func int64Number(i int64) Number {
- return Number(strconv.FormatInt(i, 10))
-}
-
-func uint64Number(u uint64) Number {
- return Number(strconv.FormatUint(u, 10))
-}
-
-func floatNumber(f float64) Number {
- return Number(strconv.FormatFloat(f, 'g', -1, 64))
+ return v1.FloatNumberTerm(f)
}
// String represents a string value as defined by JSON.
-type String string
+type String = v1.String
// StringTerm creates a new Term with a String value.
func StringTerm(s string) *Term {
- return &Term{Value: String(s)}
-}
-
-// Equal returns true if the other Value is a String and is equal.
-func (str String) Equal(other Value) bool {
- switch other := other.(type) {
- case String:
- return str == other
- default:
- return false
- }
-}
-
-// Compare compares str to other, return <0, 0, or >0 if it is less than, equal to,
-// or greater than other.
-func (str String) Compare(other Value) int {
- return Compare(str, other)
-}
-
-// Find returns the current value or a not found error.
-func (str String) Find(path Ref) (Value, error) {
- if len(path) == 0 {
- return str, nil
- }
- return nil, errFindNotFound
-}
-
-// IsGround always returns true.
-func (String) IsGround() bool {
- return true
-}
-
-func (str String) String() string {
- return strconv.Quote(string(str))
-}
-
-// Hash returns the hash code for the Value.
-func (str String) Hash() int {
- h := xxhash.ChecksumString64S(string(str), hashSeed0)
- return int(h)
+ return v1.StringTerm(s)
}
// Var represents a variable as defined by the language.
-type Var string
+type Var = v1.Var
// VarTerm creates a new Term with a Variable value.
func VarTerm(v string) *Term {
- return &Term{Value: Var(v)}
-}
-
-// Equal returns true if the other Value is a Variable and has the same value
-// (name).
-func (v Var) Equal(other Value) bool {
- switch other := other.(type) {
- case Var:
- return v == other
- default:
- return false
- }
-}
-
-// Compare compares v to other, return <0, 0, or >0 if it is less than, equal to,
-// or greater than other.
-func (v Var) Compare(other Value) int {
- return Compare(v, other)
-}
-
-// Find returns the current value or a not found error.
-func (v Var) Find(path Ref) (Value, error) {
- if len(path) == 0 {
- return v, nil
- }
- return nil, errFindNotFound
-}
-
-// Hash returns the hash code for the Value.
-func (v Var) Hash() int {
- h := xxhash.ChecksumString64S(string(v), hashSeed0)
- return int(h)
-}
-
-// IsGround always returns false.
-func (Var) IsGround() bool {
- return false
-}
-
-// IsWildcard returns true if this is a wildcard variable.
-func (v Var) IsWildcard() bool {
- return strings.HasPrefix(string(v), WildcardPrefix)
-}
-
-// IsGenerated returns true if this variable was generated during compilation.
-func (v Var) IsGenerated() bool {
- return strings.HasPrefix(string(v), "__local")
-}
-
-func (v Var) String() string {
- // Special case for wildcard so that string representation is parseable. The
- // parser mangles wildcard variables to make their names unique and uses an
- // illegal variable name character (WildcardPrefix) to avoid conflicts. When
- // we serialize the variable here, we need to make sure it's parseable.
- if v.IsWildcard() {
- return Wildcard.String()
- }
- return string(v)
+ return v1.VarTerm(v)
}
// Ref represents a reference as defined by the language.
-type Ref []*Term
+type Ref = v1.Ref
// EmptyRef returns a new, empty reference.
func EmptyRef() Ref {
- return Ref([]*Term{})
+ return v1.EmptyRef()
}
// PtrRef returns a new reference against the head for the pointer
// s. Path components in the pointer are unescaped.
func PtrRef(head *Term, s string) (Ref, error) {
- s = strings.Trim(s, "/")
- if s == "" {
- return Ref{head}, nil
- }
- parts := strings.Split(s, "/")
- if maxLen := math.MaxInt32; len(parts) >= maxLen {
- return nil, fmt.Errorf("path too long: %s, %d > %d (max)", s, len(parts), maxLen)
- }
- ref := make(Ref, uint(len(parts))+1)
- ref[0] = head
- for i := 0; i < len(parts); i++ {
- var err error
- parts[i], err = url.PathUnescape(parts[i])
- if err != nil {
- return nil, err
- }
- ref[i+1] = StringTerm(parts[i])
- }
- return ref, nil
+ return v1.PtrRef(head, s)
}
// RefTerm creates a new Term with a Ref value.
func RefTerm(r ...*Term) *Term {
- return &Term{Value: Ref(r)}
-}
-
-// Append returns a copy of ref with the term appended to the end.
-func (ref Ref) Append(term *Term) Ref {
- n := len(ref)
- dst := make(Ref, n+1)
- copy(dst, ref)
- dst[n] = term
- return dst
-}
-
-// Insert returns a copy of the ref with x inserted at pos. If pos < len(ref),
-// existing elements are shifted to the right. If pos > len(ref)+1 this
-// function panics.
-func (ref Ref) Insert(x *Term, pos int) Ref {
- switch {
- case pos == len(ref):
- return ref.Append(x)
- case pos > len(ref)+1:
- panic("illegal index")
- }
- cpy := make(Ref, len(ref)+1)
- copy(cpy, ref[:pos])
- cpy[pos] = x
- copy(cpy[pos+1:], ref[pos:])
- return cpy
-}
-
-// Extend returns a copy of ref with the terms from other appended. The head of
-// other will be converted to a string.
-func (ref Ref) Extend(other Ref) Ref {
- dst := make(Ref, len(ref)+len(other))
- copy(dst, ref)
-
- head := other[0].Copy()
- head.Value = String(head.Value.(Var))
- offset := len(ref)
- dst[offset] = head
-
- copy(dst[offset+1:], other[1:])
- return dst
-}
-
-// Concat returns a ref with the terms appended.
-func (ref Ref) Concat(terms []*Term) Ref {
- if len(terms) == 0 {
- return ref
- }
- cpy := make(Ref, len(ref)+len(terms))
- copy(cpy, ref)
- copy(cpy[len(ref):], terms)
- return cpy
-}
-
-// Dynamic returns the offset of the first non-constant operand of ref.
-func (ref Ref) Dynamic() int {
- switch ref[0].Value.(type) {
- case Call:
- return 0
- }
- for i := 1; i < len(ref); i++ {
- if !IsConstant(ref[i].Value) {
- return i
- }
- }
- return -1
-}
-
-// Copy returns a deep copy of ref.
-func (ref Ref) Copy() Ref {
- return termSliceCopy(ref)
-}
-
-// Equal returns true if ref is equal to other.
-func (ref Ref) Equal(other Value) bool {
- return Compare(ref, other) == 0
-}
-
-// Compare compares ref to other, return <0, 0, or >0 if it is less than, equal to,
-// or greater than other.
-func (ref Ref) Compare(other Value) int {
- return Compare(ref, other)
-}
-
-// Find returns the current value or a "not found" error.
-func (ref Ref) Find(path Ref) (Value, error) {
- if len(path) == 0 {
- return ref, nil
- }
- return nil, errFindNotFound
-}
-
-// Hash returns the hash code for the Value.
-func (ref Ref) Hash() int {
- return termSliceHash(ref)
-}
-
-// HasPrefix returns true if the other ref is a prefix of this ref.
-func (ref Ref) HasPrefix(other Ref) bool {
- if len(other) > len(ref) {
- return false
- }
- for i := range other {
- if !ref[i].Equal(other[i]) {
- return false
- }
- }
- return true
-}
-
-// ConstantPrefix returns the constant portion of the ref starting from the head.
-func (ref Ref) ConstantPrefix() Ref {
- ref = ref.Copy()
-
- i := ref.Dynamic()
- if i < 0 {
- return ref
- }
- return ref[:i]
-}
-
-func (ref Ref) StringPrefix() Ref {
- r := ref.Copy()
-
- for i := 1; i < len(ref); i++ {
- switch r[i].Value.(type) {
- case String: // pass
- default: // cut off
- return r[:i]
- }
- }
-
- return r
-}
-
-// GroundPrefix returns the ground portion of the ref starting from the head. By
-// definition, the head of the reference is always ground.
-func (ref Ref) GroundPrefix() Ref {
- prefix := make(Ref, 0, len(ref))
-
- for i, x := range ref {
- if i > 0 && !x.IsGround() {
- break
- }
- prefix = append(prefix, x)
- }
-
- return prefix
-}
-
-func (ref Ref) DynamicSuffix() Ref {
- i := ref.Dynamic()
- if i < 0 {
- return nil
- }
- return ref[i:]
-}
-
-// IsGround returns true if all of the parts of the Ref are ground.
-func (ref Ref) IsGround() bool {
- if len(ref) == 0 {
- return true
- }
- return termSliceIsGround(ref[1:])
-}
-
-// IsNested returns true if this ref contains other Refs.
-func (ref Ref) IsNested() bool {
- for _, x := range ref {
- if _, ok := x.Value.(Ref); ok {
- return true
- }
- }
- return false
-}
-
-// Ptr returns a slash-separated path string for this ref. If the ref
-// contains non-string terms this function returns an error. Path
-// components are escaped.
-func (ref Ref) Ptr() (string, error) {
- parts := make([]string, 0, len(ref)-1)
- for _, term := range ref[1:] {
- if str, ok := term.Value.(String); ok {
- parts = append(parts, url.PathEscape(string(str)))
- } else {
- return "", fmt.Errorf("invalid path value type")
- }
- }
- return strings.Join(parts, "/"), nil
+ return v1.RefTerm(r...)
}
-var varRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$")
-
func IsVarCompatibleString(s string) bool {
- return varRegexp.MatchString(s)
-}
-
-func (ref Ref) String() string {
- if len(ref) == 0 {
- return ""
- }
- buf := []string{ref[0].Value.String()}
- path := ref[1:]
- for _, p := range path {
- switch p := p.Value.(type) {
- case String:
- str := string(p)
- if varRegexp.MatchString(str) && len(buf) > 0 && !IsKeyword(str) {
- buf = append(buf, "."+str)
- } else {
- buf = append(buf, "["+p.String()+"]")
- }
- default:
- buf = append(buf, "["+p.String()+"]")
- }
- }
- return strings.Join(buf, "")
-}
-
-// OutputVars returns a VarSet containing variables that would be bound by evaluating
-// this expression in isolation.
-func (ref Ref) OutputVars() VarSet {
- vis := NewVarVisitor().WithParams(VarVisitorParams{SkipRefHead: true})
- vis.Walk(ref)
- return vis.Vars()
-}
-
-func (ref Ref) toArray() *Array {
- a := NewArray()
- for _, term := range ref {
- if _, ok := term.Value.(String); ok {
- a = a.Append(term)
- } else {
- a = a.Append(StringTerm(term.Value.String()))
- }
- }
- return a
+ return v1.IsVarCompatibleString(s)
}
// QueryIterator defines the interface for querying AST documents with references.
-type QueryIterator func(map[Var]Value, Value) error
+type QueryIterator = v1.QueryIterator
// ArrayTerm creates a new Term with an Array value.
func ArrayTerm(a ...*Term) *Term {
- return NewTerm(NewArray(a...))
+ return v1.ArrayTerm(a...)
}
// NewArray creates an Array with the terms provided. The array will
// use the provided term slice.
func NewArray(a ...*Term) *Array {
- hs := make([]int, len(a))
- for i, e := range a {
- hs[i] = e.Value.Hash()
- }
- arr := &Array{elems: a, hashs: hs, ground: termSliceIsGround(a)}
- arr.rehash()
- return arr
+ return v1.NewArray(a...)
}
// Array represents an array as defined by the language. Arrays are similar to the
// same types as defined by JSON with the exception that they can contain Vars
// and References.
-type Array struct {
- elems []*Term
- hashs []int // element hashes
- hash int
- ground bool
-}
-
-// Copy returns a deep copy of arr.
-func (arr *Array) Copy() *Array {
- cpy := make([]int, len(arr.elems))
- copy(cpy, arr.hashs)
- return &Array{
- elems: termSliceCopy(arr.elems),
- hashs: cpy,
- hash: arr.hash,
- ground: arr.IsGround()}
-}
-
-// Equal returns true if arr is equal to other.
-func (arr *Array) Equal(other Value) bool {
- return Compare(arr, other) == 0
-}
-
-// Compare compares arr to other, return <0, 0, or >0 if it is less than, equal to,
-// or greater than other.
-func (arr *Array) Compare(other Value) int {
- return Compare(arr, other)
-}
-
-// Find returns the value at the index or an out-of-range error.
-func (arr *Array) Find(path Ref) (Value, error) {
- if len(path) == 0 {
- return arr, nil
- }
- num, ok := path[0].Value.(Number)
- if !ok {
- return nil, errFindNotFound
- }
- i, ok := num.Int()
- if !ok {
- return nil, errFindNotFound
- }
- if i < 0 || i >= arr.Len() {
- return nil, errFindNotFound
- }
- return arr.Elem(i).Value.Find(path[1:])
-}
-
-// Get returns the element at pos or nil if not possible.
-func (arr *Array) Get(pos *Term) *Term {
- num, ok := pos.Value.(Number)
- if !ok {
- return nil
- }
-
- i, ok := num.Int()
- if !ok {
- return nil
- }
-
- if i >= 0 && i < len(arr.elems) {
- return arr.elems[i]
- }
-
- return nil
-}
-
-// Sorted returns a new Array that contains the sorted elements of arr.
-func (arr *Array) Sorted() *Array {
- cpy := make([]*Term, len(arr.elems))
- for i := range cpy {
- cpy[i] = arr.elems[i]
- }
- sort.Sort(termSlice(cpy))
- a := NewArray(cpy...)
- a.hashs = arr.hashs
- return a
-}
-
-// Hash returns the hash code for the Value.
-func (arr *Array) Hash() int {
- return arr.hash
-}
-
-// IsGround returns true if all of the Array elements are ground.
-func (arr *Array) IsGround() bool {
- return arr.ground
-}
-
-// MarshalJSON returns JSON encoded bytes representing arr.
-func (arr *Array) MarshalJSON() ([]byte, error) {
- if len(arr.elems) == 0 {
- return []byte(`[]`), nil
- }
- return json.Marshal(arr.elems)
-}
-
-func (arr *Array) String() string {
- var b strings.Builder
- b.WriteRune('[')
- for i, e := range arr.elems {
- if i > 0 {
- b.WriteString(", ")
- }
- b.WriteString(e.String())
- }
- b.WriteRune(']')
- return b.String()
-}
-
-// Len returns the number of elements in the array.
-func (arr *Array) Len() int {
- return len(arr.elems)
-}
-
-// Elem returns the element i of arr.
-func (arr *Array) Elem(i int) *Term {
- return arr.elems[i]
-}
-
-// Set sets the element i of arr.
-func (arr *Array) Set(i int, v *Term) {
- arr.set(i, v)
-}
-
-// rehash updates the cached hash of arr.
-func (arr *Array) rehash() {
- arr.hash = 0
- for _, h := range arr.hashs {
- arr.hash += h
- }
-}
-
-// set sets the element i of arr.
-func (arr *Array) set(i int, v *Term) {
- arr.ground = arr.ground && v.IsGround()
- arr.elems[i] = v
- arr.hashs[i] = v.Value.Hash()
- arr.rehash()
-}
-
-// Slice returns a slice of arr starting from i index to j. -1
-// indicates the end of the array. The returned value array is not a
-// copy and any modifications to either of arrays may be reflected to
-// the other.
-func (arr *Array) Slice(i, j int) *Array {
- var elems []*Term
- var hashs []int
- if j == -1 {
- elems = arr.elems[i:]
- hashs = arr.hashs[i:]
- } else {
- elems = arr.elems[i:j]
- hashs = arr.hashs[i:j]
- }
- // If arr is ground, the slice is, too.
- // If it's not, the slice could still be.
- gr := arr.ground || termSliceIsGround(elems)
-
- s := &Array{elems: elems, hashs: hashs, ground: gr}
- s.rehash()
- return s
-}
-
-// Iter calls f on each element in arr. If f returns an error,
-// iteration stops and the return value is the error.
-func (arr *Array) Iter(f func(*Term) error) error {
- for i := range arr.elems {
- if err := f(arr.elems[i]); err != nil {
- return err
- }
- }
- return nil
-}
-
-// Until calls f on each element in arr. If f returns true, iteration stops.
-func (arr *Array) Until(f func(*Term) bool) bool {
- err := arr.Iter(func(t *Term) error {
- if f(t) {
- return errStop
- }
- return nil
- })
- return err != nil
-}
-
-// Foreach calls f on each element in arr.
-func (arr *Array) Foreach(f func(*Term)) {
- _ = arr.Iter(func(t *Term) error {
- f(t)
- return nil
- }) // ignore error
-}
-
-// Append appends a term to arr, returning the appended array.
-func (arr *Array) Append(v *Term) *Array {
- cpy := *arr
- cpy.elems = append(arr.elems, v)
- cpy.hashs = append(arr.hashs, v.Value.Hash())
- cpy.hash = arr.hash + v.Value.Hash()
- cpy.ground = arr.ground && v.IsGround()
- return &cpy
-}
+type Array = v1.Array
// Set represents a set as defined by the language.
-type Set interface {
- Value
- Len() int
- Copy() Set
- Diff(Set) Set
- Intersect(Set) Set
- Union(Set) Set
- Add(*Term)
- Iter(func(*Term) error) error
- Until(func(*Term) bool) bool
- Foreach(func(*Term))
- Contains(*Term) bool
- Map(func(*Term) (*Term, error)) (Set, error)
- Reduce(*Term, func(*Term, *Term) (*Term, error)) (*Term, error)
- Sorted() *Array
- Slice() []*Term
-}
+type Set = v1.Set
// NewSet returns a new Set containing t.
func NewSet(t ...*Term) Set {
- s := newset(len(t))
- for i := range t {
- s.Add(t[i])
- }
- return s
+ return v1.NewSet(t...)
}
-func newset(n int) *set {
- var keys []*Term
- if n > 0 {
- keys = make([]*Term, 0, n)
- }
- return &set{
- elems: make(map[int]*Term, n),
- keys: keys,
- hash: 0,
- ground: true,
- sortGuard: new(sync.Once),
- }
-}
-
-// SetTerm returns a new Term representing a set containing terms t.
func SetTerm(t ...*Term) *Term {
- set := NewSet(t...)
- return &Term{
- Value: set,
- }
-}
-
-type set struct {
- elems map[int]*Term
- keys []*Term
- hash int
- ground bool
- sortGuard *sync.Once // Prevents race condition around sorting.
-}
-
-// Copy returns a deep copy of s.
-func (s *set) Copy() Set {
- cpy := newset(s.Len())
- s.Foreach(func(x *Term) {
- cpy.Add(x.Copy())
- })
- cpy.hash = s.hash
- cpy.ground = s.ground
- return cpy
-}
-
-// IsGround returns true if all terms in s are ground.
-func (s *set) IsGround() bool {
- return s.ground
-}
-
-// Hash returns a hash code for s.
-func (s *set) Hash() int {
- return s.hash
-}
-
-func (s *set) String() string {
- if s.Len() == 0 {
- return "set()"
- }
- var b strings.Builder
- b.WriteRune('{')
- for i := range s.sortedKeys() {
- if i > 0 {
- b.WriteString(", ")
- }
- b.WriteString(s.keys[i].Value.String())
- }
- b.WriteRune('}')
- return b.String()
-}
-
-func (s *set) sortedKeys() []*Term {
- s.sortGuard.Do(func() {
- sort.Sort(termSlice(s.keys))
- })
- return s.keys
-}
-
-// Compare compares s to other, return <0, 0, or >0 if it is less than, equal to,
-// or greater than other.
-func (s *set) Compare(other Value) int {
- o1 := sortOrder(s)
- o2 := sortOrder(other)
- if o1 < o2 {
- return -1
- } else if o1 > o2 {
- return 1
- }
- t := other.(*set)
- return termSliceCompare(s.sortedKeys(), t.sortedKeys())
-}
-
-// Find returns the set or dereferences the element itself.
-func (s *set) Find(path Ref) (Value, error) {
- if len(path) == 0 {
- return s, nil
- }
- if !s.Contains(path[0]) {
- return nil, errFindNotFound
- }
- return path[0].Value.Find(path[1:])
-}
-
-// Diff returns elements in s that are not in other.
-func (s *set) Diff(other Set) Set {
- r := NewSet()
- s.Foreach(func(x *Term) {
- if !other.Contains(x) {
- r.Add(x)
- }
- })
- return r
-}
-
-// Intersect returns the set containing elements in both s and other.
-func (s *set) Intersect(other Set) Set {
- o := other.(*set)
- n, m := s.Len(), o.Len()
- ss := s
- so := o
- if m < n {
- ss = o
- so = s
- n = m
- }
-
- r := newset(n)
- ss.Foreach(func(x *Term) {
- if so.Contains(x) {
- r.Add(x)
- }
- })
- return r
-}
-
-// Union returns the set containing all elements of s and other.
-func (s *set) Union(other Set) Set {
- r := NewSet()
- s.Foreach(func(x *Term) {
- r.Add(x)
- })
- other.Foreach(func(x *Term) {
- r.Add(x)
- })
- return r
-}
-
-// Add updates s to include t.
-func (s *set) Add(t *Term) {
- s.insert(t)
-}
-
-// Iter calls f on each element in s. If f returns an error, iteration stops
-// and the return value is the error.
-func (s *set) Iter(f func(*Term) error) error {
- for i := range s.sortedKeys() {
- if err := f(s.keys[i]); err != nil {
- return err
- }
- }
- return nil
-}
-
-var errStop = errors.New("stop")
-
-// Until calls f on each element in s. If f returns true, iteration stops.
-func (s *set) Until(f func(*Term) bool) bool {
- err := s.Iter(func(t *Term) error {
- if f(t) {
- return errStop
- }
- return nil
- })
- return err != nil
-}
-
-// Foreach calls f on each element in s.
-func (s *set) Foreach(f func(*Term)) {
- _ = s.Iter(func(t *Term) error {
- f(t)
- return nil
- }) // ignore error
-}
-
-// Map returns a new Set obtained by applying f to each value in s.
-func (s *set) Map(f func(*Term) (*Term, error)) (Set, error) {
- set := NewSet()
- err := s.Iter(func(x *Term) error {
- term, err := f(x)
- if err != nil {
- return err
- }
- set.Add(term)
- return nil
- })
- if err != nil {
- return nil, err
- }
- return set, nil
-}
-
-// Reduce returns a Term produced by applying f to each value in s. The first
-// argument to f is the reduced value (starting with i) and the second argument
-// to f is the element in s.
-func (s *set) Reduce(i *Term, f func(*Term, *Term) (*Term, error)) (*Term, error) {
- err := s.Iter(func(x *Term) error {
- var err error
- i, err = f(i, x)
- if err != nil {
- return err
- }
- return nil
- })
- return i, err
-}
-
-// Contains returns true if t is in s.
-func (s *set) Contains(t *Term) bool {
- return s.get(t) != nil
-}
-
-// Len returns the number of elements in the set.
-func (s *set) Len() int {
- return len(s.keys)
-}
-
-// MarshalJSON returns JSON encoded bytes representing s.
-func (s *set) MarshalJSON() ([]byte, error) {
- if s.keys == nil {
- return []byte(`[]`), nil
- }
- return json.Marshal(s.sortedKeys())
-}
-
-// Sorted returns an Array that contains the sorted elements of s.
-func (s *set) Sorted() *Array {
- cpy := make([]*Term, len(s.keys))
- copy(cpy, s.sortedKeys())
- return NewArray(cpy...)
-}
-
-// Slice returns a slice of terms contained in the set.
-func (s *set) Slice() []*Term {
- return s.sortedKeys()
-}
-
-// NOTE(philipc): We assume a many-readers, single-writer model here.
-// This method should NOT be used concurrently, or else we risk data races.
-func (s *set) insert(x *Term) {
- hash := x.Hash()
- insertHash := hash
- // This `equal` utility is duplicated and manually inlined a number of
- // time in this file. Inlining it avoids heap allocations, so it makes
- // a big performance difference: some operations like lookup become twice
- // as slow without it.
- var equal func(v Value) bool
-
- switch x := x.Value.(type) {
- case Null, Boolean, String, Var:
- equal = func(y Value) bool { return x == y }
- case Number:
- if xi, err := json.Number(x).Int64(); err == nil {
- equal = func(y Value) bool {
- if y, ok := y.(Number); ok {
- if yi, err := json.Number(y).Int64(); err == nil {
- return xi == yi
- }
- }
-
- return false
- }
- break
- }
-
- // We use big.Rat for comparing big numbers.
- // It replaces big.Float due to following reason:
- // big.Float comes with a default precision of 64, and setting a
- // larger precision results in more memory being allocated
- // (regardless of the actual number we are parsing with SetString).
- //
- // Note: If we're so close to zero that big.Float says we are zero, do
- // *not* big.Rat).SetString on the original string it'll potentially
- // take very long.
- var a *big.Rat
- fa, ok := new(big.Float).SetString(string(x))
- if !ok {
- panic("illegal value")
- }
- if fa.IsInt() {
- if i, _ := fa.Int64(); i == 0 {
- a = new(big.Rat).SetInt64(0)
- }
- }
- if a == nil {
- a, ok = new(big.Rat).SetString(string(x))
- if !ok {
- panic("illegal value")
- }
- }
-
- equal = func(b Value) bool {
- if bNum, ok := b.(Number); ok {
- var b *big.Rat
- fb, ok := new(big.Float).SetString(string(bNum))
- if !ok {
- panic("illegal value")
- }
- if fb.IsInt() {
- if i, _ := fb.Int64(); i == 0 {
- b = new(big.Rat).SetInt64(0)
- }
- }
- if b == nil {
- b, ok = new(big.Rat).SetString(string(bNum))
- if !ok {
- panic("illegal value")
- }
- }
-
- return a.Cmp(b) == 0
- }
-
- return false
- }
- default:
- equal = func(y Value) bool { return Compare(x, y) == 0 }
- }
-
- for curr, ok := s.elems[insertHash]; ok; {
- if equal(curr.Value) {
- return
- }
-
- insertHash++
- curr, ok = s.elems[insertHash]
- }
-
- s.elems[insertHash] = x
- // O(1) insertion, but we'll have to re-sort the keys later.
- s.keys = append(s.keys, x)
- // Reset the sync.Once instance.
- // See https://github.com/golang/go/issues/25955 for why we do it this way.
- s.sortGuard = new(sync.Once)
-
- s.hash += hash
- s.ground = s.ground && x.IsGround()
-}
-
-func (s *set) get(x *Term) *Term {
- hash := x.Hash()
- // This `equal` utility is duplicated and manually inlined a number of
- // time in this file. Inlining it avoids heap allocations, so it makes
- // a big performance difference: some operations like lookup become twice
- // as slow without it.
- var equal func(v Value) bool
-
- switch x := x.Value.(type) {
- case Null, Boolean, String, Var:
- equal = func(y Value) bool { return x == y }
- case Number:
- if xi, err := json.Number(x).Int64(); err == nil {
- equal = func(y Value) bool {
- if y, ok := y.(Number); ok {
- if yi, err := json.Number(y).Int64(); err == nil {
- return xi == yi
- }
- }
-
- return false
- }
- break
- }
-
- // We use big.Rat for comparing big numbers.
- // It replaces big.Float due to following reason:
- // big.Float comes with a default precision of 64, and setting a
- // larger precision results in more memory being allocated
- // (regardless of the actual number we are parsing with SetString).
- //
- // Note: If we're so close to zero that big.Float says we are zero, do
- // *not* big.Rat).SetString on the original string it'll potentially
- // take very long.
- var a *big.Rat
- fa, ok := new(big.Float).SetString(string(x))
- if !ok {
- panic("illegal value")
- }
- if fa.IsInt() {
- if i, _ := fa.Int64(); i == 0 {
- a = new(big.Rat).SetInt64(0)
- }
- }
- if a == nil {
- a, ok = new(big.Rat).SetString(string(x))
- if !ok {
- panic("illegal value")
- }
- }
-
- equal = func(b Value) bool {
- if bNum, ok := b.(Number); ok {
- var b *big.Rat
- fb, ok := new(big.Float).SetString(string(bNum))
- if !ok {
- panic("illegal value")
- }
- if fb.IsInt() {
- if i, _ := fb.Int64(); i == 0 {
- b = new(big.Rat).SetInt64(0)
- }
- }
- if b == nil {
- b, ok = new(big.Rat).SetString(string(bNum))
- if !ok {
- panic("illegal value")
- }
- }
-
- return a.Cmp(b) == 0
- }
- return false
-
- }
-
- default:
- equal = func(y Value) bool { return Compare(x, y) == 0 }
- }
-
- for curr, ok := s.elems[hash]; ok; {
- if equal(curr.Value) {
- return curr
- }
-
- hash++
- curr, ok = s.elems[hash]
- }
- return nil
+ return v1.SetTerm(t...)
}
// Object represents an object as defined by the language.
-type Object interface {
- Value
- Len() int
- Get(*Term) *Term
- Copy() Object
- Insert(*Term, *Term)
- Iter(func(*Term, *Term) error) error
- Until(func(*Term, *Term) bool) bool
- Foreach(func(*Term, *Term))
- Map(func(*Term, *Term) (*Term, *Term, error)) (Object, error)
- Diff(other Object) Object
- Intersect(other Object) [][3]*Term
- Merge(other Object) (Object, bool)
- MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool)
- Filter(filter Object) (Object, error)
- Keys() []*Term
- KeysIterator() ObjectKeysIterator
- get(k *Term) *objectElem // To prevent external implementations
-}
+type Object = v1.Object
// NewObject creates a new Object with t.
func NewObject(t ...[2]*Term) Object {
- obj := newobject(len(t))
- for i := range t {
- obj.Insert(t[i][0], t[i][1])
- }
- return obj
+ return v1.NewObject(t...)
}
// ObjectTerm creates a new Term with an Object value.
func ObjectTerm(o ...[2]*Term) *Term {
- return &Term{Value: NewObject(o...)}
-}
-
-func LazyObject(blob map[string]interface{}) Object {
- return &lazyObj{native: blob, cache: map[string]Value{}}
-}
-
-type lazyObj struct {
- strict Object
- cache map[string]Value
- native map[string]interface{}
-}
-
-func (l *lazyObj) force() Object {
- if l.strict == nil {
- l.strict = MustInterfaceToValue(l.native).(Object)
- // NOTE(jf): a possible performance improvement here would be to check how many
- // entries have been realized to AST in the cache, and if some threshold compared to the
- // total number of keys is exceeded, realize the remaining entries and set l.strict to l.cache.
- l.cache = map[string]Value{} // We don't need the cache anymore; drop it to free up memory.
- }
- return l.strict
-}
-
-func (l *lazyObj) Compare(other Value) int {
- o1 := sortOrder(l)
- o2 := sortOrder(other)
- if o1 < o2 {
- return -1
- } else if o2 < o1 {
- return 1
- }
- return l.force().Compare(other)
-}
-
-func (l *lazyObj) Copy() Object {
- return l
-}
-
-func (l *lazyObj) Diff(other Object) Object {
- return l.force().Diff(other)
-}
-
-func (l *lazyObj) Intersect(other Object) [][3]*Term {
- return l.force().Intersect(other)
-}
-
-func (l *lazyObj) Iter(f func(*Term, *Term) error) error {
- return l.force().Iter(f)
-}
-
-func (l *lazyObj) Until(f func(*Term, *Term) bool) bool {
- // NOTE(sr): there could be benefits in not forcing here -- if we abort because
- // `f` returns true, we could save us from converting the rest of the object.
- return l.force().Until(f)
-}
-
-func (l *lazyObj) Foreach(f func(*Term, *Term)) {
- l.force().Foreach(f)
-}
-
-func (l *lazyObj) Filter(filter Object) (Object, error) {
- return l.force().Filter(filter)
-}
-
-func (l *lazyObj) Map(f func(*Term, *Term) (*Term, *Term, error)) (Object, error) {
- return l.force().Map(f)
-}
-
-func (l *lazyObj) MarshalJSON() ([]byte, error) {
- return l.force().(*object).MarshalJSON()
-}
-
-func (l *lazyObj) Merge(other Object) (Object, bool) {
- return l.force().Merge(other)
-}
-
-func (l *lazyObj) MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) {
- return l.force().MergeWith(other, conflictResolver)
-}
-
-func (l *lazyObj) Len() int {
- return len(l.native)
+ return v1.ObjectTerm(o...)
}
-func (l *lazyObj) String() string {
- return l.force().String()
+func LazyObject(blob map[string]any) Object {
+ return v1.LazyObject(blob)
}
-// get is merely there to implement the Object interface -- `get` there serves the
-// purpose of prohibiting external implementations. It's never called for lazyObj.
-func (*lazyObj) get(*Term) *objectElem {
- return nil
-}
-
-func (l *lazyObj) Get(k *Term) *Term {
- if l.strict != nil {
- return l.strict.Get(k)
- }
- if s, ok := k.Value.(String); ok {
- if v, ok := l.cache[string(s)]; ok {
- return NewTerm(v)
- }
-
- if val, ok := l.native[string(s)]; ok {
- var converted Value
- switch val := val.(type) {
- case map[string]interface{}:
- converted = LazyObject(val)
- default:
- converted = MustInterfaceToValue(val)
- }
- l.cache[string(s)] = converted
- return NewTerm(converted)
- }
- }
- return nil
-}
-
-func (l *lazyObj) Insert(k, v *Term) {
- l.force().Insert(k, v)
-}
-
-func (*lazyObj) IsGround() bool {
- return true
-}
-
-func (l *lazyObj) Hash() int {
- return l.force().Hash()
-}
-
-func (l *lazyObj) Keys() []*Term {
- if l.strict != nil {
- return l.strict.Keys()
- }
- ret := make([]*Term, 0, len(l.native))
- for k := range l.native {
- ret = append(ret, StringTerm(k))
- }
- sort.Sort(termSlice(ret))
- return ret
-}
-
-func (l *lazyObj) KeysIterator() ObjectKeysIterator {
- return &lazyObjKeysIterator{keys: l.Keys()}
-}
-
-type lazyObjKeysIterator struct {
- current int
- keys []*Term
-}
-
-func (ki *lazyObjKeysIterator) Next() (*Term, bool) {
- if ki.current == len(ki.keys) {
- return nil, false
- }
- ki.current++
- return ki.keys[ki.current-1], true
-}
-
-func (l *lazyObj) Find(path Ref) (Value, error) {
- if l.strict != nil {
- return l.strict.Find(path)
- }
- if len(path) == 0 {
- return l, nil
- }
- if p0, ok := path[0].Value.(String); ok {
- if v, ok := l.cache[string(p0)]; ok {
- return v.Find(path[1:])
- }
-
- if v, ok := l.native[string(p0)]; ok {
- var converted Value
- switch v := v.(type) {
- case map[string]interface{}:
- converted = LazyObject(v)
- default:
- converted = MustInterfaceToValue(v)
- }
- l.cache[string(p0)] = converted
- return converted.Find(path[1:])
- }
- }
- return nil, errFindNotFound
-}
-
-type object struct {
- elems map[int]*objectElem
- keys objectElemSlice
- ground int // number of key and value grounds. Counting is
- // required to support insert's key-value replace.
- hash int
- sortGuard *sync.Once // Prevents race condition around sorting.
-}
-
-func newobject(n int) *object {
- var keys objectElemSlice
- if n > 0 {
- keys = make(objectElemSlice, 0, n)
- }
- return &object{
- elems: make(map[int]*objectElem, n),
- keys: keys,
- ground: 0,
- hash: 0,
- sortGuard: new(sync.Once),
- }
-}
-
-type objectElem struct {
- key *Term
- value *Term
- next *objectElem
-}
-
-type objectElemSlice []*objectElem
-
-func (s objectElemSlice) Less(i, j int) bool { return Compare(s[i].key.Value, s[j].key.Value) < 0 }
-func (s objectElemSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x }
-func (s objectElemSlice) Len() int { return len(s) }
-
// Item is a helper for constructing an tuple containing two Terms
// representing a key/value pair in an Object.
func Item(key, value *Term) [2]*Term {
- return [2]*Term{key, value}
-}
-
-func (obj *object) sortedKeys() objectElemSlice {
- obj.sortGuard.Do(func() {
- sort.Sort(obj.keys)
- })
- return obj.keys
-}
-
-// Compare compares obj to other, return <0, 0, or >0 if it is less than, equal to,
-// or greater than other.
-func (obj *object) Compare(other Value) int {
- if x, ok := other.(*lazyObj); ok {
- other = x.force()
- }
- o1 := sortOrder(obj)
- o2 := sortOrder(other)
- if o1 < o2 {
- return -1
- } else if o2 < o1 {
- return 1
- }
- a := obj
- b := other.(*object)
- // Ensure that keys are in canonical sorted order before use!
- akeys := a.sortedKeys()
- bkeys := b.sortedKeys()
- minLen := len(akeys)
- if len(b.keys) < len(akeys) {
- minLen = len(bkeys)
- }
- for i := 0; i < minLen; i++ {
- keysCmp := Compare(akeys[i].key, bkeys[i].key)
- if keysCmp < 0 {
- return -1
- }
- if keysCmp > 0 {
- return 1
- }
- valA := akeys[i].value
- valB := bkeys[i].value
- valCmp := Compare(valA, valB)
- if valCmp != 0 {
- return valCmp
- }
- }
- if len(akeys) < len(bkeys) {
- return -1
- }
- if len(bkeys) < len(akeys) {
- return 1
- }
- return 0
-}
-
-// Find returns the value at the key or undefined.
-func (obj *object) Find(path Ref) (Value, error) {
- if len(path) == 0 {
- return obj, nil
- }
- value := obj.Get(path[0])
- if value == nil {
- return nil, errFindNotFound
- }
- return value.Value.Find(path[1:])
-}
-
-func (obj *object) Insert(k, v *Term) {
- obj.insert(k, v)
-}
-
-// Get returns the value of k in obj if k exists, otherwise nil.
-func (obj *object) Get(k *Term) *Term {
- if elem := obj.get(k); elem != nil {
- return elem.value
- }
- return nil
-}
-
-// Hash returns the hash code for the Value.
-func (obj *object) Hash() int {
- return obj.hash
-}
-
-// IsGround returns true if all of the Object key/value pairs are ground.
-func (obj *object) IsGround() bool {
- return obj.ground == 2*len(obj.keys)
-}
-
-// Copy returns a deep copy of obj.
-func (obj *object) Copy() Object {
- cpy, _ := obj.Map(func(k, v *Term) (*Term, *Term, error) {
- return k.Copy(), v.Copy(), nil
- })
- cpy.(*object).hash = obj.hash
- return cpy
-}
-
-// Diff returns a new Object that contains only the key/value pairs that exist in obj.
-func (obj *object) Diff(other Object) Object {
- r := NewObject()
- obj.Foreach(func(k, v *Term) {
- if other.Get(k) == nil {
- r.Insert(k, v)
- }
- })
- return r
-}
-
-// Intersect returns a slice of term triplets that represent the intersection of keys
-// between obj and other. For each intersecting key, the values from obj and other are included
-// as the last two terms in the triplet (respectively).
-func (obj *object) Intersect(other Object) [][3]*Term {
- r := [][3]*Term{}
- obj.Foreach(func(k, v *Term) {
- if v2 := other.Get(k); v2 != nil {
- r = append(r, [3]*Term{k, v, v2})
- }
- })
- return r
-}
-
-// Iter calls the function f for each key-value pair in the object. If f
-// returns an error, iteration stops and the error is returned.
-func (obj *object) Iter(f func(*Term, *Term) error) error {
- for _, node := range obj.sortedKeys() {
- if err := f(node.key, node.value); err != nil {
- return err
- }
- }
- return nil
-}
-
-// Until calls f for each key-value pair in the object. If f returns
-// true, iteration stops and Until returns true. Otherwise, return
-// false.
-func (obj *object) Until(f func(*Term, *Term) bool) bool {
- err := obj.Iter(func(k, v *Term) error {
- if f(k, v) {
- return errStop
- }
- return nil
- })
- return err != nil
-}
-
-// Foreach calls f for each key-value pair in the object.
-func (obj *object) Foreach(f func(*Term, *Term)) {
- _ = obj.Iter(func(k, v *Term) error {
- f(k, v)
- return nil
- }) // ignore error
-}
-
-// Map returns a new Object constructed by mapping each element in the object
-// using the function f.
-func (obj *object) Map(f func(*Term, *Term) (*Term, *Term, error)) (Object, error) {
- cpy := newobject(obj.Len())
- err := obj.Iter(func(k, v *Term) error {
- var err error
- k, v, err = f(k, v)
- if err != nil {
- return err
- }
- cpy.insert(k, v)
- return nil
- })
- if err != nil {
- return nil, err
- }
- return cpy, nil
-}
-
-// Keys returns the keys of obj.
-func (obj *object) Keys() []*Term {
- keys := make([]*Term, len(obj.keys))
-
- for i, elem := range obj.sortedKeys() {
- keys[i] = elem.key
- }
-
- return keys
-}
-
-// Returns an iterator over the obj's keys.
-func (obj *object) KeysIterator() ObjectKeysIterator {
- return newobjectKeysIterator(obj)
-}
-
-// MarshalJSON returns JSON encoded bytes representing obj.
-func (obj *object) MarshalJSON() ([]byte, error) {
- sl := make([][2]*Term, obj.Len())
- for i, node := range obj.sortedKeys() {
- sl[i] = Item(node.key, node.value)
- }
- return json.Marshal(sl)
-}
-
-// Merge returns a new Object containing the non-overlapping keys of obj and other. If there are
-// overlapping keys between obj and other, the values of associated with the keys are merged. Only
-// objects can be merged with other objects. If the values cannot be merged, the second turn value
-// will be false.
-func (obj object) Merge(other Object) (Object, bool) {
- return obj.MergeWith(other, func(v1, v2 *Term) (*Term, bool) {
- obj1, ok1 := v1.Value.(Object)
- obj2, ok2 := v2.Value.(Object)
- if !ok1 || !ok2 {
- return nil, true
- }
- obj3, ok := obj1.Merge(obj2)
- if !ok {
- return nil, true
- }
- return NewTerm(obj3), false
- })
-}
-
-// MergeWith returns a new Object containing the merged keys of obj and other.
-// If there are overlapping keys between obj and other, the conflictResolver
-// is called. The conflictResolver can return a merged value and a boolean
-// indicating if the merge has failed and should stop.
-func (obj object) MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) {
- result := NewObject()
- stop := obj.Until(func(k, v *Term) bool {
- v2 := other.Get(k)
- // The key didn't exist in other, keep the original value
- if v2 == nil {
- result.Insert(k, v)
- return false
- }
-
- // The key exists in both, resolve the conflict if possible
- merged, stop := conflictResolver(v, v2)
- if !stop {
- result.Insert(k, merged)
- }
- return stop
- })
-
- if stop {
- return nil, false
- }
-
- // Copy in any values from other for keys that don't exist in obj
- other.Foreach(func(k, v *Term) {
- if v2 := obj.Get(k); v2 == nil {
- result.Insert(k, v)
- }
- })
- return result, true
-}
-
-// Filter returns a new object from values in obj where the keys are
-// found in filter. Array indices for values can be specified as
-// number strings.
-func (obj *object) Filter(filter Object) (Object, error) {
- filtered, err := filterObject(obj, filter)
- if err != nil {
- return nil, err
- }
- return filtered.(Object), nil
-}
-
-// Len returns the number of elements in the object.
-func (obj object) Len() int {
- return len(obj.keys)
-}
-
-func (obj object) String() string {
- var b strings.Builder
- b.WriteRune('{')
-
- for i, elem := range obj.sortedKeys() {
- if i > 0 {
- b.WriteString(", ")
- }
- b.WriteString(elem.key.String())
- b.WriteString(": ")
- b.WriteString(elem.value.String())
- }
- b.WriteRune('}')
- return b.String()
-}
-
-func (obj *object) get(k *Term) *objectElem {
- hash := k.Hash()
-
- // This `equal` utility is duplicated and manually inlined a number of
- // time in this file. Inlining it avoids heap allocations, so it makes
- // a big performance difference: some operations like lookup become twice
- // as slow without it.
- var equal func(v Value) bool
-
- switch x := k.Value.(type) {
- case Null, Boolean, String, Var:
- equal = func(y Value) bool { return x == y }
- case Number:
- if xi, err := json.Number(x).Int64(); err == nil {
- equal = func(y Value) bool {
- if y, ok := y.(Number); ok {
- if yi, err := json.Number(y).Int64(); err == nil {
- return xi == yi
- }
- }
-
- return false
- }
- break
- }
-
- // We use big.Rat for comparing big numbers.
- // It replaces big.Float due to following reason:
- // big.Float comes with a default precision of 64, and setting a
- // larger precision results in more memory being allocated
- // (regardless of the actual number we are parsing with SetString).
- //
- // Note: If we're so close to zero that big.Float says we are zero, do
- // *not* big.Rat).SetString on the original string it'll potentially
- // take very long.
- var a *big.Rat
- fa, ok := new(big.Float).SetString(string(x))
- if !ok {
- panic("illegal value")
- }
- if fa.IsInt() {
- if i, _ := fa.Int64(); i == 0 {
- a = new(big.Rat).SetInt64(0)
- }
- }
- if a == nil {
- a, ok = new(big.Rat).SetString(string(x))
- if !ok {
- panic("illegal value")
- }
- }
-
- equal = func(b Value) bool {
- if bNum, ok := b.(Number); ok {
- var b *big.Rat
- fb, ok := new(big.Float).SetString(string(bNum))
- if !ok {
- panic("illegal value")
- }
- if fb.IsInt() {
- if i, _ := fb.Int64(); i == 0 {
- b = new(big.Rat).SetInt64(0)
- }
- }
- if b == nil {
- b, ok = new(big.Rat).SetString(string(bNum))
- if !ok {
- panic("illegal value")
- }
- }
-
- return a.Cmp(b) == 0
- }
-
- return false
- }
- default:
- equal = func(y Value) bool { return Compare(x, y) == 0 }
- }
-
- for curr := obj.elems[hash]; curr != nil; curr = curr.next {
- if equal(curr.key.Value) {
- return curr
- }
- }
- return nil
-}
-
-// NOTE(philipc): We assume a many-readers, single-writer model here.
-// This method should NOT be used concurrently, or else we risk data races.
-func (obj *object) insert(k, v *Term) {
- hash := k.Hash()
- head := obj.elems[hash]
- // This `equal` utility is duplicated and manually inlined a number of
- // time in this file. Inlining it avoids heap allocations, so it makes
- // a big performance difference: some operations like lookup become twice
- // as slow without it.
- var equal func(v Value) bool
-
- switch x := k.Value.(type) {
- case Null, Boolean, String, Var:
- equal = func(y Value) bool { return x == y }
- case Number:
- if xi, err := json.Number(x).Int64(); err == nil {
- equal = func(y Value) bool {
- if y, ok := y.(Number); ok {
- if yi, err := json.Number(y).Int64(); err == nil {
- return xi == yi
- }
- }
-
- return false
- }
- break
- }
-
- // We use big.Rat for comparing big numbers.
- // It replaces big.Float due to following reason:
- // big.Float comes with a default precision of 64, and setting a
- // larger precision results in more memory being allocated
- // (regardless of the actual number we are parsing with SetString).
- //
- // Note: If we're so close to zero that big.Float says we are zero, do
- // *not* big.Rat).SetString on the original string it'll potentially
- // take very long.
- var a *big.Rat
- fa, ok := new(big.Float).SetString(string(x))
- if !ok {
- panic("illegal value")
- }
- if fa.IsInt() {
- if i, _ := fa.Int64(); i == 0 {
- a = new(big.Rat).SetInt64(0)
- }
- }
- if a == nil {
- a, ok = new(big.Rat).SetString(string(x))
- if !ok {
- panic("illegal value")
- }
- }
-
- equal = func(b Value) bool {
- if bNum, ok := b.(Number); ok {
- var b *big.Rat
- fb, ok := new(big.Float).SetString(string(bNum))
- if !ok {
- panic("illegal value")
- }
- if fb.IsInt() {
- if i, _ := fb.Int64(); i == 0 {
- b = new(big.Rat).SetInt64(0)
- }
- }
- if b == nil {
- b, ok = new(big.Rat).SetString(string(bNum))
- if !ok {
- panic("illegal value")
- }
- }
-
- return a.Cmp(b) == 0
- }
-
- return false
- }
- default:
- equal = func(y Value) bool { return Compare(x, y) == 0 }
- }
-
- for curr := head; curr != nil; curr = curr.next {
- if equal(curr.key.Value) {
- // The ground bit of the value may change in
- // replace, hence adjust the counter per old
- // and new value.
-
- if curr.value.IsGround() {
- obj.ground--
- }
- if v.IsGround() {
- obj.ground++
- }
-
- curr.value = v
-
- obj.rehash()
- return
- }
- }
- elem := &objectElem{
- key: k,
- value: v,
- next: head,
- }
- obj.elems[hash] = elem
- // O(1) insertion, but we'll have to re-sort the keys later.
- obj.keys = append(obj.keys, elem)
- // Reset the sync.Once instance.
- // See https://github.com/golang/go/issues/25955 for why we do it this way.
- obj.sortGuard = new(sync.Once)
- obj.hash += hash + v.Hash()
-
- if k.IsGround() {
- obj.ground++
- }
- if v.IsGround() {
- obj.ground++
- }
-}
-
-func (obj *object) rehash() {
- // obj.keys is considered truth, from which obj.hash and obj.elems are recalculated.
-
- obj.hash = 0
- obj.elems = make(map[int]*objectElem, len(obj.keys))
-
- for _, elem := range obj.keys {
- hash := elem.key.Hash()
- obj.hash += hash + elem.value.Hash()
- obj.elems[hash] = elem
- }
-}
-
-func filterObject(o Value, filter Value) (Value, error) {
- if filter.Compare(Null{}) == 0 {
- return o, nil
- }
-
- filteredObj, ok := filter.(*object)
- if !ok {
- return nil, fmt.Errorf("invalid filter value %q, expected an object", filter)
- }
-
- switch v := o.(type) {
- case String, Number, Boolean, Null:
- return o, nil
- case *Array:
- values := NewArray()
- for i := 0; i < v.Len(); i++ {
- subFilter := filteredObj.Get(StringTerm(strconv.Itoa(i)))
- if subFilter != nil {
- filteredValue, err := filterObject(v.Elem(i).Value, subFilter.Value)
- if err != nil {
- return nil, err
- }
- values = values.Append(NewTerm(filteredValue))
- }
- }
- return values, nil
- case Set:
- values := NewSet()
- err := v.Iter(func(t *Term) error {
- if filteredObj.Get(t) != nil {
- filteredValue, err := filterObject(t.Value, filteredObj.Get(t).Value)
- if err != nil {
- return err
- }
- values.Add(NewTerm(filteredValue))
- }
- return nil
- })
- return values, err
- case *object:
- values := NewObject()
-
- iterObj := v
- other := filteredObj
- if v.Len() < filteredObj.Len() {
- iterObj = filteredObj
- other = v
- }
-
- err := iterObj.Iter(func(key *Term, _ *Term) error {
- if other.Get(key) != nil {
- filteredValue, err := filterObject(v.Get(key).Value, filteredObj.Get(key).Value)
- if err != nil {
- return err
- }
- values.Insert(key, NewTerm(filteredValue))
- }
- return nil
- })
- return values, err
- default:
- return nil, fmt.Errorf("invalid object value type %q", v)
- }
+ return v1.Item(key, value)
}
// NOTE(philipc): The only way to get an ObjectKeyIterator should be
// from an Object. This ensures that the iterator can have implementation-
// specific details internally, with no contracts except to the very
// limited interface.
-type ObjectKeysIterator interface {
- Next() (*Term, bool)
-}
-
-type objectKeysIterator struct {
- obj *object
- numKeys int
- index int
-}
-
-func newobjectKeysIterator(o *object) ObjectKeysIterator {
- return &objectKeysIterator{
- obj: o,
- numKeys: o.Len(),
- index: 0,
- }
-}
-
-func (oki *objectKeysIterator) Next() (*Term, bool) {
- if oki.index == oki.numKeys || oki.numKeys == 0 {
- return nil, false
- }
- oki.index++
- return oki.obj.sortedKeys()[oki.index-1].key, true
-}
+type ObjectKeysIterator = v1.ObjectKeysIterator
// ArrayComprehension represents an array comprehension as defined in the language.
-type ArrayComprehension struct {
- Term *Term `json:"term"`
- Body Body `json:"body"`
-}
+type ArrayComprehension = v1.ArrayComprehension
// ArrayComprehensionTerm creates a new Term with an ArrayComprehension value.
func ArrayComprehensionTerm(term *Term, body Body) *Term {
- return &Term{
- Value: &ArrayComprehension{
- Term: term,
- Body: body,
- },
- }
-}
-
-// Copy returns a deep copy of ac.
-func (ac *ArrayComprehension) Copy() *ArrayComprehension {
- cpy := *ac
- cpy.Body = ac.Body.Copy()
- cpy.Term = ac.Term.Copy()
- return &cpy
-}
-
-// Equal returns true if ac is equal to other.
-func (ac *ArrayComprehension) Equal(other Value) bool {
- return Compare(ac, other) == 0
-}
-
-// Compare compares ac to other, return <0, 0, or >0 if it is less than, equal to,
-// or greater than other.
-func (ac *ArrayComprehension) Compare(other Value) int {
- return Compare(ac, other)
-}
-
-// Find returns the current value or a not found error.
-func (ac *ArrayComprehension) Find(path Ref) (Value, error) {
- if len(path) == 0 {
- return ac, nil
- }
- return nil, errFindNotFound
-}
-
-// Hash returns the hash code of the Value.
-func (ac *ArrayComprehension) Hash() int {
- return ac.Term.Hash() + ac.Body.Hash()
-}
-
-// IsGround returns true if the Term and Body are ground.
-func (ac *ArrayComprehension) IsGround() bool {
- return ac.Term.IsGround() && ac.Body.IsGround()
-}
-
-func (ac *ArrayComprehension) String() string {
- return "[" + ac.Term.String() + " | " + ac.Body.String() + "]"
+ return v1.ArrayComprehensionTerm(term, body)
}
// ObjectComprehension represents an object comprehension as defined in the language.
-type ObjectComprehension struct {
- Key *Term `json:"key"`
- Value *Term `json:"value"`
- Body Body `json:"body"`
-}
+type ObjectComprehension = v1.ObjectComprehension
// ObjectComprehensionTerm creates a new Term with an ObjectComprehension value.
func ObjectComprehensionTerm(key, value *Term, body Body) *Term {
- return &Term{
- Value: &ObjectComprehension{
- Key: key,
- Value: value,
- Body: body,
- },
- }
-}
-
-// Copy returns a deep copy of oc.
-func (oc *ObjectComprehension) Copy() *ObjectComprehension {
- cpy := *oc
- cpy.Body = oc.Body.Copy()
- cpy.Key = oc.Key.Copy()
- cpy.Value = oc.Value.Copy()
- return &cpy
-}
-
-// Equal returns true if oc is equal to other.
-func (oc *ObjectComprehension) Equal(other Value) bool {
- return Compare(oc, other) == 0
-}
-
-// Compare compares oc to other, return <0, 0, or >0 if it is less than, equal to,
-// or greater than other.
-func (oc *ObjectComprehension) Compare(other Value) int {
- return Compare(oc, other)
-}
-
-// Find returns the current value or a not found error.
-func (oc *ObjectComprehension) Find(path Ref) (Value, error) {
- if len(path) == 0 {
- return oc, nil
- }
- return nil, errFindNotFound
-}
-
-// Hash returns the hash code of the Value.
-func (oc *ObjectComprehension) Hash() int {
- return oc.Key.Hash() + oc.Value.Hash() + oc.Body.Hash()
-}
-
-// IsGround returns true if the Key, Value and Body are ground.
-func (oc *ObjectComprehension) IsGround() bool {
- return oc.Key.IsGround() && oc.Value.IsGround() && oc.Body.IsGround()
-}
-
-func (oc *ObjectComprehension) String() string {
- return "{" + oc.Key.String() + ": " + oc.Value.String() + " | " + oc.Body.String() + "}"
+ return v1.ObjectComprehensionTerm(key, value, body)
}
// SetComprehension represents a set comprehension as defined in the language.
-type SetComprehension struct {
- Term *Term `json:"term"`
- Body Body `json:"body"`
-}
+type SetComprehension = v1.SetComprehension
// SetComprehensionTerm creates a new Term with an SetComprehension value.
func SetComprehensionTerm(term *Term, body Body) *Term {
- return &Term{
- Value: &SetComprehension{
- Term: term,
- Body: body,
- },
- }
-}
-
-// Copy returns a deep copy of sc.
-func (sc *SetComprehension) Copy() *SetComprehension {
- cpy := *sc
- cpy.Body = sc.Body.Copy()
- cpy.Term = sc.Term.Copy()
- return &cpy
-}
-
-// Equal returns true if sc is equal to other.
-func (sc *SetComprehension) Equal(other Value) bool {
- return Compare(sc, other) == 0
-}
-
-// Compare compares sc to other, return <0, 0, or >0 if it is less than, equal to,
-// or greater than other.
-func (sc *SetComprehension) Compare(other Value) int {
- return Compare(sc, other)
-}
-
-// Find returns the current value or a not found error.
-func (sc *SetComprehension) Find(path Ref) (Value, error) {
- if len(path) == 0 {
- return sc, nil
- }
- return nil, errFindNotFound
-}
-
-// Hash returns the hash code of the Value.
-func (sc *SetComprehension) Hash() int {
- return sc.Term.Hash() + sc.Body.Hash()
-}
-
-// IsGround returns true if the Term and Body are ground.
-func (sc *SetComprehension) IsGround() bool {
- return sc.Term.IsGround() && sc.Body.IsGround()
-}
-
-func (sc *SetComprehension) String() string {
- return "{" + sc.Term.String() + " | " + sc.Body.String() + "}"
+ return v1.SetComprehensionTerm(term, body)
}
// Call represents as function call in the language.
-type Call []*Term
+type Call = v1.Call
// CallTerm returns a new Term with a Call value defined by terms. The first
// term is the operator and the rest are operands.
func CallTerm(terms ...*Term) *Term {
- return NewTerm(Call(terms))
-}
-
-// Copy returns a deep copy of c.
-func (c Call) Copy() Call {
- return termSliceCopy(c)
-}
-
-// Compare compares c to other, return <0, 0, or >0 if it is less than, equal to,
-// or greater than other.
-func (c Call) Compare(other Value) int {
- return Compare(c, other)
-}
-
-// Find returns the current value or a not found error.
-func (c Call) Find(Ref) (Value, error) {
- return nil, errFindNotFound
-}
-
-// Hash returns the hash code for the Value.
-func (c Call) Hash() int {
- return termSliceHash(c)
-}
-
-// IsGround returns true if the Value is ground.
-func (c Call) IsGround() bool {
- return termSliceIsGround(c)
-}
-
-// MakeExpr returns an ew Expr from this call.
-func (c Call) MakeExpr(output *Term) *Expr {
- terms := []*Term(c)
- return NewExpr(append(terms, output))
-}
-
-func (c Call) String() string {
- args := make([]string, len(c)-1)
- for i := 1; i < len(c); i++ {
- args[i-1] = c[i].String()
- }
- return fmt.Sprintf("%v(%v)", c[0], strings.Join(args, ", "))
-}
-
-func termSliceCopy(a []*Term) []*Term {
- cpy := make([]*Term, len(a))
- for i := range a {
- cpy[i] = a[i].Copy()
- }
- return cpy
-}
-
-func termSliceEqual(a, b []*Term) bool {
- if len(a) == len(b) {
- for i := range a {
- if !a[i].Equal(b[i]) {
- return false
- }
- }
- return true
- }
- return false
-}
-
-func termSliceHash(a []*Term) int {
- var hash int
- for _, v := range a {
- hash += v.Value.Hash()
- }
- return hash
-}
-
-func termSliceIsGround(a []*Term) bool {
- for _, v := range a {
- if !v.IsGround() {
- return false
- }
- }
- return true
-}
-
-// NOTE(tsandall): The unmarshalling errors in these functions are not
-// helpful for callers because they do not identify the source of the
-// unmarshalling error. Because OPA doesn't accept JSON describing ASTs
-// from callers, this is acceptable (for now). If that changes in the future,
-// the error messages should be revisited. The current approach focuses
-// on the happy path and treats all errors the same. If better error
-// reporting is needed, the error paths will need to be fleshed out.
-
-func unmarshalBody(b []interface{}) (Body, error) {
- buf := Body{}
- for _, e := range b {
- if m, ok := e.(map[string]interface{}); ok {
- expr := &Expr{}
- if err := unmarshalExpr(expr, m); err == nil {
- buf = append(buf, expr)
- continue
- }
- }
- goto unmarshal_error
- }
- return buf, nil
-unmarshal_error:
- return nil, fmt.Errorf("ast: unable to unmarshal body")
-}
-
-func unmarshalExpr(expr *Expr, v map[string]interface{}) error {
- if x, ok := v["negated"]; ok {
- if b, ok := x.(bool); ok {
- expr.Negated = b
- } else {
- return fmt.Errorf("ast: unable to unmarshal negated field with type: %T (expected true or false)", v["negated"])
- }
- }
- if generatedRaw, ok := v["generated"]; ok {
- if b, ok := generatedRaw.(bool); ok {
- expr.Generated = b
- } else {
- return fmt.Errorf("ast: unable to unmarshal generated field with type: %T (expected true or false)", v["generated"])
- }
- }
-
- if err := unmarshalExprIndex(expr, v); err != nil {
- return err
- }
- switch ts := v["terms"].(type) {
- case map[string]interface{}:
- t, err := unmarshalTerm(ts)
- if err != nil {
- return err
- }
- expr.Terms = t
- case []interface{}:
- terms, err := unmarshalTermSlice(ts)
- if err != nil {
- return err
- }
- expr.Terms = terms
- default:
- return fmt.Errorf(`ast: unable to unmarshal terms field with type: %T (expected {"value": ..., "type": ...} or [{"value": ..., "type": ...}, ...])`, v["terms"])
- }
- if x, ok := v["with"]; ok {
- if sl, ok := x.([]interface{}); ok {
- ws := make([]*With, len(sl))
- for i := range sl {
- var err error
- ws[i], err = unmarshalWith(sl[i])
- if err != nil {
- return err
- }
- }
- expr.With = ws
- }
- }
- if loc, ok := v["location"].(map[string]interface{}); ok {
- expr.Location = &Location{}
- if err := unmarshalLocation(expr.Location, loc); err != nil {
- return err
- }
- }
- return nil
-}
-
-func unmarshalLocation(loc *Location, v map[string]interface{}) error {
- if x, ok := v["file"]; ok {
- if s, ok := x.(string); ok {
- loc.File = s
- } else {
- return fmt.Errorf("ast: unable to unmarshal file field with type: %T (expected string)", v["file"])
- }
- }
- if x, ok := v["row"]; ok {
- if n, ok := x.(json.Number); ok {
- i64, err := n.Int64()
- if err != nil {
- return err
- }
- loc.Row = int(i64)
- } else {
- return fmt.Errorf("ast: unable to unmarshal row field with type: %T (expected number)", v["row"])
- }
- }
- if x, ok := v["col"]; ok {
- if n, ok := x.(json.Number); ok {
- i64, err := n.Int64()
- if err != nil {
- return err
- }
- loc.Col = int(i64)
- } else {
- return fmt.Errorf("ast: unable to unmarshal col field with type: %T (expected number)", v["col"])
- }
- }
-
- return nil
-}
-
-func unmarshalExprIndex(expr *Expr, v map[string]interface{}) error {
- if x, ok := v["index"]; ok {
- if n, ok := x.(json.Number); ok {
- i, err := n.Int64()
- if err == nil {
- expr.Index = int(i)
- return nil
- }
- }
- }
- return fmt.Errorf("ast: unable to unmarshal index field with type: %T (expected integer)", v["index"])
-}
-
-func unmarshalTerm(m map[string]interface{}) (*Term, error) {
- var term Term
-
- v, err := unmarshalValue(m)
- if err != nil {
- return nil, err
- }
- term.Value = v
-
- if loc, ok := m["location"].(map[string]interface{}); ok {
- term.Location = &Location{}
- if err := unmarshalLocation(term.Location, loc); err != nil {
- return nil, err
- }
- }
-
- return &term, nil
-}
-
-func unmarshalTermSlice(s []interface{}) ([]*Term, error) {
- buf := []*Term{}
- for _, x := range s {
- if m, ok := x.(map[string]interface{}); ok {
- t, err := unmarshalTerm(m)
- if err == nil {
- buf = append(buf, t)
- continue
- }
- return nil, err
- }
- return nil, fmt.Errorf("ast: unable to unmarshal term")
- }
- return buf, nil
-}
-
-func unmarshalTermSliceValue(d map[string]interface{}) ([]*Term, error) {
- if s, ok := d["value"].([]interface{}); ok {
- return unmarshalTermSlice(s)
- }
- return nil, fmt.Errorf(`ast: unable to unmarshal term (expected {"value": [...], "type": ...} where type is one of: ref, array, or set)`)
-}
-
-func unmarshalWith(i interface{}) (*With, error) {
- if m, ok := i.(map[string]interface{}); ok {
- tgt, _ := m["target"].(map[string]interface{})
- target, err := unmarshalTerm(tgt)
- if err == nil {
- val, _ := m["value"].(map[string]interface{})
- value, err := unmarshalTerm(val)
- if err == nil {
- return &With{
- Target: target,
- Value: value,
- }, nil
- }
- return nil, err
- }
- return nil, err
- }
- return nil, fmt.Errorf(`ast: unable to unmarshal with modifier (expected {"target": {...}, "value": {...}})`)
-}
-
-func unmarshalValue(d map[string]interface{}) (Value, error) {
- v := d["value"]
- switch d["type"] {
- case "null":
- return Null{}, nil
- case "boolean":
- if b, ok := v.(bool); ok {
- return Boolean(b), nil
- }
- case "number":
- if n, ok := v.(json.Number); ok {
- return Number(n), nil
- }
- case "string":
- if s, ok := v.(string); ok {
- return String(s), nil
- }
- case "var":
- if s, ok := v.(string); ok {
- return Var(s), nil
- }
- case "ref":
- if s, err := unmarshalTermSliceValue(d); err == nil {
- return Ref(s), nil
- }
- case "array":
- if s, err := unmarshalTermSliceValue(d); err == nil {
- return NewArray(s...), nil
- }
- case "set":
- if s, err := unmarshalTermSliceValue(d); err == nil {
- set := NewSet()
- for _, x := range s {
- set.Add(x)
- }
- return set, nil
- }
- case "object":
- if s, ok := v.([]interface{}); ok {
- buf := NewObject()
- for _, x := range s {
- if i, ok := x.([]interface{}); ok && len(i) == 2 {
- p, err := unmarshalTermSlice(i)
- if err == nil {
- buf.Insert(p[0], p[1])
- continue
- }
- }
- goto unmarshal_error
- }
- return buf, nil
- }
- case "arraycomprehension", "setcomprehension":
- if m, ok := v.(map[string]interface{}); ok {
- t, ok := m["term"].(map[string]interface{})
- if !ok {
- goto unmarshal_error
- }
-
- term, err := unmarshalTerm(t)
- if err != nil {
- goto unmarshal_error
- }
-
- b, ok := m["body"].([]interface{})
- if !ok {
- goto unmarshal_error
- }
-
- body, err := unmarshalBody(b)
- if err != nil {
- goto unmarshal_error
- }
-
- if d["type"] == "arraycomprehension" {
- return &ArrayComprehension{Term: term, Body: body}, nil
- }
- return &SetComprehension{Term: term, Body: body}, nil
- }
- case "objectcomprehension":
- if m, ok := v.(map[string]interface{}); ok {
- k, ok := m["key"].(map[string]interface{})
- if !ok {
- goto unmarshal_error
- }
-
- key, err := unmarshalTerm(k)
- if err != nil {
- goto unmarshal_error
- }
-
- v, ok := m["value"].(map[string]interface{})
- if !ok {
- goto unmarshal_error
- }
-
- value, err := unmarshalTerm(v)
- if err != nil {
- goto unmarshal_error
- }
-
- b, ok := m["body"].([]interface{})
- if !ok {
- goto unmarshal_error
- }
-
- body, err := unmarshalBody(b)
- if err != nil {
- goto unmarshal_error
- }
-
- return &ObjectComprehension{Key: key, Value: value, Body: body}, nil
- }
- case "call":
- if s, err := unmarshalTermSliceValue(d); err == nil {
- return Call(s), nil
- }
- }
-unmarshal_error:
- return nil, fmt.Errorf("ast: unable to unmarshal term")
+ return v1.CallTerm(terms...)
}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/transform.go b/vendor/github.com/open-policy-agent/opa/ast/transform.go
index 391a164860..8c03c48663 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/transform.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/transform.go
@@ -5,427 +5,42 @@
package ast
import (
- "fmt"
+ v1 "github.com/open-policy-agent/opa/v1/ast"
)
// Transformer defines the interface for transforming AST elements. If the
// transformer returns nil and does not indicate an error, the AST element will
// be set to nil and no transformations will be applied to children of the
// element.
-type Transformer interface {
- Transform(interface{}) (interface{}, error)
-}
+type Transformer = v1.Transformer
// Transform iterates the AST and calls the Transform function on the
// Transformer t for x before recursing.
-func Transform(t Transformer, x interface{}) (interface{}, error) {
-
- if term, ok := x.(*Term); ok {
- return Transform(t, term.Value)
- }
-
- y, err := t.Transform(x)
- if err != nil {
- return x, err
- }
-
- if y == nil {
- return nil, nil
- }
-
- var ok bool
- switch y := y.(type) {
- case *Module:
- p, err := Transform(t, y.Package)
- if err != nil {
- return nil, err
- }
- if y.Package, ok = p.(*Package); !ok {
- return nil, fmt.Errorf("illegal transform: %T != %T", y.Package, p)
- }
- for i := range y.Imports {
- imp, err := Transform(t, y.Imports[i])
- if err != nil {
- return nil, err
- }
- if y.Imports[i], ok = imp.(*Import); !ok {
- return nil, fmt.Errorf("illegal transform: %T != %T", y.Imports[i], imp)
- }
- }
- for i := range y.Rules {
- rule, err := Transform(t, y.Rules[i])
- if err != nil {
- return nil, err
- }
- if y.Rules[i], ok = rule.(*Rule); !ok {
- return nil, fmt.Errorf("illegal transform: %T != %T", y.Rules[i], rule)
- }
- }
- for i := range y.Annotations {
- a, err := Transform(t, y.Annotations[i])
- if err != nil {
- return nil, err
- }
- if y.Annotations[i], ok = a.(*Annotations); !ok {
- return nil, fmt.Errorf("illegal transform: %T != %T", y.Annotations[i], a)
- }
- }
- for i := range y.Comments {
- comment, err := Transform(t, y.Comments[i])
- if err != nil {
- return nil, err
- }
- if y.Comments[i], ok = comment.(*Comment); !ok {
- return nil, fmt.Errorf("illegal transform: %T != %T", y.Comments[i], comment)
- }
- }
- return y, nil
- case *Package:
- ref, err := Transform(t, y.Path)
- if err != nil {
- return nil, err
- }
- if y.Path, ok = ref.(Ref); !ok {
- return nil, fmt.Errorf("illegal transform: %T != %T", y.Path, ref)
- }
- return y, nil
- case *Import:
- y.Path, err = transformTerm(t, y.Path)
- if err != nil {
- return nil, err
- }
- if y.Alias, err = transformVar(t, y.Alias); err != nil {
- return nil, err
- }
- return y, nil
- case *Rule:
- if y.Head, err = transformHead(t, y.Head); err != nil {
- return nil, err
- }
- if y.Body, err = transformBody(t, y.Body); err != nil {
- return nil, err
- }
- if y.Else != nil {
- rule, err := Transform(t, y.Else)
- if err != nil {
- return nil, err
- }
- if y.Else, ok = rule.(*Rule); !ok {
- return nil, fmt.Errorf("illegal transform: %T != %T", y.Else, rule)
- }
- }
- return y, nil
- case *Head:
- if y.Reference, err = transformRef(t, y.Reference); err != nil {
- return nil, err
- }
- if y.Name, err = transformVar(t, y.Name); err != nil {
- return nil, err
- }
- if y.Args, err = transformArgs(t, y.Args); err != nil {
- return nil, err
- }
- if y.Key != nil {
- if y.Key, err = transformTerm(t, y.Key); err != nil {
- return nil, err
- }
- }
- if y.Value != nil {
- if y.Value, err = transformTerm(t, y.Value); err != nil {
- return nil, err
- }
- }
- return y, nil
- case Args:
- for i := range y {
- if y[i], err = transformTerm(t, y[i]); err != nil {
- return nil, err
- }
- }
- return y, nil
- case Body:
- for i, e := range y {
- e, err := Transform(t, e)
- if err != nil {
- return nil, err
- }
- if y[i], ok = e.(*Expr); !ok {
- return nil, fmt.Errorf("illegal transform: %T != %T", y[i], e)
- }
- }
- return y, nil
- case *Expr:
- switch ts := y.Terms.(type) {
- case *SomeDecl:
- decl, err := Transform(t, ts)
- if err != nil {
- return nil, err
- }
- if y.Terms, ok = decl.(*SomeDecl); !ok {
- return nil, fmt.Errorf("illegal transform: %T != %T", y, decl)
- }
- return y, nil
- case []*Term:
- for i := range ts {
- if ts[i], err = transformTerm(t, ts[i]); err != nil {
- return nil, err
- }
- }
- case *Term:
- if y.Terms, err = transformTerm(t, ts); err != nil {
- return nil, err
- }
- case *Every:
- if ts.Key != nil {
- ts.Key, err = transformTerm(t, ts.Key)
- if err != nil {
- return nil, err
- }
- }
- ts.Value, err = transformTerm(t, ts.Value)
- if err != nil {
- return nil, err
- }
- ts.Domain, err = transformTerm(t, ts.Domain)
- if err != nil {
- return nil, err
- }
- ts.Body, err = transformBody(t, ts.Body)
- if err != nil {
- return nil, err
- }
- y.Terms = ts
- }
- for i, w := range y.With {
- w, err := Transform(t, w)
- if err != nil {
- return nil, err
- }
- if y.With[i], ok = w.(*With); !ok {
- return nil, fmt.Errorf("illegal transform: %T != %T", y.With[i], w)
- }
- }
- return y, nil
- case *With:
- if y.Target, err = transformTerm(t, y.Target); err != nil {
- return nil, err
- }
- if y.Value, err = transformTerm(t, y.Value); err != nil {
- return nil, err
- }
- return y, nil
- case Ref:
- for i, term := range y {
- if y[i], err = transformTerm(t, term); err != nil {
- return nil, err
- }
- }
- return y, nil
- case *object:
- return y.Map(func(k, v *Term) (*Term, *Term, error) {
- k, err := transformTerm(t, k)
- if err != nil {
- return nil, nil, err
- }
- v, err = transformTerm(t, v)
- if err != nil {
- return nil, nil, err
- }
- return k, v, nil
- })
- case *Array:
- for i := 0; i < y.Len(); i++ {
- v, err := transformTerm(t, y.Elem(i))
- if err != nil {
- return nil, err
- }
- y.set(i, v)
- }
- return y, nil
- case Set:
- y, err = y.Map(func(term *Term) (*Term, error) {
- return transformTerm(t, term)
- })
- if err != nil {
- return nil, err
- }
- return y, nil
- case *ArrayComprehension:
- if y.Term, err = transformTerm(t, y.Term); err != nil {
- return nil, err
- }
- if y.Body, err = transformBody(t, y.Body); err != nil {
- return nil, err
- }
- return y, nil
- case *ObjectComprehension:
- if y.Key, err = transformTerm(t, y.Key); err != nil {
- return nil, err
- }
- if y.Value, err = transformTerm(t, y.Value); err != nil {
- return nil, err
- }
- if y.Body, err = transformBody(t, y.Body); err != nil {
- return nil, err
- }
- return y, nil
- case *SetComprehension:
- if y.Term, err = transformTerm(t, y.Term); err != nil {
- return nil, err
- }
- if y.Body, err = transformBody(t, y.Body); err != nil {
- return nil, err
- }
- return y, nil
- case Call:
- for i := range y {
- if y[i], err = transformTerm(t, y[i]); err != nil {
- return nil, err
- }
- }
- return y, nil
- default:
- return y, nil
- }
+func Transform(t Transformer, x any) (any, error) {
+ return v1.Transform(t, x)
}
// TransformRefs calls the function f on all references under x.
-func TransformRefs(x interface{}, f func(Ref) (Value, error)) (interface{}, error) {
- t := &GenericTransformer{func(x interface{}) (interface{}, error) {
- if r, ok := x.(Ref); ok {
- return f(r)
- }
- return x, nil
- }}
- return Transform(t, x)
+func TransformRefs(x any, f func(Ref) (Value, error)) (any, error) {
+ return v1.TransformRefs(x, f)
}
// TransformVars calls the function f on all vars under x.
-func TransformVars(x interface{}, f func(Var) (Value, error)) (interface{}, error) {
- t := &GenericTransformer{func(x interface{}) (interface{}, error) {
- if v, ok := x.(Var); ok {
- return f(v)
- }
- return x, nil
- }}
- return Transform(t, x)
+func TransformVars(x any, f func(Var) (Value, error)) (any, error) {
+ return v1.TransformVars(x, f)
}
// TransformComprehensions calls the functio nf on all comprehensions under x.
-func TransformComprehensions(x interface{}, f func(interface{}) (Value, error)) (interface{}, error) {
- t := &GenericTransformer{func(x interface{}) (interface{}, error) {
- switch x := x.(type) {
- case *ArrayComprehension:
- return f(x)
- case *SetComprehension:
- return f(x)
- case *ObjectComprehension:
- return f(x)
- }
- return x, nil
- }}
- return Transform(t, x)
+func TransformComprehensions(x any, f func(any) (Value, error)) (any, error) {
+ return v1.TransformComprehensions(x, f)
}
// GenericTransformer implements the Transformer interface to provide a utility
// to transform AST nodes using a closure.
-type GenericTransformer struct {
- f func(interface{}) (interface{}, error)
-}
+type GenericTransformer = v1.GenericTransformer
// NewGenericTransformer returns a new GenericTransformer that will transform
// AST nodes using the function f.
-func NewGenericTransformer(f func(x interface{}) (interface{}, error)) *GenericTransformer {
- return &GenericTransformer{
- f: f,
- }
-}
-
-// Transform calls the function f on the GenericTransformer.
-func (t *GenericTransformer) Transform(x interface{}) (interface{}, error) {
- return t.f(x)
-}
-
-func transformHead(t Transformer, head *Head) (*Head, error) {
- y, err := Transform(t, head)
- if err != nil {
- return nil, err
- }
- h, ok := y.(*Head)
- if !ok {
- return nil, fmt.Errorf("illegal transform: %T != %T", head, y)
- }
- return h, nil
-}
-
-func transformArgs(t Transformer, args Args) (Args, error) {
- y, err := Transform(t, args)
- if err != nil {
- return nil, err
- }
- a, ok := y.(Args)
- if !ok {
- return nil, fmt.Errorf("illegal transform: %T != %T", args, y)
- }
- return a, nil
-}
-
-func transformBody(t Transformer, body Body) (Body, error) {
- y, err := Transform(t, body)
- if err != nil {
- return nil, err
- }
- r, ok := y.(Body)
- if !ok {
- return nil, fmt.Errorf("illegal transform: %T != %T", body, y)
- }
- return r, nil
-}
-
-func transformTerm(t Transformer, term *Term) (*Term, error) {
- v, err := transformValue(t, term.Value)
- if err != nil {
- return nil, err
- }
- r := &Term{
- Value: v,
- Location: term.Location,
- }
- return r, nil
-}
-
-func transformValue(t Transformer, v Value) (Value, error) {
- v1, err := Transform(t, v)
- if err != nil {
- return nil, err
- }
- r, ok := v1.(Value)
- if !ok {
- return nil, fmt.Errorf("illegal transform: %T != %T", v, v1)
- }
- return r, nil
-}
-
-func transformVar(t Transformer, v Var) (Var, error) {
- v1, err := Transform(t, v)
- if err != nil {
- return "", err
- }
- r, ok := v1.(Var)
- if !ok {
- return "", fmt.Errorf("illegal transform: %T != %T", v, v1)
- }
- return r, nil
-}
-
-func transformRef(t Transformer, r Ref) (Ref, error) {
- r1, err := Transform(t, r)
- if err != nil {
- return nil, err
- }
- r2, ok := r1.(Ref)
- if !ok {
- return nil, fmt.Errorf("illegal transform: %T != %T", r, r2)
- }
- return r2, nil
+func NewGenericTransformer(f func(x any) (any, error)) *GenericTransformer {
+ return v1.NewGenericTransformer(f)
}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/unify.go b/vendor/github.com/open-policy-agent/opa/ast/unify.go
index 60244974a9..3cb260272a 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/unify.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/unify.go
@@ -4,232 +4,11 @@
package ast
-func isRefSafe(ref Ref, safe VarSet) bool {
- switch head := ref[0].Value.(type) {
- case Var:
- return safe.Contains(head)
- case Call:
- return isCallSafe(head, safe)
- default:
- for v := range ref[0].Vars() {
- if !safe.Contains(v) {
- return false
- }
- }
- return true
- }
-}
-
-func isCallSafe(call Call, safe VarSet) bool {
- vis := NewVarVisitor().WithParams(SafetyCheckVisitorParams)
- vis.Walk(call)
- unsafe := vis.Vars().Diff(safe)
- return len(unsafe) == 0
-}
+import v1 "github.com/open-policy-agent/opa/v1/ast"
// Unify returns a set of variables that will be unified when the equality expression defined by
// terms a and b is evaluated. The unifier assumes that variables in the VarSet safe are already
// unified.
func Unify(safe VarSet, a *Term, b *Term) VarSet {
- u := &unifier{
- safe: safe,
- unified: VarSet{},
- unknown: map[Var]VarSet{},
- }
- u.unify(a, b)
- return u.unified
-}
-
-type unifier struct {
- safe VarSet
- unified VarSet
- unknown map[Var]VarSet
-}
-
-func (u *unifier) isSafe(x Var) bool {
- return u.safe.Contains(x) || u.unified.Contains(x)
-}
-
-func (u *unifier) unify(a *Term, b *Term) {
-
- switch a := a.Value.(type) {
-
- case Var:
- switch b := b.Value.(type) {
- case Var:
- if u.isSafe(b) {
- u.markSafe(a)
- } else if u.isSafe(a) {
- u.markSafe(b)
- } else {
- u.markUnknown(a, b)
- u.markUnknown(b, a)
- }
- case *Array, Object:
- u.unifyAll(a, b)
- case Ref:
- if isRefSafe(b, u.safe) {
- u.markSafe(a)
- }
- case Call:
- if isCallSafe(b, u.safe) {
- u.markSafe(a)
- }
- default:
- u.markSafe(a)
- }
-
- case Ref:
- if isRefSafe(a, u.safe) {
- switch b := b.Value.(type) {
- case Var:
- u.markSafe(b)
- case *Array, Object:
- u.markAllSafe(b)
- }
- }
-
- case Call:
- if isCallSafe(a, u.safe) {
- switch b := b.Value.(type) {
- case Var:
- u.markSafe(b)
- case *Array, Object:
- u.markAllSafe(b)
- }
- }
-
- case *ArrayComprehension:
- switch b := b.Value.(type) {
- case Var:
- u.markSafe(b)
- case *Array:
- u.markAllSafe(b)
- }
- case *ObjectComprehension:
- switch b := b.Value.(type) {
- case Var:
- u.markSafe(b)
- case *object:
- u.markAllSafe(b)
- }
- case *SetComprehension:
- switch b := b.Value.(type) {
- case Var:
- u.markSafe(b)
- }
-
- case *Array:
- switch b := b.Value.(type) {
- case Var:
- u.unifyAll(b, a)
- case *ArrayComprehension, *ObjectComprehension, *SetComprehension:
- u.markAllSafe(a)
- case Ref:
- if isRefSafe(b, u.safe) {
- u.markAllSafe(a)
- }
- case Call:
- if isCallSafe(b, u.safe) {
- u.markAllSafe(a)
- }
- case *Array:
- if a.Len() == b.Len() {
- for i := 0; i < a.Len(); i++ {
- u.unify(a.Elem(i), b.Elem(i))
- }
- }
- }
-
- case *object:
- switch b := b.Value.(type) {
- case Var:
- u.unifyAll(b, a)
- case Ref:
- if isRefSafe(b, u.safe) {
- u.markAllSafe(a)
- }
- case Call:
- if isCallSafe(b, u.safe) {
- u.markAllSafe(a)
- }
- case *object:
- if a.Len() == b.Len() {
- _ = a.Iter(func(k, v *Term) error {
- if v2 := b.Get(k); v2 != nil {
- u.unify(v, v2)
- }
- return nil
- }) // impossible to return error
- }
- }
-
- default:
- switch b := b.Value.(type) {
- case Var:
- u.markSafe(b)
- }
- }
-}
-
-func (u *unifier) markAllSafe(x Value) {
- vis := u.varVisitor()
- vis.Walk(x)
- for v := range vis.Vars() {
- u.markSafe(v)
- }
-}
-
-func (u *unifier) markSafe(x Var) {
- u.unified.Add(x)
-
- // Add dependencies of 'x' to safe set
- vs := u.unknown[x]
- delete(u.unknown, x)
- for v := range vs {
- u.markSafe(v)
- }
-
- // Add dependants of 'x' to safe set if they have no more
- // dependencies.
- for v, deps := range u.unknown {
- if deps.Contains(x) {
- delete(deps, x)
- if len(deps) == 0 {
- u.markSafe(v)
- }
- }
- }
-}
-
-func (u *unifier) markUnknown(a, b Var) {
- if _, ok := u.unknown[a]; !ok {
- u.unknown[a] = NewVarSet()
- }
- u.unknown[a].Add(b)
-}
-
-func (u *unifier) unifyAll(a Var, b Value) {
- if u.isSafe(a) {
- u.markAllSafe(b)
- } else {
- vis := u.varVisitor()
- vis.Walk(b)
- unsafe := vis.Vars().Diff(u.safe).Diff(u.unified)
- if len(unsafe) == 0 {
- u.markSafe(a)
- } else {
- for v := range unsafe {
- u.markUnknown(a, v)
- }
- }
- }
-}
-
-func (u *unifier) varVisitor() *VarVisitor {
- return NewVarVisitor().WithParams(VarVisitorParams{
- SkipRefHead: true,
- SkipObjectKeys: true,
- SkipClosures: true,
- })
+ return v1.Unify(safe, a, b)
}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/varset.go b/vendor/github.com/open-policy-agent/opa/ast/varset.go
index 14f531494b..9e7db8efda 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/varset.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/varset.go
@@ -5,96 +5,13 @@
package ast
import (
- "fmt"
- "sort"
+ v1 "github.com/open-policy-agent/opa/v1/ast"
)
// VarSet represents a set of variables.
-type VarSet map[Var]struct{}
+type VarSet = v1.VarSet
// NewVarSet returns a new VarSet containing the specified variables.
func NewVarSet(vs ...Var) VarSet {
- s := VarSet{}
- for _, v := range vs {
- s.Add(v)
- }
- return s
-}
-
-// Add updates the set to include the variable "v".
-func (s VarSet) Add(v Var) {
- s[v] = struct{}{}
-}
-
-// Contains returns true if the set contains the variable "v".
-func (s VarSet) Contains(v Var) bool {
- _, ok := s[v]
- return ok
-}
-
-// Copy returns a shallow copy of the VarSet.
-func (s VarSet) Copy() VarSet {
- cpy := VarSet{}
- for v := range s {
- cpy.Add(v)
- }
- return cpy
-}
-
-// Diff returns a VarSet containing variables in s that are not in vs.
-func (s VarSet) Diff(vs VarSet) VarSet {
- r := VarSet{}
- for v := range s {
- if !vs.Contains(v) {
- r.Add(v)
- }
- }
- return r
-}
-
-// Equal returns true if s contains exactly the same elements as vs.
-func (s VarSet) Equal(vs VarSet) bool {
- if len(s.Diff(vs)) > 0 {
- return false
- }
- return len(vs.Diff(s)) == 0
-}
-
-// Intersect returns a VarSet containing variables in s that are in vs.
-func (s VarSet) Intersect(vs VarSet) VarSet {
- r := VarSet{}
- for v := range s {
- if vs.Contains(v) {
- r.Add(v)
- }
- }
- return r
-}
-
-// Sorted returns a sorted slice of vars from s.
-func (s VarSet) Sorted() []Var {
- sorted := make([]Var, 0, len(s))
- for v := range s {
- sorted = append(sorted, v)
- }
- sort.Slice(sorted, func(i, j int) bool {
- return sorted[i].Compare(sorted[j]) < 0
- })
- return sorted
-}
-
-// Update merges the other VarSet into this VarSet.
-func (s VarSet) Update(vs VarSet) {
- for v := range vs {
- s.Add(v)
- }
-}
-
-func (s VarSet) String() string {
- tmp := make([]string, 0, len(s))
- for v := range s {
- tmp = append(tmp, string(v))
- }
- sort.Strings(tmp)
- return fmt.Sprintf("%v", tmp)
+ return v1.NewVarSet(vs...)
}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/visit.go b/vendor/github.com/open-policy-agent/opa/ast/visit.go
index d83c31149e..f4f2459ecc 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/visit.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/visit.go
@@ -4,780 +4,120 @@
package ast
+import v1 "github.com/open-policy-agent/opa/v1/ast"
+
// Visitor defines the interface for iterating AST elements. The Visit function
// can return a Visitor w which will be used to visit the children of the AST
// element v. If the Visit function returns nil, the children will not be
// visited.
// Deprecated: use GenericVisitor or another visitor implementation
-type Visitor interface {
- Visit(v interface{}) (w Visitor)
-}
+type Visitor = v1.Visitor
// BeforeAndAfterVisitor wraps Visitor to provide hooks for being called before
// and after the AST has been visited.
// Deprecated: use GenericVisitor or another visitor implementation
-type BeforeAndAfterVisitor interface {
- Visitor
- Before(x interface{})
- After(x interface{})
-}
+type BeforeAndAfterVisitor = v1.BeforeAndAfterVisitor
// Walk iterates the AST by calling the Visit function on the Visitor
// v for x before recursing.
// Deprecated: use GenericVisitor.Walk
-func Walk(v Visitor, x interface{}) {
- if bav, ok := v.(BeforeAndAfterVisitor); !ok {
- walk(v, x)
- } else {
- bav.Before(x)
- defer bav.After(x)
- walk(bav, x)
- }
+func Walk(v Visitor, x any) {
+ v1.Walk(v, x)
}
// WalkBeforeAndAfter iterates the AST by calling the Visit function on the
// Visitor v for x before recursing.
// Deprecated: use GenericVisitor.Walk
-func WalkBeforeAndAfter(v BeforeAndAfterVisitor, x interface{}) {
- Walk(v, x)
-}
-
-func walk(v Visitor, x interface{}) {
- w := v.Visit(x)
- if w == nil {
- return
- }
- switch x := x.(type) {
- case *Module:
- Walk(w, x.Package)
- for i := range x.Imports {
- Walk(w, x.Imports[i])
- }
- for i := range x.Rules {
- Walk(w, x.Rules[i])
- }
- for i := range x.Annotations {
- Walk(w, x.Annotations[i])
- }
- for i := range x.Comments {
- Walk(w, x.Comments[i])
- }
- case *Package:
- Walk(w, x.Path)
- case *Import:
- Walk(w, x.Path)
- Walk(w, x.Alias)
- case *Rule:
- Walk(w, x.Head)
- Walk(w, x.Body)
- if x.Else != nil {
- Walk(w, x.Else)
- }
- case *Head:
- Walk(w, x.Name)
- Walk(w, x.Args)
- if x.Key != nil {
- Walk(w, x.Key)
- }
- if x.Value != nil {
- Walk(w, x.Value)
- }
- case Body:
- for i := range x {
- Walk(w, x[i])
- }
- case Args:
- for i := range x {
- Walk(w, x[i])
- }
- case *Expr:
- switch ts := x.Terms.(type) {
- case *Term, *SomeDecl, *Every:
- Walk(w, ts)
- case []*Term:
- for i := range ts {
- Walk(w, ts[i])
- }
- }
- for i := range x.With {
- Walk(w, x.With[i])
- }
- case *With:
- Walk(w, x.Target)
- Walk(w, x.Value)
- case *Term:
- Walk(w, x.Value)
- case Ref:
- for i := range x {
- Walk(w, x[i])
- }
- case *object:
- x.Foreach(func(k, vv *Term) {
- Walk(w, k)
- Walk(w, vv)
- })
- case *Array:
- x.Foreach(func(t *Term) {
- Walk(w, t)
- })
- case Set:
- x.Foreach(func(t *Term) {
- Walk(w, t)
- })
- case *ArrayComprehension:
- Walk(w, x.Term)
- Walk(w, x.Body)
- case *ObjectComprehension:
- Walk(w, x.Key)
- Walk(w, x.Value)
- Walk(w, x.Body)
- case *SetComprehension:
- Walk(w, x.Term)
- Walk(w, x.Body)
- case Call:
- for i := range x {
- Walk(w, x[i])
- }
- case *Every:
- if x.Key != nil {
- Walk(w, x.Key)
- }
- Walk(w, x.Value)
- Walk(w, x.Domain)
- Walk(w, x.Body)
- case *SomeDecl:
- for i := range x.Symbols {
- Walk(w, x.Symbols[i])
- }
- }
+func WalkBeforeAndAfter(v BeforeAndAfterVisitor, x any) {
+ v1.WalkBeforeAndAfter(v, x)
}
// WalkVars calls the function f on all vars under x. If the function f
// returns true, AST nodes under the last node will not be visited.
-func WalkVars(x interface{}, f func(Var) bool) {
- vis := &GenericVisitor{func(x interface{}) bool {
- if v, ok := x.(Var); ok {
- return f(v)
- }
- return false
- }}
- vis.Walk(x)
+func WalkVars(x any, f func(Var) bool) {
+ v1.WalkVars(x, f)
}
// WalkClosures calls the function f on all closures under x. If the function f
// returns true, AST nodes under the last node will not be visited.
-func WalkClosures(x interface{}, f func(interface{}) bool) {
- vis := &GenericVisitor{func(x interface{}) bool {
- switch x := x.(type) {
- case *ArrayComprehension, *ObjectComprehension, *SetComprehension, *Every:
- return f(x)
- }
- return false
- }}
- vis.Walk(x)
+func WalkClosures(x any, f func(any) bool) {
+ v1.WalkClosures(x, f)
}
// WalkRefs calls the function f on all references under x. If the function f
// returns true, AST nodes under the last node will not be visited.
-func WalkRefs(x interface{}, f func(Ref) bool) {
- vis := &GenericVisitor{func(x interface{}) bool {
- if r, ok := x.(Ref); ok {
- return f(r)
- }
- return false
- }}
- vis.Walk(x)
+func WalkRefs(x any, f func(Ref) bool) {
+ v1.WalkRefs(x, f)
}
// WalkTerms calls the function f on all terms under x. If the function f
// returns true, AST nodes under the last node will not be visited.
-func WalkTerms(x interface{}, f func(*Term) bool) {
- vis := &GenericVisitor{func(x interface{}) bool {
- if term, ok := x.(*Term); ok {
- return f(term)
- }
- return false
- }}
- vis.Walk(x)
+func WalkTerms(x any, f func(*Term) bool) {
+ v1.WalkTerms(x, f)
}
// WalkWiths calls the function f on all with modifiers under x. If the function f
// returns true, AST nodes under the last node will not be visited.
-func WalkWiths(x interface{}, f func(*With) bool) {
- vis := &GenericVisitor{func(x interface{}) bool {
- if w, ok := x.(*With); ok {
- return f(w)
- }
- return false
- }}
- vis.Walk(x)
+func WalkWiths(x any, f func(*With) bool) {
+ v1.WalkWiths(x, f)
}
// WalkExprs calls the function f on all expressions under x. If the function f
// returns true, AST nodes under the last node will not be visited.
-func WalkExprs(x interface{}, f func(*Expr) bool) {
- vis := &GenericVisitor{func(x interface{}) bool {
- if r, ok := x.(*Expr); ok {
- return f(r)
- }
- return false
- }}
- vis.Walk(x)
+func WalkExprs(x any, f func(*Expr) bool) {
+ v1.WalkExprs(x, f)
}
// WalkBodies calls the function f on all bodies under x. If the function f
// returns true, AST nodes under the last node will not be visited.
-func WalkBodies(x interface{}, f func(Body) bool) {
- vis := &GenericVisitor{func(x interface{}) bool {
- if b, ok := x.(Body); ok {
- return f(b)
- }
- return false
- }}
- vis.Walk(x)
+func WalkBodies(x any, f func(Body) bool) {
+ v1.WalkBodies(x, f)
}
// WalkRules calls the function f on all rules under x. If the function f
// returns true, AST nodes under the last node will not be visited.
-func WalkRules(x interface{}, f func(*Rule) bool) {
- vis := &GenericVisitor{func(x interface{}) bool {
- if r, ok := x.(*Rule); ok {
- stop := f(r)
- // NOTE(tsandall): since rules cannot be embedded inside of queries
- // we can stop early if there is no else block.
- if stop || r.Else == nil {
- return true
- }
- }
- return false
- }}
- vis.Walk(x)
+func WalkRules(x any, f func(*Rule) bool) {
+ v1.WalkRules(x, f)
}
// WalkNodes calls the function f on all nodes under x. If the function f
// returns true, AST nodes under the last node will not be visited.
-func WalkNodes(x interface{}, f func(Node) bool) {
- vis := &GenericVisitor{func(x interface{}) bool {
- if n, ok := x.(Node); ok {
- return f(n)
- }
- return false
- }}
- vis.Walk(x)
+func WalkNodes(x any, f func(Node) bool) {
+ v1.WalkNodes(x, f)
}
// GenericVisitor provides a utility to walk over AST nodes using a
// closure. If the closure returns true, the visitor will not walk
// over AST nodes under x.
-type GenericVisitor struct {
- f func(x interface{}) bool
-}
+type GenericVisitor = v1.GenericVisitor
// NewGenericVisitor returns a new GenericVisitor that will invoke the function
// f on AST nodes.
-func NewGenericVisitor(f func(x interface{}) bool) *GenericVisitor {
- return &GenericVisitor{f}
-}
-
-// Walk iterates the AST by calling the function f on the
-// GenericVisitor before recursing. Contrary to the generic Walk, this
-// does not require allocating the visitor from heap.
-func (vis *GenericVisitor) Walk(x interface{}) {
- if vis.f(x) {
- return
- }
-
- switch x := x.(type) {
- case *Module:
- vis.Walk(x.Package)
- for i := range x.Imports {
- vis.Walk(x.Imports[i])
- }
- for i := range x.Rules {
- vis.Walk(x.Rules[i])
- }
- for i := range x.Annotations {
- vis.Walk(x.Annotations[i])
- }
- for i := range x.Comments {
- vis.Walk(x.Comments[i])
- }
- case *Package:
- vis.Walk(x.Path)
- case *Import:
- vis.Walk(x.Path)
- vis.Walk(x.Alias)
- case *Rule:
- vis.Walk(x.Head)
- vis.Walk(x.Body)
- if x.Else != nil {
- vis.Walk(x.Else)
- }
- case *Head:
- vis.Walk(x.Name)
- vis.Walk(x.Args)
- if x.Key != nil {
- vis.Walk(x.Key)
- }
- if x.Value != nil {
- vis.Walk(x.Value)
- }
- case Body:
- for i := range x {
- vis.Walk(x[i])
- }
- case Args:
- for i := range x {
- vis.Walk(x[i])
- }
- case *Expr:
- switch ts := x.Terms.(type) {
- case *Term, *SomeDecl, *Every:
- vis.Walk(ts)
- case []*Term:
- for i := range ts {
- vis.Walk(ts[i])
- }
- }
- for i := range x.With {
- vis.Walk(x.With[i])
- }
- case *With:
- vis.Walk(x.Target)
- vis.Walk(x.Value)
- case *Term:
- vis.Walk(x.Value)
- case Ref:
- for i := range x {
- vis.Walk(x[i])
- }
- case *object:
- x.Foreach(func(k, _ *Term) {
- vis.Walk(k)
- vis.Walk(x.Get(k))
- })
- case Object:
- x.Foreach(func(k, _ *Term) {
- vis.Walk(k)
- vis.Walk(x.Get(k))
- })
- case *Array:
- x.Foreach(func(t *Term) {
- vis.Walk(t)
- })
- case Set:
- xSlice := x.Slice()
- for i := range xSlice {
- vis.Walk(xSlice[i])
- }
- case *ArrayComprehension:
- vis.Walk(x.Term)
- vis.Walk(x.Body)
- case *ObjectComprehension:
- vis.Walk(x.Key)
- vis.Walk(x.Value)
- vis.Walk(x.Body)
- case *SetComprehension:
- vis.Walk(x.Term)
- vis.Walk(x.Body)
- case Call:
- for i := range x {
- vis.Walk(x[i])
- }
- case *Every:
- if x.Key != nil {
- vis.Walk(x.Key)
- }
- vis.Walk(x.Value)
- vis.Walk(x.Domain)
- vis.Walk(x.Body)
- case *SomeDecl:
- for i := range x.Symbols {
- vis.Walk(x.Symbols[i])
- }
- }
+func NewGenericVisitor(f func(x any) bool) *GenericVisitor {
+ return v1.NewGenericVisitor(f)
}
// BeforeAfterVisitor provides a utility to walk over AST nodes using
// closures. If the before closure returns true, the visitor will not
// walk over AST nodes under x. The after closure is invoked always
// after visiting a node.
-type BeforeAfterVisitor struct {
- before func(x interface{}) bool
- after func(x interface{})
-}
+type BeforeAfterVisitor = v1.BeforeAfterVisitor
// NewBeforeAfterVisitor returns a new BeforeAndAfterVisitor that
// will invoke the functions before and after AST nodes.
-func NewBeforeAfterVisitor(before func(x interface{}) bool, after func(x interface{})) *BeforeAfterVisitor {
- return &BeforeAfterVisitor{before, after}
-}
-
-// Walk iterates the AST by calling the functions on the
-// BeforeAndAfterVisitor before and after recursing. Contrary to the
-// generic Walk, this does not require allocating the visitor from
-// heap.
-func (vis *BeforeAfterVisitor) Walk(x interface{}) {
- defer vis.after(x)
- if vis.before(x) {
- return
- }
-
- switch x := x.(type) {
- case *Module:
- vis.Walk(x.Package)
- for i := range x.Imports {
- vis.Walk(x.Imports[i])
- }
- for i := range x.Rules {
- vis.Walk(x.Rules[i])
- }
- for i := range x.Annotations {
- vis.Walk(x.Annotations[i])
- }
- for i := range x.Comments {
- vis.Walk(x.Comments[i])
- }
- case *Package:
- vis.Walk(x.Path)
- case *Import:
- vis.Walk(x.Path)
- vis.Walk(x.Alias)
- case *Rule:
- vis.Walk(x.Head)
- vis.Walk(x.Body)
- if x.Else != nil {
- vis.Walk(x.Else)
- }
- case *Head:
- if len(x.Reference) > 0 {
- vis.Walk(x.Reference)
- } else {
- vis.Walk(x.Name)
- if x.Key != nil {
- vis.Walk(x.Key)
- }
- }
- vis.Walk(x.Args)
- if x.Value != nil {
- vis.Walk(x.Value)
- }
- case Body:
- for i := range x {
- vis.Walk(x[i])
- }
- case Args:
- for i := range x {
- vis.Walk(x[i])
- }
- case *Expr:
- switch ts := x.Terms.(type) {
- case *Term, *SomeDecl, *Every:
- vis.Walk(ts)
- case []*Term:
- for i := range ts {
- vis.Walk(ts[i])
- }
- }
- for i := range x.With {
- vis.Walk(x.With[i])
- }
- case *With:
- vis.Walk(x.Target)
- vis.Walk(x.Value)
- case *Term:
- vis.Walk(x.Value)
- case Ref:
- for i := range x {
- vis.Walk(x[i])
- }
- case *object:
- x.Foreach(func(k, _ *Term) {
- vis.Walk(k)
- vis.Walk(x.Get(k))
- })
- case Object:
- x.Foreach(func(k, _ *Term) {
- vis.Walk(k)
- vis.Walk(x.Get(k))
- })
- case *Array:
- x.Foreach(func(t *Term) {
- vis.Walk(t)
- })
- case Set:
- xSlice := x.Slice()
- for i := range xSlice {
- vis.Walk(xSlice[i])
- }
- case *ArrayComprehension:
- vis.Walk(x.Term)
- vis.Walk(x.Body)
- case *ObjectComprehension:
- vis.Walk(x.Key)
- vis.Walk(x.Value)
- vis.Walk(x.Body)
- case *SetComprehension:
- vis.Walk(x.Term)
- vis.Walk(x.Body)
- case Call:
- for i := range x {
- vis.Walk(x[i])
- }
- case *Every:
- if x.Key != nil {
- vis.Walk(x.Key)
- }
- vis.Walk(x.Value)
- vis.Walk(x.Domain)
- vis.Walk(x.Body)
- case *SomeDecl:
- for i := range x.Symbols {
- vis.Walk(x.Symbols[i])
- }
- }
+func NewBeforeAfterVisitor(before func(x any) bool, after func(x any)) *BeforeAfterVisitor {
+ return v1.NewBeforeAfterVisitor(before, after)
}
// VarVisitor walks AST nodes under a given node and collects all encountered
// variables. The collected variables can be controlled by specifying
// VarVisitorParams when creating the visitor.
-type VarVisitor struct {
- params VarVisitorParams
- vars VarSet
-}
+type VarVisitor = v1.VarVisitor
// VarVisitorParams contains settings for a VarVisitor.
-type VarVisitorParams struct {
- SkipRefHead bool
- SkipRefCallHead bool
- SkipObjectKeys bool
- SkipClosures bool
- SkipWithTarget bool
- SkipSets bool
-}
+type VarVisitorParams = v1.VarVisitorParams
// NewVarVisitor returns a new VarVisitor object.
func NewVarVisitor() *VarVisitor {
- return &VarVisitor{
- vars: NewVarSet(),
- }
-}
-
-// WithParams sets the parameters in params on vis.
-func (vis *VarVisitor) WithParams(params VarVisitorParams) *VarVisitor {
- vis.params = params
- return vis
-}
-
-// Vars returns a VarSet that contains collected vars.
-func (vis *VarVisitor) Vars() VarSet {
- return vis.vars
-}
-
-// visit determines if the VarVisitor will recurse into x: if it returns `true`,
-// the visitor will _skip_ that branch of the AST
-func (vis *VarVisitor) visit(v interface{}) bool {
- if vis.params.SkipObjectKeys {
- if o, ok := v.(Object); ok {
- o.Foreach(func(_, v *Term) {
- vis.Walk(v)
- })
- return true
- }
- }
- if vis.params.SkipRefHead {
- if r, ok := v.(Ref); ok {
- rSlice := r[1:]
- for i := range rSlice {
- vis.Walk(rSlice[i])
- }
- return true
- }
- }
- if vis.params.SkipClosures {
- switch v := v.(type) {
- case *ArrayComprehension, *ObjectComprehension, *SetComprehension:
- return true
- case *Expr:
- if ev, ok := v.Terms.(*Every); ok {
- vis.Walk(ev.Domain)
- // We're _not_ walking ev.Body -- that's the closure here
- return true
- }
- }
- }
- if vis.params.SkipWithTarget {
- if v, ok := v.(*With); ok {
- vis.Walk(v.Value)
- return true
- }
- }
- if vis.params.SkipSets {
- if _, ok := v.(Set); ok {
- return true
- }
- }
- if vis.params.SkipRefCallHead {
- switch v := v.(type) {
- case *Expr:
- if terms, ok := v.Terms.([]*Term); ok {
- termSlice := terms[0].Value.(Ref)[1:]
- for i := range termSlice {
- vis.Walk(termSlice[i])
- }
- for i := 1; i < len(terms); i++ {
- vis.Walk(terms[i])
- }
- for i := range v.With {
- vis.Walk(v.With[i])
- }
- return true
- }
- case Call:
- operator := v[0].Value.(Ref)
- for i := 1; i < len(operator); i++ {
- vis.Walk(operator[i])
- }
- for i := 1; i < len(v); i++ {
- vis.Walk(v[i])
- }
- return true
- case *With:
- if ref, ok := v.Target.Value.(Ref); ok {
- refSlice := ref[1:]
- for i := range refSlice {
- vis.Walk(refSlice[i])
- }
- }
- if ref, ok := v.Value.Value.(Ref); ok {
- refSlice := ref[1:]
- for i := range refSlice {
- vis.Walk(refSlice[i])
- }
- } else {
- vis.Walk(v.Value)
- }
- return true
- }
- }
- if v, ok := v.(Var); ok {
- vis.vars.Add(v)
- }
- return false
-}
-
-// Walk iterates the AST by calling the function f on the
-// GenericVisitor before recursing. Contrary to the generic Walk, this
-// does not require allocating the visitor from heap.
-func (vis *VarVisitor) Walk(x interface{}) {
- if vis.visit(x) {
- return
- }
-
- switch x := x.(type) {
- case *Module:
- vis.Walk(x.Package)
- for i := range x.Imports {
- vis.Walk(x.Imports[i])
- }
- for i := range x.Rules {
- vis.Walk(x.Rules[i])
- }
- for i := range x.Comments {
- vis.Walk(x.Comments[i])
- }
- case *Package:
- vis.Walk(x.Path)
- case *Import:
- vis.Walk(x.Path)
- vis.Walk(x.Alias)
- case *Rule:
- vis.Walk(x.Head)
- vis.Walk(x.Body)
- if x.Else != nil {
- vis.Walk(x.Else)
- }
- case *Head:
- if len(x.Reference) > 0 {
- vis.Walk(x.Reference)
- } else {
- vis.Walk(x.Name)
- if x.Key != nil {
- vis.Walk(x.Key)
- }
- }
- vis.Walk(x.Args)
-
- if x.Value != nil {
- vis.Walk(x.Value)
- }
- case Body:
- for i := range x {
- vis.Walk(x[i])
- }
- case Args:
- for i := range x {
- vis.Walk(x[i])
- }
- case *Expr:
- switch ts := x.Terms.(type) {
- case *Term, *SomeDecl, *Every:
- vis.Walk(ts)
- case []*Term:
- for i := range ts {
- vis.Walk(ts[i])
- }
- }
- for i := range x.With {
- vis.Walk(x.With[i])
- }
- case *With:
- vis.Walk(x.Target)
- vis.Walk(x.Value)
- case *Term:
- vis.Walk(x.Value)
- case Ref:
- for i := range x {
- vis.Walk(x[i])
- }
- case *object:
- x.Foreach(func(k, _ *Term) {
- vis.Walk(k)
- vis.Walk(x.Get(k))
- })
- case *Array:
- x.Foreach(func(t *Term) {
- vis.Walk(t)
- })
- case Set:
- xSlice := x.Slice()
- for i := range xSlice {
- vis.Walk(xSlice[i])
- }
- case *ArrayComprehension:
- vis.Walk(x.Term)
- vis.Walk(x.Body)
- case *ObjectComprehension:
- vis.Walk(x.Key)
- vis.Walk(x.Value)
- vis.Walk(x.Body)
- case *SetComprehension:
- vis.Walk(x.Term)
- vis.Walk(x.Body)
- case Call:
- for i := range x {
- vis.Walk(x[i])
- }
- case *Every:
- if x.Key != nil {
- vis.Walk(x.Key)
- }
- vis.Walk(x.Value)
- vis.Walk(x.Domain)
- vis.Walk(x.Body)
- case *SomeDecl:
- for i := range x.Symbols {
- vis.Walk(x.Symbols[i])
- }
- }
+ return v1.NewVarVisitor()
}
diff --git a/vendor/github.com/open-policy-agent/opa/bundle/bundle.go b/vendor/github.com/open-policy-agent/opa/bundle/bundle.go
index 0e159384ef..50ad97349a 100644
--- a/vendor/github.com/open-policy-agent/opa/bundle/bundle.go
+++ b/vendor/github.com/open-policy-agent/opa/bundle/bundle.go
@@ -6,1386 +6,97 @@
package bundle
import (
- "archive/tar"
- "bytes"
- "compress/gzip"
- "encoding/hex"
- "encoding/json"
- "errors"
- "fmt"
"io"
- "net/url"
- "os"
- "path"
- "path/filepath"
- "reflect"
- "strings"
- "github.com/gobwas/glob"
"github.com/open-policy-agent/opa/ast"
- astJSON "github.com/open-policy-agent/opa/ast/json"
- "github.com/open-policy-agent/opa/format"
- "github.com/open-policy-agent/opa/internal/file/archive"
- "github.com/open-policy-agent/opa/internal/merge"
- "github.com/open-policy-agent/opa/metrics"
- "github.com/open-policy-agent/opa/util"
+ v1 "github.com/open-policy-agent/opa/v1/bundle"
)
// Common file extensions and file names.
const (
- RegoExt = ".rego"
- WasmFile = "policy.wasm"
- PlanFile = "plan.json"
- ManifestExt = ".manifest"
- SignaturesFile = "signatures.json"
- patchFile = "patch.json"
- dataFile = "data.json"
- yamlDataFile = "data.yaml"
- ymlDataFile = "data.yml"
- defaultHashingAlg = "SHA-256"
- DefaultSizeLimitBytes = (1024 * 1024 * 1024) // limit bundle reads to 1GB to protect against gzip bombs
- DeltaBundleType = "delta"
- SnapshotBundleType = "snapshot"
+ RegoExt = v1.RegoExt
+ WasmFile = v1.WasmFile
+ PlanFile = v1.PlanFile
+ ManifestExt = v1.ManifestExt
+ SignaturesFile = v1.SignaturesFile
+
+ DefaultSizeLimitBytes = v1.DefaultSizeLimitBytes
+ DeltaBundleType = v1.DeltaBundleType
+ SnapshotBundleType = v1.SnapshotBundleType
)
// Bundle represents a loaded bundle. The bundle can contain data and policies.
-type Bundle struct {
- Signatures SignaturesConfig
- Manifest Manifest
- Data map[string]interface{}
- Modules []ModuleFile
- Wasm []byte // Deprecated. Use WasmModules instead
- WasmModules []WasmModuleFile
- PlanModules []PlanModuleFile
- Patch Patch
- Etag string
- Raw []Raw
-
- lazyLoadingMode bool
- sizeLimitBytes int64
-}
+type Bundle = v1.Bundle
// Raw contains raw bytes representing the bundle's content
-type Raw struct {
- Path string
- Value []byte
-}
+type Raw = v1.Raw
// Patch contains an array of objects wherein each object represents the patch operation to be
// applied to the bundle data.
-type Patch struct {
- Data []PatchOperation `json:"data,omitempty"`
-}
+type Patch = v1.Patch
// PatchOperation models a single patch operation against a document.
-type PatchOperation struct {
- Op string `json:"op"`
- Path string `json:"path"`
- Value interface{} `json:"value"`
-}
+type PatchOperation = v1.PatchOperation
// SignaturesConfig represents an array of JWTs that encapsulate the signatures for the bundle.
-type SignaturesConfig struct {
- Signatures []string `json:"signatures,omitempty"`
- Plugin string `json:"plugin,omitempty"`
-}
-
-// isEmpty returns if the SignaturesConfig is empty.
-func (s SignaturesConfig) isEmpty() bool {
- return reflect.DeepEqual(s, SignaturesConfig{})
-}
+type SignaturesConfig = v1.SignaturesConfig
// DecodedSignature represents the decoded JWT payload.
-type DecodedSignature struct {
- Files []FileInfo `json:"files"`
- KeyID string `json:"keyid"` // Deprecated, use kid in the JWT header instead.
- Scope string `json:"scope"`
- IssuedAt int64 `json:"iat"`
- Issuer string `json:"iss"`
-}
+type DecodedSignature = v1.DecodedSignature
// FileInfo contains the hashing algorithm used, resulting digest etc.
-type FileInfo struct {
- Name string `json:"name"`
- Hash string `json:"hash"`
- Algorithm string `json:"algorithm"`
-}
+type FileInfo = v1.FileInfo
// NewFile returns a new FileInfo.
func NewFile(name, hash, alg string) FileInfo {
- return FileInfo{
- Name: name,
- Hash: hash,
- Algorithm: alg,
- }
+ return v1.NewFile(name, hash, alg)
}
// Manifest represents the manifest from a bundle. The manifest may contain
// metadata such as the bundle revision.
-type Manifest struct {
- Revision string `json:"revision"`
- Roots *[]string `json:"roots,omitempty"`
- WasmResolvers []WasmResolver `json:"wasm,omitempty"`
- // RegoVersion is the global Rego version for the bundle described by this Manifest.
- // The Rego version of individual files can be overridden in FileRegoVersions.
- // We don't use ast.RegoVersion here, as this iota type's order isn't guaranteed to be stable over time.
- // We use a pointer so that we can support hand-made bundles that don't have an explicit version appropriately.
- // E.g. in OPA 0.x if --v1-compatible is used when consuming the bundle, and there is no specified version,
- // we should default to v1; if --v1-compatible isn't used, we should default to v0. In OPA 1.0, no --x-compatible
- // flag and no explicit bundle version should default to v1.
- RegoVersion *int `json:"rego_version,omitempty"`
- // FileRegoVersions is a map from file paths to Rego versions.
- // This allows individual files to override the global Rego version specified by RegoVersion.
- FileRegoVersions map[string]int `json:"file_rego_versions,omitempty"`
- Metadata map[string]interface{} `json:"metadata,omitempty"`
-
- compiledFileRegoVersions []fileRegoVersion
-}
-
-type fileRegoVersion struct {
- path glob.Glob
- version int
-}
+type Manifest = v1.Manifest
// WasmResolver maps a wasm module to an entrypoint ref.
-type WasmResolver struct {
- Entrypoint string `json:"entrypoint,omitempty"`
- Module string `json:"module,omitempty"`
- Annotations []*ast.Annotations `json:"annotations,omitempty"`
-}
-
-// Init initializes the manifest. If you instantiate a manifest
-// manually, call Init to ensure that the roots are set properly.
-func (m *Manifest) Init() {
- if m.Roots == nil {
- defaultRoots := []string{""}
- m.Roots = &defaultRoots
- }
-}
-
-// AddRoot adds r to the roots of m. This function is idempotent.
-func (m *Manifest) AddRoot(r string) {
- m.Init()
- if !RootPathsContain(*m.Roots, r) {
- *m.Roots = append(*m.Roots, r)
- }
-}
-
-func (m *Manifest) SetRegoVersion(v ast.RegoVersion) {
- m.Init()
- regoVersion := 0
- if v == ast.RegoV1 {
- regoVersion = 1
- }
- m.RegoVersion = ®oVersion
-}
-
-// Equal returns true if m is semantically equivalent to other.
-func (m Manifest) Equal(other Manifest) bool {
-
- // This is safe since both are passed by value.
- m.Init()
- other.Init()
-
- if m.Revision != other.Revision {
- return false
- }
-
- if m.RegoVersion == nil && other.RegoVersion != nil {
- return false
- }
- if m.RegoVersion != nil && other.RegoVersion == nil {
- return false
- }
- if m.RegoVersion != nil && other.RegoVersion != nil && *m.RegoVersion != *other.RegoVersion {
- return false
- }
-
- // If both are nil, or both are empty, we consider them equal.
- if !(len(m.FileRegoVersions) == 0 && len(other.FileRegoVersions) == 0) &&
- !reflect.DeepEqual(m.FileRegoVersions, other.FileRegoVersions) {
- return false
- }
-
- if !reflect.DeepEqual(m.Metadata, other.Metadata) {
- return false
- }
-
- return m.equalWasmResolversAndRoots(other)
-}
-
-func (m Manifest) Empty() bool {
- return m.Equal(Manifest{})
-}
-
-// Copy returns a deep copy of the manifest.
-func (m Manifest) Copy() Manifest {
- m.Init()
- roots := make([]string, len(*m.Roots))
- copy(roots, *m.Roots)
- m.Roots = &roots
-
- wasmModules := make([]WasmResolver, len(m.WasmResolvers))
- copy(wasmModules, m.WasmResolvers)
- m.WasmResolvers = wasmModules
-
- metadata := m.Metadata
-
- if metadata != nil {
- m.Metadata = make(map[string]interface{})
- for k, v := range metadata {
- m.Metadata[k] = v
- }
- }
-
- return m
-}
-
-func (m Manifest) String() string {
- m.Init()
- if m.RegoVersion != nil {
- return fmt.Sprintf("",
- m.Revision, *m.RegoVersion, *m.Roots, m.WasmResolvers, m.Metadata)
- }
- return fmt.Sprintf("",
- m.Revision, *m.Roots, m.WasmResolvers, m.Metadata)
-}
-
-func (m Manifest) rootSet() stringSet {
- rs := map[string]struct{}{}
-
- for _, r := range *m.Roots {
- rs[r] = struct{}{}
- }
-
- return stringSet(rs)
-}
-
-func (m Manifest) equalWasmResolversAndRoots(other Manifest) bool {
- if len(m.WasmResolvers) != len(other.WasmResolvers) {
- return false
- }
-
- for i := 0; i < len(m.WasmResolvers); i++ {
- if !m.WasmResolvers[i].Equal(&other.WasmResolvers[i]) {
- return false
- }
- }
-
- return m.rootSet().Equal(other.rootSet())
-}
-
-func (wr *WasmResolver) Equal(other *WasmResolver) bool {
- if wr == nil && other == nil {
- return true
- }
-
- if wr == nil || other == nil {
- return false
- }
-
- if wr.Module != other.Module {
- return false
- }
-
- if wr.Entrypoint != other.Entrypoint {
- return false
- }
-
- annotLen := len(wr.Annotations)
- if annotLen != len(other.Annotations) {
- return false
- }
-
- for i := 0; i < annotLen; i++ {
- if wr.Annotations[i].Compare(other.Annotations[i]) != 0 {
- return false
- }
- }
-
- return true
-}
-
-type stringSet map[string]struct{}
-
-func (ss stringSet) Equal(other stringSet) bool {
- if len(ss) != len(other) {
- return false
- }
- for k := range other {
- if _, ok := ss[k]; !ok {
- return false
- }
- }
- return true
-}
-
-func (m *Manifest) validateAndInjectDefaults(b Bundle) error {
-
- m.Init()
-
- // Validate roots in bundle.
- roots := *m.Roots
-
- // Standardize the roots (no starting or trailing slash)
- for i := range roots {
- roots[i] = strings.Trim(roots[i], "/")
- }
-
- for i := 0; i < len(roots)-1; i++ {
- for j := i + 1; j < len(roots); j++ {
- if RootPathsOverlap(roots[i], roots[j]) {
- return fmt.Errorf("manifest has overlapped roots: '%v' and '%v'", roots[i], roots[j])
- }
- }
- }
-
- // Validate modules in bundle.
- for _, module := range b.Modules {
- found := false
- if path, err := module.Parsed.Package.Path.Ptr(); err == nil {
- found = RootPathsContain(roots, path)
- }
- if !found {
- return fmt.Errorf("manifest roots %v do not permit '%v' in module '%v'", roots, module.Parsed.Package, module.Path)
- }
- }
-
- // Build a set of wasm module entrypoints to validate
- wasmModuleToEps := map[string]string{}
- seenEps := map[string]struct{}{}
- for _, wm := range b.WasmModules {
- wasmModuleToEps[wm.Path] = ""
- }
-
- for _, wmConfig := range b.Manifest.WasmResolvers {
- _, ok := wasmModuleToEps[wmConfig.Module]
- if !ok {
- return fmt.Errorf("manifest references wasm module '%s' but the module file does not exist", wmConfig.Module)
- }
-
- // Ensure wasm module entrypoint in within bundle roots
- if !RootPathsContain(roots, wmConfig.Entrypoint) {
- return fmt.Errorf("manifest roots %v do not permit '%v' entrypoint for wasm module '%v'", roots, wmConfig.Entrypoint, wmConfig.Module)
- }
-
- if _, ok := seenEps[wmConfig.Entrypoint]; ok {
- return fmt.Errorf("entrypoint '%s' cannot be used by more than one wasm module", wmConfig.Entrypoint)
- }
- seenEps[wmConfig.Entrypoint] = struct{}{}
-
- wasmModuleToEps[wmConfig.Module] = wmConfig.Entrypoint
- }
-
- // Validate data patches in bundle.
- for _, patch := range b.Patch.Data {
- path := strings.Trim(patch.Path, "/")
- if !RootPathsContain(roots, path) {
- return fmt.Errorf("manifest roots %v do not permit data patch at path '%s'", roots, path)
- }
- }
-
- if b.lazyLoadingMode {
- return nil
- }
-
- // Validate data in bundle.
- return dfs(b.Data, "", func(path string, node interface{}) (bool, error) {
- path = strings.Trim(path, "/")
- if RootPathsContain(roots, path) {
- return true, nil
- }
-
- if _, ok := node.(map[string]interface{}); ok {
- for i := range roots {
- if RootPathsContain(strings.Split(path, "/"), roots[i]) {
- return false, nil
- }
- }
- }
- return false, fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, path)
- })
-}
+type WasmResolver = v1.WasmResolver
// ModuleFile represents a single module contained in a bundle.
-type ModuleFile struct {
- URL string
- Path string
- RelativePath string
- Raw []byte
- Parsed *ast.Module
-}
+type ModuleFile = v1.ModuleFile
// WasmModuleFile represents a single wasm module contained in a bundle.
-type WasmModuleFile struct {
- URL string
- Path string
- Entrypoints []ast.Ref
- Raw []byte
-}
+type WasmModuleFile = v1.WasmModuleFile
// PlanModuleFile represents a single plan module contained in a bundle.
//
// NOTE(tsandall): currently the plans are just opaque binary blobs. In the
// future we could inject the entrypoints so that the plans could be executed
// inside of OPA proper like we do for Wasm modules.
-type PlanModuleFile struct {
- URL string
- Path string
- Raw []byte
-}
+type PlanModuleFile = v1.PlanModuleFile
// Reader contains the reader to load the bundle from.
-type Reader struct {
- loader DirectoryLoader
- includeManifestInData bool
- metrics metrics.Metrics
- baseDir string
- verificationConfig *VerificationConfig
- skipVerify bool
- processAnnotations bool
- jsonOptions *astJSON.Options
- capabilities *ast.Capabilities
- files map[string]FileInfo // files in the bundle signature payload
- sizeLimitBytes int64
- etag string
- lazyLoadingMode bool
- name string
- persist bool
- regoVersion ast.RegoVersion
- followSymlinks bool
-}
+type Reader = v1.Reader
// NewReader is deprecated. Use NewCustomReader instead.
func NewReader(r io.Reader) *Reader {
- return NewCustomReader(NewTarballLoader(r))
+ return v1.NewReader(r).WithRegoVersion(ast.DefaultRegoVersion)
}
// NewCustomReader returns a new Reader configured to use the
// specified DirectoryLoader.
func NewCustomReader(loader DirectoryLoader) *Reader {
- nr := Reader{
- loader: loader,
- metrics: metrics.New(),
- files: make(map[string]FileInfo),
- sizeLimitBytes: DefaultSizeLimitBytes + 1,
- }
- return &nr
-}
-
-// IncludeManifestInData sets whether the manifest metadata should be
-// included in the bundle's data.
-func (r *Reader) IncludeManifestInData(includeManifestInData bool) *Reader {
- r.includeManifestInData = includeManifestInData
- return r
-}
-
-// WithMetrics sets the metrics object to be used while loading bundles
-func (r *Reader) WithMetrics(m metrics.Metrics) *Reader {
- r.metrics = m
- return r
-}
-
-// WithBaseDir sets a base directory for file paths of loaded Rego
-// modules. This will *NOT* affect the loaded path of data files.
-func (r *Reader) WithBaseDir(dir string) *Reader {
- r.baseDir = dir
- return r
-}
-
-// WithBundleVerificationConfig sets the key configuration used to verify a signed bundle
-func (r *Reader) WithBundleVerificationConfig(config *VerificationConfig) *Reader {
- r.verificationConfig = config
- return r
-}
-
-// WithSkipBundleVerification skips verification of a signed bundle
-func (r *Reader) WithSkipBundleVerification(skipVerify bool) *Reader {
- r.skipVerify = skipVerify
- return r
-}
-
-// WithProcessAnnotations enables annotation processing during .rego file parsing.
-func (r *Reader) WithProcessAnnotations(yes bool) *Reader {
- r.processAnnotations = yes
- return r
-}
-
-// WithCapabilities sets the supported capabilities when loading the files
-func (r *Reader) WithCapabilities(caps *ast.Capabilities) *Reader {
- r.capabilities = caps
- return r
-}
-
-// WithJSONOptions sets the JSONOptions to use when parsing policy files
-func (r *Reader) WithJSONOptions(opts *astJSON.Options) *Reader {
- r.jsonOptions = opts
- return r
-}
-
-// WithSizeLimitBytes sets the size limit to apply to files in the bundle. If files are larger
-// than this, an error will be returned by the reader.
-func (r *Reader) WithSizeLimitBytes(n int64) *Reader {
- r.sizeLimitBytes = n + 1
- return r
-}
-
-// WithBundleEtag sets the given etag value on the bundle
-func (r *Reader) WithBundleEtag(etag string) *Reader {
- r.etag = etag
- return r
-}
-
-// WithBundleName specifies the bundle name
-func (r *Reader) WithBundleName(name string) *Reader {
- r.name = name
- return r
-}
-
-func (r *Reader) WithFollowSymlinks(yes bool) *Reader {
- r.followSymlinks = yes
- return r
-}
-
-// WithLazyLoadingMode sets the bundle loading mode. If true,
-// bundles will be read in lazy mode. In this mode, data files in the bundle will not be
-// deserialized and the check to validate that the bundle data does not contain paths
-// outside the bundle's roots will not be performed while reading the bundle.
-func (r *Reader) WithLazyLoadingMode(yes bool) *Reader {
- r.lazyLoadingMode = yes
- return r
-}
-
-// WithBundlePersistence specifies if the downloaded bundle will eventually be persisted to disk.
-func (r *Reader) WithBundlePersistence(persist bool) *Reader {
- r.persist = persist
- return r
-}
-
-func (r *Reader) WithRegoVersion(version ast.RegoVersion) *Reader {
- r.regoVersion = version
- return r
-}
-
-func (r *Reader) ParserOptions() ast.ParserOptions {
- return ast.ParserOptions{
- ProcessAnnotation: r.processAnnotations,
- Capabilities: r.capabilities,
- JSONOptions: r.jsonOptions,
- RegoVersion: r.regoVersion,
- }
-}
-
-// Read returns a new Bundle loaded from the reader.
-func (r *Reader) Read() (Bundle, error) {
-
- var bundle Bundle
- var descriptors []*Descriptor
- var err error
- var raw []Raw
-
- bundle.Signatures, bundle.Patch, descriptors, err = preProcessBundle(r.loader, r.skipVerify, r.sizeLimitBytes)
- if err != nil {
- return bundle, err
- }
-
- bundle.lazyLoadingMode = r.lazyLoadingMode
- bundle.sizeLimitBytes = r.sizeLimitBytes
-
- if bundle.Type() == SnapshotBundleType {
- err = r.checkSignaturesAndDescriptors(bundle.Signatures)
- if err != nil {
- return bundle, err
- }
-
- bundle.Data = map[string]interface{}{}
- }
-
- var modules []ModuleFile
- for _, f := range descriptors {
- buf, err := readFile(f, r.sizeLimitBytes)
- if err != nil {
- return bundle, err
- }
-
- // verify the file content
- if bundle.Type() == SnapshotBundleType && !bundle.Signatures.isEmpty() {
- path := f.Path()
- if r.baseDir != "" {
- path = f.URL()
- }
- path = strings.TrimPrefix(path, "/")
-
- // check if the file is to be excluded from bundle verification
- if r.isFileExcluded(path) {
- delete(r.files, path)
- } else {
- if err = r.verifyBundleFile(path, buf); err != nil {
- return bundle, err
- }
- }
- }
-
- // Normalize the paths to use `/` separators
- path := filepath.ToSlash(f.Path())
-
- if strings.HasSuffix(path, RegoExt) {
- fullPath := r.fullPath(path)
- bs := buf.Bytes()
-
- if r.lazyLoadingMode {
- p := fullPath
- if r.name != "" {
- p = modulePathWithPrefix(r.name, fullPath)
- }
-
- raw = append(raw, Raw{Path: p, Value: bs})
- }
-
- // Modules are parsed after we've had a chance to read the manifest
- mf := ModuleFile{
- URL: f.URL(),
- Path: fullPath,
- RelativePath: path,
- Raw: bs,
- }
- modules = append(modules, mf)
- } else if filepath.Base(path) == WasmFile {
- bundle.WasmModules = append(bundle.WasmModules, WasmModuleFile{
- URL: f.URL(),
- Path: r.fullPath(path),
- Raw: buf.Bytes(),
- })
- } else if filepath.Base(path) == PlanFile {
- bundle.PlanModules = append(bundle.PlanModules, PlanModuleFile{
- URL: f.URL(),
- Path: r.fullPath(path),
- Raw: buf.Bytes(),
- })
- } else if filepath.Base(path) == dataFile {
- if r.lazyLoadingMode {
- raw = append(raw, Raw{Path: path, Value: buf.Bytes()})
- continue
- }
-
- var value interface{}
-
- r.metrics.Timer(metrics.RegoDataParse).Start()
- err := util.UnmarshalJSON(buf.Bytes(), &value)
- r.metrics.Timer(metrics.RegoDataParse).Stop()
-
- if err != nil {
- return bundle, fmt.Errorf("bundle load failed on %v: %w", r.fullPath(path), err)
- }
-
- if err := insertValue(&bundle, path, value); err != nil {
- return bundle, err
- }
-
- } else if filepath.Base(path) == yamlDataFile || filepath.Base(path) == ymlDataFile {
- if r.lazyLoadingMode {
- raw = append(raw, Raw{Path: path, Value: buf.Bytes()})
- continue
- }
-
- var value interface{}
-
- r.metrics.Timer(metrics.RegoDataParse).Start()
- err := util.Unmarshal(buf.Bytes(), &value)
- r.metrics.Timer(metrics.RegoDataParse).Stop()
-
- if err != nil {
- return bundle, fmt.Errorf("bundle load failed on %v: %w", r.fullPath(path), err)
- }
-
- if err := insertValue(&bundle, path, value); err != nil {
- return bundle, err
- }
-
- } else if strings.HasSuffix(path, ManifestExt) {
- if err := util.NewJSONDecoder(&buf).Decode(&bundle.Manifest); err != nil {
- return bundle, fmt.Errorf("bundle load failed on manifest decode: %w", err)
- }
- }
- }
-
- // Parse modules
- popts := r.ParserOptions()
- popts.RegoVersion = bundle.RegoVersion(popts.RegoVersion)
- for _, mf := range modules {
- modulePopts := popts
- if modulePopts.RegoVersion, err = bundle.RegoVersionForFile(mf.RelativePath, popts.RegoVersion); err != nil {
- return bundle, err
- }
- r.metrics.Timer(metrics.RegoModuleParse).Start()
- mf.Parsed, err = ast.ParseModuleWithOpts(mf.Path, string(mf.Raw), modulePopts)
- r.metrics.Timer(metrics.RegoModuleParse).Stop()
- if err != nil {
- return bundle, err
- }
- bundle.Modules = append(bundle.Modules, mf)
- }
-
- if bundle.Type() == DeltaBundleType {
- if len(bundle.Data) != 0 {
- return bundle, fmt.Errorf("delta bundle expected to contain only patch file but data files found")
- }
-
- if len(bundle.Modules) != 0 {
- return bundle, fmt.Errorf("delta bundle expected to contain only patch file but policy files found")
- }
-
- if len(bundle.WasmModules) != 0 {
- return bundle, fmt.Errorf("delta bundle expected to contain only patch file but wasm files found")
- }
-
- if r.persist {
- return bundle, fmt.Errorf("'persist' property is true in config. persisting delta bundle to disk is not supported")
- }
- }
-
- // check if the bundle signatures specify any files that weren't found in the bundle
- if bundle.Type() == SnapshotBundleType && len(r.files) != 0 {
- extra := []string{}
- for k := range r.files {
- extra = append(extra, k)
- }
- return bundle, fmt.Errorf("file(s) %v specified in bundle signatures but not found in the target bundle", extra)
- }
-
- if err := bundle.Manifest.validateAndInjectDefaults(bundle); err != nil {
- return bundle, err
- }
-
- // Inject the wasm module entrypoint refs into the WasmModuleFile structs
- epMap := map[string][]string{}
- for _, r := range bundle.Manifest.WasmResolvers {
- epMap[r.Module] = append(epMap[r.Module], r.Entrypoint)
- }
- for i := 0; i < len(bundle.WasmModules); i++ {
- entrypoints := epMap[bundle.WasmModules[i].Path]
- for _, entrypoint := range entrypoints {
- ref, err := ast.PtrRef(ast.DefaultRootDocument, entrypoint)
- if err != nil {
- return bundle, fmt.Errorf("failed to parse wasm module entrypoint '%s': %s", entrypoint, err)
- }
- bundle.WasmModules[i].Entrypoints = append(bundle.WasmModules[i].Entrypoints, ref)
- }
- }
-
- if r.includeManifestInData {
- var metadata map[string]interface{}
-
- b, err := json.Marshal(&bundle.Manifest)
- if err != nil {
- return bundle, fmt.Errorf("bundle load failed on manifest marshal: %w", err)
- }
-
- err = util.UnmarshalJSON(b, &metadata)
- if err != nil {
- return bundle, fmt.Errorf("bundle load failed on manifest unmarshal: %w", err)
- }
-
- // For backwards compatibility always write to the old unnamed manifest path
- // This will *not* be correct if >1 bundle is in use...
- if err := bundle.insertData(legacyManifestStoragePath, metadata); err != nil {
- return bundle, fmt.Errorf("bundle load failed on %v: %w", legacyRevisionStoragePath, err)
- }
- }
-
- bundle.Etag = r.etag
- bundle.Raw = raw
-
- return bundle, nil
-}
-
-func (r *Reader) isFileExcluded(path string) bool {
- for _, e := range r.verificationConfig.Exclude {
- match, _ := filepath.Match(e, path)
- if match {
- return true
- }
- }
- return false
-}
-
-func (r *Reader) checkSignaturesAndDescriptors(signatures SignaturesConfig) error {
- if r.skipVerify {
- return nil
- }
-
- if signatures.isEmpty() && r.verificationConfig != nil && r.verificationConfig.KeyID != "" {
- return fmt.Errorf("bundle missing .signatures.json file")
- }
-
- if !signatures.isEmpty() {
- if r.verificationConfig == nil {
- return fmt.Errorf("verification key not provided")
- }
-
- // verify the JWT signatures included in the `.signatures.json` file
- if err := r.verifyBundleSignature(signatures); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (r *Reader) verifyBundleSignature(sc SignaturesConfig) error {
- var err error
- r.files, err = VerifyBundleSignature(sc, r.verificationConfig)
- return err
-}
-
-func (r *Reader) verifyBundleFile(path string, data bytes.Buffer) error {
- return VerifyBundleFile(path, data, r.files)
-}
-
-func (r *Reader) fullPath(path string) string {
- if r.baseDir != "" {
- path = filepath.Join(r.baseDir, path)
- }
- return path
+ return v1.NewCustomReader(loader).WithRegoVersion(ast.DefaultRegoVersion)
}
// Write is deprecated. Use NewWriter instead.
func Write(w io.Writer, bundle Bundle) error {
- return NewWriter(w).
- UseModulePath(true).
- DisableFormat(true).
- Write(bundle)
+ return v1.Write(w, bundle)
}
// Writer implements bundle serialization.
-type Writer struct {
- usePath bool
- disableFormat bool
- w io.Writer
-}
+type Writer = v1.Writer
// NewWriter returns a bundle writer that writes to w.
func NewWriter(w io.Writer) *Writer {
- return &Writer{
- w: w,
- }
-}
-
-// UseModulePath configures the writer to use the module file path instead of the
-// module file URL during serialization. This is for backwards compatibility.
-func (w *Writer) UseModulePath(yes bool) *Writer {
- w.usePath = yes
- return w
-}
-
-// DisableFormat configures the writer to just write out raw bytes instead
-// of formatting modules before serialization.
-func (w *Writer) DisableFormat(yes bool) *Writer {
- w.disableFormat = yes
- return w
-}
-
-// Write writes the bundle to the writer's output stream.
-func (w *Writer) Write(bundle Bundle) error {
- gw := gzip.NewWriter(w.w)
- tw := tar.NewWriter(gw)
-
- bundleType := bundle.Type()
-
- if bundleType == SnapshotBundleType {
- var buf bytes.Buffer
-
- if err := json.NewEncoder(&buf).Encode(bundle.Data); err != nil {
- return err
- }
-
- if err := archive.WriteFile(tw, "data.json", buf.Bytes()); err != nil {
- return err
- }
-
- for _, module := range bundle.Modules {
- path := module.URL
- if w.usePath {
- path = module.Path
- }
-
- if err := archive.WriteFile(tw, path, module.Raw); err != nil {
- return err
- }
- }
-
- if err := w.writeWasm(tw, bundle); err != nil {
- return err
- }
-
- if err := writeSignatures(tw, bundle); err != nil {
- return err
- }
-
- if err := w.writePlan(tw, bundle); err != nil {
- return err
- }
- } else if bundleType == DeltaBundleType {
- if err := writePatch(tw, bundle); err != nil {
- return err
- }
- }
-
- if err := writeManifest(tw, bundle); err != nil {
- return err
- }
-
- if err := tw.Close(); err != nil {
- return err
- }
-
- return gw.Close()
-}
-
-func (w *Writer) writeWasm(tw *tar.Writer, bundle Bundle) error {
- for _, wm := range bundle.WasmModules {
- path := wm.URL
- if w.usePath {
- path = wm.Path
- }
-
- err := archive.WriteFile(tw, path, wm.Raw)
- if err != nil {
- return err
- }
- }
-
- if len(bundle.Wasm) > 0 {
- err := archive.WriteFile(tw, "/"+WasmFile, bundle.Wasm)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (w *Writer) writePlan(tw *tar.Writer, bundle Bundle) error {
- for _, wm := range bundle.PlanModules {
- path := wm.URL
- if w.usePath {
- path = wm.Path
- }
-
- err := archive.WriteFile(tw, path, wm.Raw)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func writeManifest(tw *tar.Writer, bundle Bundle) error {
-
- if bundle.Manifest.Empty() {
- return nil
- }
-
- var buf bytes.Buffer
-
- if err := json.NewEncoder(&buf).Encode(bundle.Manifest); err != nil {
- return err
- }
-
- return archive.WriteFile(tw, ManifestExt, buf.Bytes())
-}
-
-func writePatch(tw *tar.Writer, bundle Bundle) error {
-
- var buf bytes.Buffer
-
- if err := json.NewEncoder(&buf).Encode(bundle.Patch); err != nil {
- return err
- }
-
- return archive.WriteFile(tw, patchFile, buf.Bytes())
-}
-
-func writeSignatures(tw *tar.Writer, bundle Bundle) error {
-
- if bundle.Signatures.isEmpty() {
- return nil
- }
-
- bs, err := json.MarshalIndent(bundle.Signatures, "", " ")
- if err != nil {
- return err
- }
-
- return archive.WriteFile(tw, fmt.Sprintf(".%v", SignaturesFile), bs)
-}
-
-func hashBundleFiles(hash SignatureHasher, b *Bundle) ([]FileInfo, error) {
-
- files := []FileInfo{}
-
- bs, err := hash.HashFile(b.Data)
- if err != nil {
- return files, err
- }
- files = append(files, NewFile(strings.TrimPrefix("data.json", "/"), hex.EncodeToString(bs), defaultHashingAlg))
-
- if len(b.Wasm) != 0 {
- bs, err := hash.HashFile(b.Wasm)
- if err != nil {
- return files, err
- }
- files = append(files, NewFile(strings.TrimPrefix(WasmFile, "/"), hex.EncodeToString(bs), defaultHashingAlg))
- }
-
- for _, wasmModule := range b.WasmModules {
- bs, err := hash.HashFile(wasmModule.Raw)
- if err != nil {
- return files, err
- }
- files = append(files, NewFile(strings.TrimPrefix(wasmModule.Path, "/"), hex.EncodeToString(bs), defaultHashingAlg))
- }
-
- for _, planmodule := range b.PlanModules {
- bs, err := hash.HashFile(planmodule.Raw)
- if err != nil {
- return files, err
- }
- files = append(files, NewFile(strings.TrimPrefix(planmodule.Path, "/"), hex.EncodeToString(bs), defaultHashingAlg))
- }
-
- // If the manifest is essentially empty, don't add it to the signatures since it
- // won't be written to the bundle. Otherwise:
- // parse the manifest into a JSON structure;
- // then recursively order the fields of all objects alphabetically and then apply
- // the hash function to result to compute the hash.
- if !b.Manifest.Empty() {
- mbs, err := json.Marshal(b.Manifest)
- if err != nil {
- return files, err
- }
-
- var result map[string]interface{}
- if err := util.Unmarshal(mbs, &result); err != nil {
- return files, err
- }
-
- bs, err = hash.HashFile(result)
- if err != nil {
- return files, err
- }
-
- files = append(files, NewFile(strings.TrimPrefix(ManifestExt, "/"), hex.EncodeToString(bs), defaultHashingAlg))
- }
-
- return files, err
-}
-
-// FormatModules formats Rego modules
-// Modules will be formatted to comply with rego-v0, but Rego compatibility of individual parsed modules will be respected (e.g. if 'rego.v1' is imported).
-func (b *Bundle) FormatModules(useModulePath bool) error {
- return b.FormatModulesForRegoVersion(ast.RegoV0, true, useModulePath)
-}
-
-// FormatModulesForRegoVersion formats Rego modules to comply with a given Rego version
-func (b *Bundle) FormatModulesForRegoVersion(version ast.RegoVersion, preserveModuleRegoVersion bool, useModulePath bool) error {
- var err error
-
- for i, module := range b.Modules {
- opts := format.Opts{}
- if preserveModuleRegoVersion {
- opts.RegoVersion = module.Parsed.RegoVersion()
- opts.ParserOptions = &ast.ParserOptions{
- RegoVersion: opts.RegoVersion,
- }
- } else {
- opts.RegoVersion = version
- }
-
- if module.Raw == nil {
- module.Raw, err = format.AstWithOpts(module.Parsed, opts)
- if err != nil {
- return err
- }
- } else {
- path := module.URL
- if useModulePath {
- path = module.Path
- }
-
- module.Raw, err = format.SourceWithOpts(path, module.Raw, opts)
- if err != nil {
- return err
- }
- }
- b.Modules[i].Raw = module.Raw
- }
- return nil
-}
-
-// GenerateSignature generates the signature for the given bundle.
-func (b *Bundle) GenerateSignature(signingConfig *SigningConfig, keyID string, useModulePath bool) error {
-
- hash, err := NewSignatureHasher(HashingAlgorithm(defaultHashingAlg))
- if err != nil {
- return err
- }
-
- files := []FileInfo{}
-
- for _, module := range b.Modules {
- bytes, err := hash.HashFile(module.Raw)
- if err != nil {
- return err
- }
-
- path := module.URL
- if useModulePath {
- path = module.Path
- }
- files = append(files, NewFile(strings.TrimPrefix(path, "/"), hex.EncodeToString(bytes), defaultHashingAlg))
- }
-
- result, err := hashBundleFiles(hash, b)
- if err != nil {
- return err
- }
- files = append(files, result...)
-
- // generate signed token
- token, err := GenerateSignedToken(files, signingConfig, keyID)
- if err != nil {
- return err
- }
-
- if b.Signatures.isEmpty() {
- b.Signatures = SignaturesConfig{}
- }
-
- if signingConfig.Plugin != "" {
- b.Signatures.Plugin = signingConfig.Plugin
- }
-
- b.Signatures.Signatures = []string{token}
-
- return nil
-}
-
-// ParsedModules returns a map of parsed modules with names that are
-// unique and human readable for the given a bundle name.
-func (b *Bundle) ParsedModules(bundleName string) map[string]*ast.Module {
-
- mods := make(map[string]*ast.Module, len(b.Modules))
-
- for _, mf := range b.Modules {
- mods[modulePathWithPrefix(bundleName, mf.Path)] = mf.Parsed
- }
-
- return mods
-}
-
-func (b *Bundle) RegoVersion(def ast.RegoVersion) ast.RegoVersion {
- if v := b.Manifest.RegoVersion; v != nil {
- if *v == 0 {
- return ast.RegoV0
- } else if *v == 1 {
- return ast.RegoV1
- }
- }
- return def
-}
-
-func (b *Bundle) SetRegoVersion(v ast.RegoVersion) {
- b.Manifest.SetRegoVersion(v)
-}
-
-// RegoVersionForFile returns the rego-version for the specified file path.
-// If there is no defined version for the given path, the default version def is returned.
-// If the version does not correspond to ast.RegoV0 or ast.RegoV1, an error is returned.
-func (b *Bundle) RegoVersionForFile(path string, def ast.RegoVersion) (ast.RegoVersion, error) {
- version, err := b.Manifest.numericRegoVersionForFile(path)
- if err != nil {
- return def, err
- } else if version == nil {
- return def, nil
- } else if *version == 0 {
- return ast.RegoV0, nil
- } else if *version == 1 {
- return ast.RegoV1, nil
- }
- return def, fmt.Errorf("unknown bundle rego-version %d for file '%s'", *version, path)
-}
-
-func (m *Manifest) numericRegoVersionForFile(path string) (*int, error) {
- var version *int
-
- if len(m.FileRegoVersions) != len(m.compiledFileRegoVersions) {
- m.compiledFileRegoVersions = make([]fileRegoVersion, 0, len(m.FileRegoVersions))
- for pattern, v := range m.FileRegoVersions {
- compiled, err := glob.Compile(pattern)
- if err != nil {
- return nil, fmt.Errorf("failed to compile glob pattern %s: %s", pattern, err)
- }
- m.compiledFileRegoVersions = append(m.compiledFileRegoVersions, fileRegoVersion{compiled, v})
- }
- }
-
- for _, fv := range m.compiledFileRegoVersions {
- if fv.path.Match(path) {
- version = &fv.version
- break
- }
- }
-
- if version == nil {
- version = m.RegoVersion
- }
- return version, nil
-}
-
-// Equal returns true if this bundle's contents equal the other bundle's
-// contents.
-func (b Bundle) Equal(other Bundle) bool {
- if !reflect.DeepEqual(b.Data, other.Data) {
- return false
- }
-
- if len(b.Modules) != len(other.Modules) {
- return false
- }
- for i := range b.Modules {
- // To support bundles built from rootless filesystems we ignore a "/" prefix
- // for URLs and Paths, such that "/file" and "file" are equivalent
- if strings.TrimPrefix(b.Modules[i].URL, string(filepath.Separator)) !=
- strings.TrimPrefix(other.Modules[i].URL, string(filepath.Separator)) {
- return false
- }
- if strings.TrimPrefix(b.Modules[i].Path, string(filepath.Separator)) !=
- strings.TrimPrefix(other.Modules[i].Path, string(filepath.Separator)) {
- return false
- }
- if !b.Modules[i].Parsed.Equal(other.Modules[i].Parsed) {
- return false
- }
- if !bytes.Equal(b.Modules[i].Raw, other.Modules[i].Raw) {
- return false
- }
- }
- if (b.Wasm == nil && other.Wasm != nil) || (b.Wasm != nil && other.Wasm == nil) {
- return false
- }
-
- return bytes.Equal(b.Wasm, other.Wasm)
-}
-
-// Copy returns a deep copy of the bundle.
-func (b Bundle) Copy() Bundle {
-
- // Copy data.
- var x interface{} = b.Data
-
- if err := util.RoundTrip(&x); err != nil {
- panic(err)
- }
-
- if x != nil {
- b.Data = x.(map[string]interface{})
- }
-
- // Copy modules.
- for i := range b.Modules {
- bs := make([]byte, len(b.Modules[i].Raw))
- copy(bs, b.Modules[i].Raw)
- b.Modules[i].Raw = bs
- b.Modules[i].Parsed = b.Modules[i].Parsed.Copy()
- }
-
- // Copy manifest.
- b.Manifest = b.Manifest.Copy()
-
- return b
-}
-
-func (b *Bundle) insertData(key []string, value interface{}) error {
- // Build an object with the full structure for the value
- obj, err := mktree(key, value)
- if err != nil {
- return err
- }
-
- // Merge the new data in with the current bundle data object
- merged, ok := merge.InterfaceMaps(b.Data, obj)
- if !ok {
- return fmt.Errorf("failed to insert data file from path %s", filepath.Join(key...))
- }
-
- b.Data = merged
-
- return nil
-}
-
-func (b *Bundle) readData(key []string) *interface{} {
-
- if len(key) == 0 {
- if len(b.Data) == 0 {
- return nil
- }
- var result interface{} = b.Data
- return &result
- }
-
- node := b.Data
-
- for i := 0; i < len(key)-1; i++ {
-
- child, ok := node[key[i]]
- if !ok {
- return nil
- }
-
- childObj, ok := child.(map[string]interface{})
- if !ok {
- return nil
- }
-
- node = childObj
- }
-
- child, ok := node[key[len(key)-1]]
- if !ok {
- return nil
- }
-
- return &child
-}
-
-// Type returns the type of the bundle.
-func (b *Bundle) Type() string {
- if len(b.Patch.Data) != 0 {
- return DeltaBundleType
- }
- return SnapshotBundleType
-}
-
-func mktree(path []string, value interface{}) (map[string]interface{}, error) {
- if len(path) == 0 {
- // For 0 length path the value is the full tree.
- obj, ok := value.(map[string]interface{})
- if !ok {
- return nil, fmt.Errorf("root value must be object")
- }
- return obj, nil
- }
-
- dir := map[string]interface{}{}
- for i := len(path) - 1; i > 0; i-- {
- dir[path[i]] = value
- value = dir
- dir = map[string]interface{}{}
- }
- dir[path[0]] = value
-
- return dir, nil
+ return v1.NewWriter(w)
}
// Merge accepts a set of bundles and merges them into a single result bundle. If there are
@@ -1393,7 +104,7 @@ func mktree(path []string, value interface{}) (map[string]interface{}, error) {
// will have an empty revision except in the special case where a single bundle is provided
// (and in that case the bundle is just returned unmodified.)
func Merge(bundles []*Bundle) (*Bundle, error) {
- return MergeWithRegoVersion(bundles, ast.RegoV0, false)
+ return MergeWithRegoVersion(bundles, ast.DefaultRegoVersion, false)
}
// MergeWithRegoVersion creates a merged bundle from the provided bundles, similar to Merge.
@@ -1405,348 +116,19 @@ func Merge(bundles []*Bundle) (*Bundle, error) {
// If usePath is true, per-file rego-versions will be calculated using the file's ModuleFile.Path; otherwise, the file's
// ModuleFile.URL will be used.
func MergeWithRegoVersion(bundles []*Bundle, regoVersion ast.RegoVersion, usePath bool) (*Bundle, error) {
-
- if len(bundles) == 0 {
- return nil, errors.New("expected at least one bundle")
- }
-
- if len(bundles) == 1 {
- result := bundles[0]
- // We respect the bundle rego-version, defaulting to the provided rego version if not set.
- result.SetRegoVersion(result.RegoVersion(regoVersion))
- fileRegoVersions, err := bundleRegoVersions(result, result.RegoVersion(regoVersion), usePath)
- if err != nil {
- return nil, err
- }
- result.Manifest.FileRegoVersions = fileRegoVersions
- return result, nil
+ if regoVersion == ast.RegoUndefined {
+ regoVersion = ast.DefaultRegoVersion
}
- var roots []string
- var result Bundle
-
- for _, b := range bundles {
-
- if b.Manifest.Roots == nil {
- return nil, errors.New("bundle manifest not initialized")
- }
-
- roots = append(roots, *b.Manifest.Roots...)
-
- result.Modules = append(result.Modules, b.Modules...)
-
- for _, root := range *b.Manifest.Roots {
- key := strings.Split(root, "/")
- if val := b.readData(key); val != nil {
- if err := result.insertData(key, *val); err != nil {
- return nil, err
- }
- }
- }
-
- result.Manifest.WasmResolvers = append(result.Manifest.WasmResolvers, b.Manifest.WasmResolvers...)
- result.WasmModules = append(result.WasmModules, b.WasmModules...)
- result.PlanModules = append(result.PlanModules, b.PlanModules...)
-
- if b.Manifest.RegoVersion != nil || len(b.Manifest.FileRegoVersions) > 0 {
- if result.Manifest.FileRegoVersions == nil {
- result.Manifest.FileRegoVersions = map[string]int{}
- }
-
- fileRegoVersions, err := bundleRegoVersions(b, regoVersion, usePath)
- if err != nil {
- return nil, err
- }
- for k, v := range fileRegoVersions {
- result.Manifest.FileRegoVersions[k] = v
- }
- }
- }
-
- // We respect the bundle rego-version, defaulting to the provided rego version if not set.
- result.SetRegoVersion(result.RegoVersion(regoVersion))
-
- if result.Data == nil {
- result.Data = map[string]interface{}{}
- }
-
- result.Manifest.Roots = &roots
-
- if err := result.Manifest.validateAndInjectDefaults(result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-func bundleRegoVersions(bundle *Bundle, regoVersion ast.RegoVersion, usePath bool) (map[string]int, error) {
- fileRegoVersions := map[string]int{}
-
- // we drop the bundle-global rego versions and record individual rego versions for each module.
- for _, m := range bundle.Modules {
- // We fetch rego-version by the path relative to the bundle root, as the complete path of the module might
- // contain the path between OPA working directory and the bundle root.
- v, err := bundle.RegoVersionForFile(bundleRelativePath(m, usePath), bundle.RegoVersion(regoVersion))
- if err != nil {
- return nil, err
- }
- // only record the rego version if it's different from one applied globally to the result bundle
- if v != regoVersion {
- // We store the rego version by the absolute path to the bundle root, as this will be the - possibly new - path
- // to the module inside the merged bundle.
- fileRegoVersions[bundleAbsolutePath(m, usePath)] = v.Int()
- }
- }
-
- return fileRegoVersions, nil
-}
-
-func bundleRelativePath(m ModuleFile, usePath bool) string {
- p := m.RelativePath
- if p == "" {
- if usePath {
- p = m.Path
- } else {
- p = m.URL
- }
- }
- return p
-}
-
-func bundleAbsolutePath(m ModuleFile, usePath bool) string {
- var p string
- if usePath {
- p = m.Path
- } else {
- p = m.URL
- }
- if !path.IsAbs(p) {
- p = "/" + p
- }
- return path.Clean(p)
+ return v1.MergeWithRegoVersion(bundles, regoVersion, usePath)
}
// RootPathsOverlap takes in two bundle root paths and returns true if they overlap.
func RootPathsOverlap(pathA string, pathB string) bool {
- a := rootPathSegments(pathA)
- b := rootPathSegments(pathB)
- return rootContains(a, b) || rootContains(b, a)
+ return v1.RootPathsOverlap(pathA, pathB)
}
// RootPathsContain takes a set of bundle root paths and returns true if the path is contained.
func RootPathsContain(roots []string, path string) bool {
- segments := rootPathSegments(path)
- for i := range roots {
- if rootContains(rootPathSegments(roots[i]), segments) {
- return true
- }
- }
- return false
-}
-
-func rootPathSegments(path string) []string {
- return strings.Split(path, "/")
-}
-
-func rootContains(root []string, other []string) bool {
-
- // A single segment, empty string root always contains the other.
- if len(root) == 1 && root[0] == "" {
- return true
- }
-
- if len(root) > len(other) {
- return false
- }
-
- for j := range root {
- if root[j] != other[j] {
- return false
- }
- }
-
- return true
-}
-
-func insertValue(b *Bundle, path string, value interface{}) error {
- if err := b.insertData(getNormalizedPath(path), value); err != nil {
- return fmt.Errorf("bundle load failed on %v: %w", path, err)
- }
- return nil
-}
-
-func getNormalizedPath(path string) []string {
- // Remove leading / and . characters from the directory path. If the bundle
- // was written with OPA then the paths will contain a leading slash. On the
- // other hand, if the path is empty, filepath.Dir will return '.'.
- // Note: filepath.Dir can return paths with '\' separators, always use
- // filepath.ToSlash to keep them normalized.
- dirpath := strings.TrimLeft(normalizePath(filepath.Dir(path)), "/.")
- var key []string
- if dirpath != "" {
- key = strings.Split(dirpath, "/")
- }
- return key
-}
-
-func dfs(value interface{}, path string, fn func(string, interface{}) (bool, error)) error {
- if stop, err := fn(path, value); err != nil {
- return err
- } else if stop {
- return nil
- }
- obj, ok := value.(map[string]interface{})
- if !ok {
- return nil
- }
- for key := range obj {
- if err := dfs(obj[key], path+"/"+key, fn); err != nil {
- return err
- }
- }
- return nil
-}
-
-func modulePathWithPrefix(bundleName string, modulePath string) string {
- // Default prefix is just the bundle name
- prefix := bundleName
-
- // Bundle names are sometimes just file paths, some of which
- // are full urls (file:///foo/). Parse these and only use the path.
- parsed, err := url.Parse(bundleName)
- if err == nil {
- prefix = filepath.Join(parsed.Host, parsed.Path)
- }
-
- // Note: filepath.Join can return paths with '\' separators, always use
- // filepath.ToSlash to keep them normalized.
- return normalizePath(filepath.Join(prefix, modulePath))
-}
-
-// IsStructuredDoc checks if the file name equals a structured file extension ex. ".json"
-func IsStructuredDoc(name string) bool {
- return filepath.Base(name) == dataFile || filepath.Base(name) == yamlDataFile ||
- filepath.Base(name) == SignaturesFile || filepath.Base(name) == ManifestExt
-}
-
-func preProcessBundle(loader DirectoryLoader, skipVerify bool, sizeLimitBytes int64) (SignaturesConfig, Patch, []*Descriptor, error) {
- descriptors := []*Descriptor{}
- var signatures SignaturesConfig
- var patch Patch
-
- for {
- f, err := loader.NextFile()
- if err == io.EOF {
- break
- }
-
- if err != nil {
- return signatures, patch, nil, fmt.Errorf("bundle read failed: %w", err)
- }
-
- // check for the signatures file
- if !skipVerify && strings.HasSuffix(f.Path(), SignaturesFile) {
- buf, err := readFile(f, sizeLimitBytes)
- if err != nil {
- return signatures, patch, nil, err
- }
-
- if err := util.NewJSONDecoder(&buf).Decode(&signatures); err != nil {
- return signatures, patch, nil, fmt.Errorf("bundle load failed on signatures decode: %w", err)
- }
- } else if !strings.HasSuffix(f.Path(), SignaturesFile) {
- descriptors = append(descriptors, f)
-
- if filepath.Base(f.Path()) == patchFile {
-
- var b bytes.Buffer
- tee := io.TeeReader(f.reader, &b)
- f.reader = tee
-
- buf, err := readFile(f, sizeLimitBytes)
- if err != nil {
- return signatures, patch, nil, err
- }
-
- if err := util.NewJSONDecoder(&buf).Decode(&patch); err != nil {
- return signatures, patch, nil, fmt.Errorf("bundle load failed on patch decode: %w", err)
- }
-
- f.reader = &b
- }
- }
- }
- return signatures, patch, descriptors, nil
-}
-
-func readFile(f *Descriptor, sizeLimitBytes int64) (bytes.Buffer, error) {
- // Case for pre-loaded byte buffers, like those from the tarballLoader.
- if bb, ok := f.reader.(*bytes.Buffer); ok {
- _ = f.Close() // always close, even on error
-
- if int64(bb.Len()) >= sizeLimitBytes {
- return *bb, fmt.Errorf("bundle file '%v' size (%d bytes) exceeded max size (%v bytes)",
- strings.TrimPrefix(f.Path(), "/"), bb.Len(), sizeLimitBytes-1)
- }
-
- return *bb, nil
- }
-
- // Case for *lazyFile readers:
- if lf, ok := f.reader.(*lazyFile); ok {
- var buf bytes.Buffer
- if lf.file == nil {
- var err error
- if lf.file, err = os.Open(lf.path); err != nil {
- return buf, fmt.Errorf("failed to open file %s: %w", f.path, err)
- }
- }
- // Bail out if we can't read the whole file-- there's nothing useful we can do at that point!
- fileSize, _ := fstatFileSize(lf.file)
- if fileSize > sizeLimitBytes {
- return buf, fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(f.Path(), "/"), fileSize, sizeLimitBytes-1)
- }
- // Prealloc the buffer for the file read.
- buffer := make([]byte, fileSize)
- _, err := io.ReadFull(lf.file, buffer)
- if err != nil {
- return buf, err
- }
- _ = lf.file.Close() // always close, even on error
-
- // Note(philipc): Replace the lazyFile reader in the *Descriptor with a
- // pointer to the wrapping bytes.Buffer, so that we don't re-read the
- // file on disk again by accident.
- buf = *bytes.NewBuffer(buffer)
- f.reader = &buf
- return buf, nil
- }
-
- // Fallback case:
- var buf bytes.Buffer
- n, err := f.Read(&buf, sizeLimitBytes)
- _ = f.Close() // always close, even on error
-
- if err != nil && err != io.EOF {
- return buf, err
- } else if err == nil && n >= sizeLimitBytes {
- return buf, fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(f.Path(), "/"), n, sizeLimitBytes-1)
- }
-
- return buf, nil
-}
-
-// Takes an already open file handle and invokes the os.Stat system call on it
-// to determine the file's size. Passes any errors from *File.Stat on up to the
-// caller.
-func fstatFileSize(f *os.File) (int64, error) {
- fileInfo, err := f.Stat()
- if err != nil {
- return 0, err
- }
- return fileInfo.Size(), nil
-}
-
-func normalizePath(p string) string {
- return filepath.ToSlash(p)
+ return v1.RootPathsContain(roots, path)
}
diff --git a/vendor/github.com/open-policy-agent/opa/bundle/doc.go b/vendor/github.com/open-policy-agent/opa/bundle/doc.go
new file mode 100644
index 0000000000..7ec7c9b332
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/bundle/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2024 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended.
+// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead.
+// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information.
+package bundle
diff --git a/vendor/github.com/open-policy-agent/opa/bundle/file.go b/vendor/github.com/open-policy-agent/opa/bundle/file.go
index 80b1a87eb1..ccb7b23510 100644
--- a/vendor/github.com/open-policy-agent/opa/bundle/file.go
+++ b/vendor/github.com/open-policy-agent/opa/bundle/file.go
@@ -1,508 +1,50 @@
package bundle
import (
- "archive/tar"
- "bytes"
- "compress/gzip"
- "fmt"
"io"
- "io/fs"
- "os"
- "path/filepath"
- "sort"
- "strings"
- "sync"
-
- "github.com/open-policy-agent/opa/loader/filter"
"github.com/open-policy-agent/opa/storage"
+ v1 "github.com/open-policy-agent/opa/v1/bundle"
)
-const maxSizeLimitBytesErrMsg = "bundle file %s size (%d bytes) exceeds configured size_limit_bytes (%d bytes)"
-
// Descriptor contains information about a file and
// can be used to read the file contents.
-type Descriptor struct {
- url string
- path string
- reader io.Reader
- closer io.Closer
- closeOnce *sync.Once
-}
-
-// lazyFile defers reading the file until the first call of Read
-type lazyFile struct {
- path string
- file *os.File
-}
-
-// newLazyFile creates a new instance of lazyFile
-func newLazyFile(path string) *lazyFile {
- return &lazyFile{path: path}
-}
-
-// Read implements io.Reader. It will check if the file has been opened
-// and open it if it has not before attempting to read using the file's
-// read method
-func (f *lazyFile) Read(b []byte) (int, error) {
- var err error
-
- if f.file == nil {
- if f.file, err = os.Open(f.path); err != nil {
- return 0, fmt.Errorf("failed to open file %s: %w", f.path, err)
- }
- }
-
- return f.file.Read(b)
-}
-
-// Close closes the lazy file if it has been opened using the file's
-// close method
-func (f *lazyFile) Close() error {
- if f.file != nil {
- return f.file.Close()
- }
-
- return nil
-}
+type Descriptor = v1.Descriptor
func NewDescriptor(url, path string, reader io.Reader) *Descriptor {
- return &Descriptor{
- url: url,
- path: path,
- reader: reader,
- }
-}
-
-func (d *Descriptor) WithCloser(closer io.Closer) *Descriptor {
- d.closer = closer
- d.closeOnce = new(sync.Once)
- return d
-}
-
-// Path returns the path of the file.
-func (d *Descriptor) Path() string {
- return d.path
-}
-
-// URL returns the url of the file.
-func (d *Descriptor) URL() string {
- return d.url
-}
-
-// Read will read all the contents from the file the Descriptor refers to
-// into the dest writer up n bytes. Will return an io.EOF error
-// if EOF is encountered before n bytes are read.
-func (d *Descriptor) Read(dest io.Writer, n int64) (int64, error) {
- n, err := io.CopyN(dest, d.reader, n)
- return n, err
+ return v1.NewDescriptor(url, path, reader)
}
-// Close the file, on some Loader implementations this might be a no-op.
-// It should *always* be called regardless of file.
-func (d *Descriptor) Close() error {
- var err error
- if d.closer != nil {
- d.closeOnce.Do(func() {
- err = d.closer.Close()
- })
- }
- return err
-}
-
-type PathFormat int64
+type PathFormat = v1.PathFormat
const (
- Chrooted PathFormat = iota
- SlashRooted
- Passthrough
+ Chrooted = v1.Chrooted
+ SlashRooted = v1.SlashRooted
+ Passthrough = v1.Passthrough
)
// DirectoryLoader defines an interface which can be used to load
// files from a directory by iterating over each one in the tree.
-type DirectoryLoader interface {
- // NextFile must return io.EOF if there is no next value. The returned
- // descriptor should *always* be closed when no longer needed.
- NextFile() (*Descriptor, error)
- WithFilter(filter filter.LoaderFilter) DirectoryLoader
- WithPathFormat(PathFormat) DirectoryLoader
- WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader
- WithFollowSymlinks(followSymlinks bool) DirectoryLoader
-}
-
-type dirLoader struct {
- root string
- files []string
- idx int
- filter filter.LoaderFilter
- pathFormat PathFormat
- maxSizeLimitBytes int64
- followSymlinks bool
-}
-
-// Normalize root directory, ex "./src/bundle" -> "src/bundle"
-// We don't need an absolute path, but this makes the joined/trimmed
-// paths more uniform.
-func normalizeRootDirectory(root string) string {
- if len(root) > 1 {
- if root[0] == '.' && root[1] == filepath.Separator {
- if len(root) == 2 {
- root = root[:1] // "./" -> "."
- } else {
- root = root[2:] // remove leading "./"
- }
- }
- }
- return root
-}
+type DirectoryLoader = v1.DirectoryLoader
// NewDirectoryLoader returns a basic DirectoryLoader implementation
// that will load files from a given root directory path.
func NewDirectoryLoader(root string) DirectoryLoader {
- d := dirLoader{
- root: normalizeRootDirectory(root),
- pathFormat: Chrooted,
- }
- return &d
-}
-
-// WithFilter specifies the filter object to use to filter files while loading bundles
-func (d *dirLoader) WithFilter(filter filter.LoaderFilter) DirectoryLoader {
- d.filter = filter
- return d
-}
-
-// WithPathFormat specifies how a path is formatted in a Descriptor
-func (d *dirLoader) WithPathFormat(pathFormat PathFormat) DirectoryLoader {
- d.pathFormat = pathFormat
- return d
-}
-
-// WithSizeLimitBytes specifies the maximum size of any file in the directory to read
-func (d *dirLoader) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader {
- d.maxSizeLimitBytes = sizeLimitBytes
- return d
-}
-
-// WithFollowSymlinks specifies whether to follow symlinks when loading files from the directory
-func (d *dirLoader) WithFollowSymlinks(followSymlinks bool) DirectoryLoader {
- d.followSymlinks = followSymlinks
- return d
-}
-
-func formatPath(fileName string, root string, pathFormat PathFormat) string {
- switch pathFormat {
- case SlashRooted:
- if !strings.HasPrefix(fileName, string(filepath.Separator)) {
- return string(filepath.Separator) + fileName
- }
- return fileName
- case Chrooted:
- // Trim off the root directory and return path as if chrooted
- result := strings.TrimPrefix(fileName, filepath.FromSlash(root))
- if root == "." && filepath.Base(fileName) == ManifestExt {
- result = fileName
- }
- if !strings.HasPrefix(result, string(filepath.Separator)) {
- result = string(filepath.Separator) + result
- }
- return result
- case Passthrough:
- fallthrough
- default:
- return fileName
- }
-}
-
-// NextFile iterates to the next file in the directory tree
-// and returns a file Descriptor for the file.
-func (d *dirLoader) NextFile() (*Descriptor, error) {
- // build a list of all files we will iterate over and read, but only one time
- if d.files == nil {
- d.files = []string{}
- err := filepath.Walk(d.root, func(path string, info os.FileInfo, _ error) error {
- if info == nil {
- return nil
- }
-
- if info.Mode().IsRegular() {
- if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) {
- return nil
- }
- if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes {
- return fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(path, "/"), info.Size(), d.maxSizeLimitBytes)
- }
- d.files = append(d.files, path)
- } else if d.followSymlinks && info.Mode().Type()&fs.ModeSymlink == fs.ModeSymlink {
- if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) {
- return nil
- }
- if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes {
- return fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(path, "/"), info.Size(), d.maxSizeLimitBytes)
- }
- d.files = append(d.files, path)
- } else if info.Mode().IsDir() {
- if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) {
- return filepath.SkipDir
- }
- }
- return nil
- })
- if err != nil {
- return nil, fmt.Errorf("failed to list files: %w", err)
- }
- }
-
- // If done reading files then just return io.EOF
- // errors for each NextFile() call
- if d.idx >= len(d.files) {
- return nil, io.EOF
- }
-
- fileName := d.files[d.idx]
- d.idx++
- fh := newLazyFile(fileName)
-
- cleanedPath := formatPath(fileName, d.root, d.pathFormat)
- f := NewDescriptor(filepath.Join(d.root, cleanedPath), cleanedPath, fh).WithCloser(fh)
- return f, nil
-}
-
-type tarballLoader struct {
- baseURL string
- r io.Reader
- tr *tar.Reader
- files []file
- idx int
- filter filter.LoaderFilter
- skipDir map[string]struct{}
- pathFormat PathFormat
- maxSizeLimitBytes int64
-}
-
-type file struct {
- name string
- reader io.Reader
- path storage.Path
- raw []byte
+ return v1.NewDirectoryLoader(root)
}
// NewTarballLoader is deprecated. Use NewTarballLoaderWithBaseURL instead.
func NewTarballLoader(r io.Reader) DirectoryLoader {
- l := tarballLoader{
- r: r,
- pathFormat: Passthrough,
- }
- return &l
+ return v1.NewTarballLoader(r)
}
// NewTarballLoaderWithBaseURL returns a new DirectoryLoader that reads
// files out of a gzipped tar archive. The file URLs will be prefixed
// with the baseURL.
func NewTarballLoaderWithBaseURL(r io.Reader, baseURL string) DirectoryLoader {
- l := tarballLoader{
- baseURL: strings.TrimSuffix(baseURL, "/"),
- r: r,
- pathFormat: Passthrough,
- }
- return &l
-}
-
-// WithFilter specifies the filter object to use to filter files while loading bundles
-func (t *tarballLoader) WithFilter(filter filter.LoaderFilter) DirectoryLoader {
- t.filter = filter
- return t
-}
-
-// WithPathFormat specifies how a path is formatted in a Descriptor
-func (t *tarballLoader) WithPathFormat(pathFormat PathFormat) DirectoryLoader {
- t.pathFormat = pathFormat
- return t
-}
-
-// WithSizeLimitBytes specifies the maximum size of any file in the tarball to read
-func (t *tarballLoader) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader {
- t.maxSizeLimitBytes = sizeLimitBytes
- return t
-}
-
-// WithFollowSymlinks is a no-op for tarballLoader
-func (t *tarballLoader) WithFollowSymlinks(_ bool) DirectoryLoader {
- return t
-}
-
-// NextFile iterates to the next file in the directory tree
-// and returns a file Descriptor for the file.
-func (t *tarballLoader) NextFile() (*Descriptor, error) {
- if t.tr == nil {
- gr, err := gzip.NewReader(t.r)
- if err != nil {
- return nil, fmt.Errorf("archive read failed: %w", err)
- }
-
- t.tr = tar.NewReader(gr)
- }
-
- if t.files == nil {
- t.files = []file{}
-
- if t.skipDir == nil {
- t.skipDir = map[string]struct{}{}
- }
-
- for {
- header, err := t.tr.Next()
-
- if err == io.EOF {
- break
- }
-
- if err != nil {
- return nil, err
- }
-
- // Keep iterating on the archive until we find a normal file
- if header.Typeflag == tar.TypeReg {
-
- if t.filter != nil {
-
- if t.filter(filepath.ToSlash(header.Name), header.FileInfo(), getdepth(header.Name, false)) {
- continue
- }
-
- basePath := strings.Trim(filepath.Dir(filepath.ToSlash(header.Name)), "/")
-
- // check if the directory is to be skipped
- if _, ok := t.skipDir[basePath]; ok {
- continue
- }
-
- match := false
- for p := range t.skipDir {
- if strings.HasPrefix(basePath, p) {
- match = true
- break
- }
- }
-
- if match {
- continue
- }
- }
-
- if t.maxSizeLimitBytes > 0 && header.Size > t.maxSizeLimitBytes {
- return nil, fmt.Errorf(maxSizeLimitBytesErrMsg, header.Name, header.Size, t.maxSizeLimitBytes)
- }
-
- f := file{name: header.Name}
-
- // Note(philipc): We rely on the previous size check in this loop for safety.
- buf := bytes.NewBuffer(make([]byte, 0, header.Size))
- if _, err := io.Copy(buf, t.tr); err != nil {
- return nil, fmt.Errorf("failed to copy file %s: %w", header.Name, err)
- }
-
- f.reader = buf
-
- t.files = append(t.files, f)
- } else if header.Typeflag == tar.TypeDir {
- cleanedPath := filepath.ToSlash(header.Name)
- if t.filter != nil && t.filter(cleanedPath, header.FileInfo(), getdepth(header.Name, true)) {
- t.skipDir[strings.Trim(cleanedPath, "/")] = struct{}{}
- }
- }
- }
- }
-
- // If done reading files then just return io.EOF
- // errors for each NextFile() call
- if t.idx >= len(t.files) {
- return nil, io.EOF
- }
-
- f := t.files[t.idx]
- t.idx++
-
- cleanedPath := formatPath(f.name, "", t.pathFormat)
- d := NewDescriptor(filepath.Join(t.baseURL, cleanedPath), cleanedPath, f.reader)
- return d, nil
-}
-
-// Next implements the storage.Iterator interface.
-// It iterates to the next policy or data file in the directory tree
-// and returns a storage.Update for the file.
-func (it *iterator) Next() (*storage.Update, error) {
- if it.files == nil {
- it.files = []file{}
-
- for _, item := range it.raw {
- f := file{name: item.Path}
-
- fpath := strings.TrimLeft(normalizePath(filepath.Dir(f.name)), "/.")
- if strings.HasSuffix(f.name, RegoExt) {
- fpath = strings.Trim(normalizePath(f.name), "/")
- }
-
- p, ok := storage.ParsePathEscaped("/" + fpath)
- if !ok {
- return nil, fmt.Errorf("storage path invalid: %v", f.name)
- }
- f.path = p
-
- f.raw = item.Value
-
- it.files = append(it.files, f)
- }
-
- sortFilePathAscend(it.files)
- }
-
- // If done reading files then just return io.EOF
- // errors for each NextFile() call
- if it.idx >= len(it.files) {
- return nil, io.EOF
- }
-
- f := it.files[it.idx]
- it.idx++
-
- isPolicy := false
- if strings.HasSuffix(f.name, RegoExt) {
- isPolicy = true
- }
-
- return &storage.Update{
- Path: f.path,
- Value: f.raw,
- IsPolicy: isPolicy,
- }, nil
-}
-
-type iterator struct {
- raw []Raw
- files []file
- idx int
+ return v1.NewTarballLoaderWithBaseURL(r, baseURL)
}
func NewIterator(raw []Raw) storage.Iterator {
- it := iterator{
- raw: raw,
- }
- return &it
-}
-
-func sortFilePathAscend(files []file) {
- sort.Slice(files, func(i, j int) bool {
- return len(files[i].path) < len(files[j].path)
- })
-}
-
-func getdepth(path string, isDir bool) int {
- if isDir {
- cleanedPath := strings.Trim(filepath.ToSlash(path), "/")
- return len(strings.Split(cleanedPath, "/"))
- }
-
- basePath := strings.Trim(filepath.Dir(filepath.ToSlash(path)), "/")
- return len(strings.Split(basePath, "/"))
+ return v1.NewIterator(raw)
}
diff --git a/vendor/github.com/open-policy-agent/opa/bundle/filefs.go b/vendor/github.com/open-policy-agent/opa/bundle/filefs.go
index a3a0dbf204..16e00928da 100644
--- a/vendor/github.com/open-policy-agent/opa/bundle/filefs.go
+++ b/vendor/github.com/open-policy-agent/opa/bundle/filefs.go
@@ -4,140 +4,19 @@
package bundle
import (
- "fmt"
- "io"
"io/fs"
- "path/filepath"
- "sync"
- "github.com/open-policy-agent/opa/loader/filter"
+ v1 "github.com/open-policy-agent/opa/v1/bundle"
)
-const (
- defaultFSLoaderRoot = "."
-)
-
-type dirLoaderFS struct {
- sync.Mutex
- filesystem fs.FS
- files []string
- idx int
- filter filter.LoaderFilter
- root string
- pathFormat PathFormat
- maxSizeLimitBytes int64
- followSymlinks bool
-}
-
// NewFSLoader returns a basic DirectoryLoader implementation
// that will load files from a fs.FS interface
func NewFSLoader(filesystem fs.FS) (DirectoryLoader, error) {
- return NewFSLoaderWithRoot(filesystem, defaultFSLoaderRoot), nil
+ return v1.NewFSLoader(filesystem)
}
// NewFSLoaderWithRoot returns a basic DirectoryLoader implementation
// that will load files from a fs.FS interface at the supplied root
func NewFSLoaderWithRoot(filesystem fs.FS, root string) DirectoryLoader {
- d := dirLoaderFS{
- filesystem: filesystem,
- root: normalizeRootDirectory(root),
- pathFormat: Chrooted,
- }
-
- return &d
-}
-
-func (d *dirLoaderFS) walkDir(path string, dirEntry fs.DirEntry, err error) error {
- if err != nil {
- return err
- }
-
- if dirEntry != nil {
- info, err := dirEntry.Info()
- if err != nil {
- return err
- }
-
- if dirEntry.Type().IsRegular() {
- if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) {
- return nil
- }
-
- if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes {
- return fmt.Errorf("file %s size %d exceeds limit of %d", path, info.Size(), d.maxSizeLimitBytes)
- }
-
- d.files = append(d.files, path)
- } else if dirEntry.Type()&fs.ModeSymlink != 0 && d.followSymlinks {
- if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) {
- return nil
- }
-
- if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes {
- return fmt.Errorf("file %s size %d exceeds limit of %d", path, info.Size(), d.maxSizeLimitBytes)
- }
-
- d.files = append(d.files, path)
- } else if dirEntry.Type().IsDir() {
- if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) {
- return fs.SkipDir
- }
- }
- }
- return nil
-}
-
-// WithFilter specifies the filter object to use to filter files while loading bundles
-func (d *dirLoaderFS) WithFilter(filter filter.LoaderFilter) DirectoryLoader {
- d.filter = filter
- return d
-}
-
-// WithPathFormat specifies how a path is formatted in a Descriptor
-func (d *dirLoaderFS) WithPathFormat(pathFormat PathFormat) DirectoryLoader {
- d.pathFormat = pathFormat
- return d
-}
-
-// WithSizeLimitBytes specifies the maximum size of any file in the filesystem directory to read
-func (d *dirLoaderFS) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader {
- d.maxSizeLimitBytes = sizeLimitBytes
- return d
-}
-
-func (d *dirLoaderFS) WithFollowSymlinks(followSymlinks bool) DirectoryLoader {
- d.followSymlinks = followSymlinks
- return d
-}
-
-// NextFile iterates to the next file in the directory tree
-// and returns a file Descriptor for the file.
-func (d *dirLoaderFS) NextFile() (*Descriptor, error) {
- d.Lock()
- defer d.Unlock()
-
- if d.files == nil {
- err := fs.WalkDir(d.filesystem, d.root, d.walkDir)
- if err != nil {
- return nil, fmt.Errorf("failed to list files: %w", err)
- }
- }
-
- // If done reading files then just return io.EOF
- // errors for each NextFile() call
- if d.idx >= len(d.files) {
- return nil, io.EOF
- }
-
- fileName := d.files[d.idx]
- d.idx++
-
- fh, err := d.filesystem.Open(fileName)
- if err != nil {
- return nil, fmt.Errorf("failed to open file %s: %w", fileName, err)
- }
-
- cleanedPath := formatPath(fileName, d.root, d.pathFormat)
- f := NewDescriptor(cleanedPath, cleanedPath, fh).WithCloser(fh)
- return f, nil
+ return v1.NewFSLoaderWithRoot(filesystem, root)
}
diff --git a/vendor/github.com/open-policy-agent/opa/bundle/hash.go b/vendor/github.com/open-policy-agent/opa/bundle/hash.go
index 021801bb0a..d4cc601dea 100644
--- a/vendor/github.com/open-policy-agent/opa/bundle/hash.go
+++ b/vendor/github.com/open-policy-agent/opa/bundle/hash.go
@@ -5,137 +5,28 @@
package bundle
import (
- "bytes"
- "crypto/md5"
- "crypto/sha1"
- "crypto/sha256"
- "crypto/sha512"
- "encoding/json"
- "fmt"
- "hash"
- "io"
- "sort"
- "strings"
+ v1 "github.com/open-policy-agent/opa/v1/bundle"
)
// HashingAlgorithm represents a subset of hashing algorithms implemented in Go
-type HashingAlgorithm string
+type HashingAlgorithm = v1.HashingAlgorithm
// Supported values for HashingAlgorithm
const (
- MD5 HashingAlgorithm = "MD5"
- SHA1 HashingAlgorithm = "SHA-1"
- SHA224 HashingAlgorithm = "SHA-224"
- SHA256 HashingAlgorithm = "SHA-256"
- SHA384 HashingAlgorithm = "SHA-384"
- SHA512 HashingAlgorithm = "SHA-512"
- SHA512224 HashingAlgorithm = "SHA-512-224"
- SHA512256 HashingAlgorithm = "SHA-512-256"
+ MD5 = v1.MD5
+ SHA1 = v1.SHA1
+ SHA224 = v1.SHA224
+ SHA256 = v1.SHA256
+ SHA384 = v1.SHA384
+ SHA512 = v1.SHA512
+ SHA512224 = v1.SHA512224
+ SHA512256 = v1.SHA512256
)
-// String returns the string representation of a HashingAlgorithm
-func (alg HashingAlgorithm) String() string {
- return string(alg)
-}
-
// SignatureHasher computes a signature digest for a file with (structured or unstructured) data and policy
-type SignatureHasher interface {
- HashFile(v interface{}) ([]byte, error)
-}
-
-type hasher struct {
- h func() hash.Hash // hash function factory
-}
+type SignatureHasher = v1.SignatureHasher
// NewSignatureHasher returns a signature hasher suitable for a particular hashing algorithm
func NewSignatureHasher(alg HashingAlgorithm) (SignatureHasher, error) {
- h := &hasher{}
-
- switch alg {
- case MD5:
- h.h = md5.New
- case SHA1:
- h.h = sha1.New
- case SHA224:
- h.h = sha256.New224
- case SHA256:
- h.h = sha256.New
- case SHA384:
- h.h = sha512.New384
- case SHA512:
- h.h = sha512.New
- case SHA512224:
- h.h = sha512.New512_224
- case SHA512256:
- h.h = sha512.New512_256
- default:
- return nil, fmt.Errorf("unsupported hashing algorithm: %s", alg)
- }
-
- return h, nil
-}
-
-// HashFile hashes the file content, JSON or binary, both in golang native format.
-func (h *hasher) HashFile(v interface{}) ([]byte, error) {
- hf := h.h()
- walk(v, hf)
- return hf.Sum(nil), nil
-}
-
-// walk hashes the file content, JSON or binary, both in golang native format.
-//
-// Computation for unstructured documents is a hash of the document.
-//
-// Computation for the types of structured JSON document is as follows:
-//
-// object: Hash {, then each key (in alphabetical order) and digest of the value, then comma (between items) and finally }.
-//
-// array: Hash [, then digest of the value, then comma (between items) and finally ].
-func walk(v interface{}, h io.Writer) {
-
- switch x := v.(type) {
- case map[string]interface{}:
- _, _ = h.Write([]byte("{"))
-
- var keys []string
- for k := range x {
- keys = append(keys, k)
- }
- sort.Strings(keys)
-
- for i, key := range keys {
- if i > 0 {
- _, _ = h.Write([]byte(","))
- }
-
- _, _ = h.Write(encodePrimitive(key))
- _, _ = h.Write([]byte(":"))
- walk(x[key], h)
- }
-
- _, _ = h.Write([]byte("}"))
- case []interface{}:
- _, _ = h.Write([]byte("["))
-
- for i, e := range x {
- if i > 0 {
- _, _ = h.Write([]byte(","))
- }
- walk(e, h)
- }
-
- _, _ = h.Write([]byte("]"))
- case []byte:
- _, _ = h.Write(x)
- default:
- _, _ = h.Write(encodePrimitive(x))
- }
-}
-
-func encodePrimitive(v interface{}) []byte {
- var buf bytes.Buffer
- encoder := json.NewEncoder(&buf)
- encoder.SetEscapeHTML(false)
- _ = encoder.Encode(v)
- return []byte(strings.Trim(buf.String(), "\n"))
+ return v1.NewSignatureHasher(alg)
}
diff --git a/vendor/github.com/open-policy-agent/opa/bundle/keys.go b/vendor/github.com/open-policy-agent/opa/bundle/keys.go
index 810bee4b72..99f9b0f165 100644
--- a/vendor/github.com/open-policy-agent/opa/bundle/keys.go
+++ b/vendor/github.com/open-policy-agent/opa/bundle/keys.go
@@ -6,139 +6,25 @@
package bundle
import (
- "encoding/pem"
- "fmt"
- "os"
-
- "github.com/open-policy-agent/opa/internal/jwx/jwa"
- "github.com/open-policy-agent/opa/internal/jwx/jws/sign"
- "github.com/open-policy-agent/opa/keys"
-
- "github.com/open-policy-agent/opa/util"
-)
-
-const (
- defaultTokenSigningAlg = "RS256"
+ v1 "github.com/open-policy-agent/opa/v1/bundle"
)
// KeyConfig holds the keys used to sign or verify bundles and tokens
// Moved to own package, alias kept for backwards compatibility
-type KeyConfig = keys.Config
+type KeyConfig = v1.KeyConfig
// VerificationConfig represents the key configuration used to verify a signed bundle
-type VerificationConfig struct {
- PublicKeys map[string]*KeyConfig
- KeyID string `json:"keyid"`
- Scope string `json:"scope"`
- Exclude []string `json:"exclude_files"`
-}
+type VerificationConfig = v1.VerificationConfig
// NewVerificationConfig return a new VerificationConfig
func NewVerificationConfig(keys map[string]*KeyConfig, id, scope string, exclude []string) *VerificationConfig {
- return &VerificationConfig{
- PublicKeys: keys,
- KeyID: id,
- Scope: scope,
- Exclude: exclude,
- }
-}
-
-// ValidateAndInjectDefaults validates the config and inserts default values
-func (vc *VerificationConfig) ValidateAndInjectDefaults(keys map[string]*KeyConfig) error {
- vc.PublicKeys = keys
-
- if vc.KeyID != "" {
- found := false
- for key := range keys {
- if key == vc.KeyID {
- found = true
- break
- }
- }
-
- if !found {
- return fmt.Errorf("key id %s not found", vc.KeyID)
- }
- }
- return nil
-}
-
-// GetPublicKey returns the public key corresponding to the given key id
-func (vc *VerificationConfig) GetPublicKey(id string) (*KeyConfig, error) {
- var kc *KeyConfig
- var ok bool
-
- if kc, ok = vc.PublicKeys[id]; !ok {
- return nil, fmt.Errorf("verification key corresponding to ID %v not found", id)
- }
- return kc, nil
+ return v1.NewVerificationConfig(keys, id, scope, exclude)
}
// SigningConfig represents the key configuration used to generate a signed bundle
-type SigningConfig struct {
- Plugin string
- Key string
- Algorithm string
- ClaimsPath string
-}
+type SigningConfig = v1.SigningConfig
// NewSigningConfig return a new SigningConfig
func NewSigningConfig(key, alg, claimsPath string) *SigningConfig {
- if alg == "" {
- alg = defaultTokenSigningAlg
- }
-
- return &SigningConfig{
- Plugin: defaultSignerID,
- Key: key,
- Algorithm: alg,
- ClaimsPath: claimsPath,
- }
-}
-
-// WithPlugin sets the signing plugin in the signing config
-func (s *SigningConfig) WithPlugin(plugin string) *SigningConfig {
- if plugin != "" {
- s.Plugin = plugin
- }
- return s
-}
-
-// GetPrivateKey returns the private key or secret from the signing config
-func (s *SigningConfig) GetPrivateKey() (interface{}, error) {
-
- block, _ := pem.Decode([]byte(s.Key))
- if block != nil {
- return sign.GetSigningKey(s.Key, jwa.SignatureAlgorithm(s.Algorithm))
- }
-
- var priv string
- if _, err := os.Stat(s.Key); err == nil {
- bs, err := os.ReadFile(s.Key)
- if err != nil {
- return nil, err
- }
- priv = string(bs)
- } else if os.IsNotExist(err) {
- priv = s.Key
- } else {
- return nil, err
- }
-
- return sign.GetSigningKey(priv, jwa.SignatureAlgorithm(s.Algorithm))
-}
-
-// GetClaims returns the claims by reading the file specified in the signing config
-func (s *SigningConfig) GetClaims() (map[string]interface{}, error) {
- var claims map[string]interface{}
-
- bs, err := os.ReadFile(s.ClaimsPath)
- if err != nil {
- return claims, err
- }
-
- if err := util.UnmarshalJSON(bs, &claims); err != nil {
- return claims, err
- }
- return claims, nil
+ return v1.NewSigningConfig(key, alg, claimsPath)
}
diff --git a/vendor/github.com/open-policy-agent/opa/bundle/sign.go b/vendor/github.com/open-policy-agent/opa/bundle/sign.go
index cf9a3e183a..56e25eec9c 100644
--- a/vendor/github.com/open-policy-agent/opa/bundle/sign.go
+++ b/vendor/github.com/open-policy-agent/opa/bundle/sign.go
@@ -6,130 +6,30 @@
package bundle
import (
- "crypto/rand"
- "encoding/json"
- "fmt"
-
- "github.com/open-policy-agent/opa/internal/jwx/jwa"
- "github.com/open-policy-agent/opa/internal/jwx/jws"
+ v1 "github.com/open-policy-agent/opa/v1/bundle"
)
-const defaultSignerID = "_default"
-
-var signers map[string]Signer
-
// Signer is the interface expected for implementations that generate bundle signatures.
-type Signer interface {
- GenerateSignedToken([]FileInfo, *SigningConfig, string) (string, error)
-}
+type Signer v1.Signer
// GenerateSignedToken will retrieve the Signer implementation based on the Plugin specified
// in SigningConfig, and call its implementation of GenerateSignedToken. The signer generates
// a signed token given the list of files to be included in the payload and the bundle
// signing config. The keyID if non-empty, represents the value for the "keyid" claim in the token.
func GenerateSignedToken(files []FileInfo, sc *SigningConfig, keyID string) (string, error) {
- var plugin string
- // for backwards compatibility, check if there is no plugin specified, and use default
- if sc.Plugin == "" {
- plugin = defaultSignerID
- } else {
- plugin = sc.Plugin
- }
- signer, err := GetSigner(plugin)
- if err != nil {
- return "", err
- }
- return signer.GenerateSignedToken(files, sc, keyID)
+ return v1.GenerateSignedToken(files, sc, keyID)
}
// DefaultSigner is the default bundle signing implementation. It signs bundles by generating
// a JWT and signing it using a locally-accessible private key.
-type DefaultSigner struct{}
-
-// GenerateSignedToken generates a signed token given the list of files to be
-// included in the payload and the bundle signing config. The keyID if non-empty,
-// represents the value for the "keyid" claim in the token
-func (*DefaultSigner) GenerateSignedToken(files []FileInfo, sc *SigningConfig, keyID string) (string, error) {
- payload, err := generatePayload(files, sc, keyID)
- if err != nil {
- return "", err
- }
-
- privateKey, err := sc.GetPrivateKey()
- if err != nil {
- return "", err
- }
-
- var headers jws.StandardHeaders
-
- if err := headers.Set(jws.AlgorithmKey, jwa.SignatureAlgorithm(sc.Algorithm)); err != nil {
- return "", err
- }
-
- if keyID != "" {
- if err := headers.Set(jws.KeyIDKey, keyID); err != nil {
- return "", err
- }
- }
-
- hdr, err := json.Marshal(headers)
- if err != nil {
- return "", err
- }
-
- token, err := jws.SignLiteral(payload,
- jwa.SignatureAlgorithm(sc.Algorithm),
- privateKey,
- hdr,
- rand.Reader)
- if err != nil {
- return "", err
- }
- return string(token), nil
-}
-
-func generatePayload(files []FileInfo, sc *SigningConfig, keyID string) ([]byte, error) {
- payload := make(map[string]interface{})
- payload["files"] = files
-
- if sc.ClaimsPath != "" {
- claims, err := sc.GetClaims()
- if err != nil {
- return nil, err
- }
-
- for claim, value := range claims {
- payload[claim] = value
- }
- } else {
- if keyID != "" {
- // keyid claim is deprecated but include it for backwards compatibility.
- payload["keyid"] = keyID
- }
- }
- return json.Marshal(payload)
-}
+type DefaultSigner v1.DefaultSigner
// GetSigner returns the Signer registered under the given id
func GetSigner(id string) (Signer, error) {
- signer, ok := signers[id]
- if !ok {
- return nil, fmt.Errorf("no signer exists under id %s", id)
- }
- return signer, nil
+ return v1.GetSigner(id)
}
// RegisterSigner registers a Signer under the given id
func RegisterSigner(id string, s Signer) error {
- if id == defaultSignerID {
- return fmt.Errorf("signer id %s is reserved, use a different id", id)
- }
- signers[id] = s
- return nil
-}
-
-func init() {
- signers = map[string]Signer{
- defaultSignerID: &DefaultSigner{},
- }
+ return v1.RegisterSigner(id, s)
}
diff --git a/vendor/github.com/open-policy-agent/opa/bundle/store.go b/vendor/github.com/open-policy-agent/opa/bundle/store.go
index 9a49f025e8..9659d67bde 100644
--- a/vendor/github.com/open-policy-agent/opa/bundle/store.go
+++ b/vendor/github.com/open-policy-agent/opa/bundle/store.go
@@ -6,1031 +6,147 @@ package bundle
import (
"context"
- "encoding/base64"
- "encoding/json"
- "fmt"
- "path/filepath"
- "strings"
"github.com/open-policy-agent/opa/ast"
- iCompiler "github.com/open-policy-agent/opa/internal/compiler"
- "github.com/open-policy-agent/opa/internal/json/patch"
- "github.com/open-policy-agent/opa/metrics"
"github.com/open-policy-agent/opa/storage"
- "github.com/open-policy-agent/opa/util"
+ v1 "github.com/open-policy-agent/opa/v1/bundle"
)
// BundlesBasePath is the storage path used for storing bundle metadata
-var BundlesBasePath = storage.MustParsePath("/system/bundles")
+var BundlesBasePath = v1.BundlesBasePath
// Note: As needed these helpers could be memoized.
// ManifestStoragePath is the storage path used for the given named bundle manifest.
func ManifestStoragePath(name string) storage.Path {
- return append(BundlesBasePath, name, "manifest")
+ return v1.ManifestStoragePath(name)
}
// EtagStoragePath is the storage path used for the given named bundle etag.
func EtagStoragePath(name string) storage.Path {
- return append(BundlesBasePath, name, "etag")
-}
-
-func namedBundlePath(name string) storage.Path {
- return append(BundlesBasePath, name)
-}
-
-func rootsPath(name string) storage.Path {
- return append(BundlesBasePath, name, "manifest", "roots")
-}
-
-func revisionPath(name string) storage.Path {
- return append(BundlesBasePath, name, "manifest", "revision")
-}
-
-func wasmModulePath(name string) storage.Path {
- return append(BundlesBasePath, name, "wasm")
-}
-
-func wasmEntrypointsPath(name string) storage.Path {
- return append(BundlesBasePath, name, "manifest", "wasm")
-}
-
-func metadataPath(name string) storage.Path {
- return append(BundlesBasePath, name, "manifest", "metadata")
-}
-
-func read(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (interface{}, error) {
- value, err := store.Read(ctx, txn, path)
- if err != nil {
- return nil, err
- }
-
- if astValue, ok := value.(ast.Value); ok {
- value, err = ast.JSON(astValue)
- if err != nil {
- return nil, err
- }
- }
-
- return value, nil
+ return v1.EtagStoragePath(name)
}
// ReadBundleNamesFromStore will return a list of bundle names which have had their metadata stored.
func ReadBundleNamesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) ([]string, error) {
- value, err := read(ctx, store, txn, BundlesBasePath)
- if err != nil {
- return nil, err
- }
-
- bundleMap, ok := value.(map[string]interface{})
- if !ok {
- return nil, fmt.Errorf("corrupt manifest roots")
- }
-
- bundles := make([]string, len(bundleMap))
- idx := 0
- for name := range bundleMap {
- bundles[idx] = name
- idx++
- }
- return bundles, nil
+ return v1.ReadBundleNamesFromStore(ctx, store, txn)
}
// WriteManifestToStore will write the manifest into the storage. This function is called when
// the bundle is activated.
func WriteManifestToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string, manifest Manifest) error {
- return write(ctx, store, txn, ManifestStoragePath(name), manifest)
+ return v1.WriteManifestToStore(ctx, store, txn, name, manifest)
}
// WriteEtagToStore will write the bundle etag into the storage. This function is called when the bundle is activated.
func WriteEtagToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name, etag string) error {
- return write(ctx, store, txn, EtagStoragePath(name), etag)
-}
-
-func write(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path, value interface{}) error {
- if err := util.RoundTrip(&value); err != nil {
- return err
- }
-
- var dir []string
- if len(path) > 1 {
- dir = path[:len(path)-1]
- }
-
- if err := storage.MakeDir(ctx, store, txn, dir); err != nil {
- return err
- }
-
- return store.Write(ctx, txn, storage.AddOp, path, value)
+ return v1.WriteEtagToStore(ctx, store, txn, name, etag)
}
// EraseManifestFromStore will remove the manifest from storage. This function is called
// when the bundle is deactivated.
func EraseManifestFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error {
- path := namedBundlePath(name)
- err := store.Write(ctx, txn, storage.RemoveOp, path, nil)
- return suppressNotFound(err)
-}
-
-// eraseBundleEtagFromStore will remove the bundle etag from storage. This function is called
-// when the bundle is deactivated.
-func eraseBundleEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error {
- path := EtagStoragePath(name)
- err := store.Write(ctx, txn, storage.RemoveOp, path, nil)
- return suppressNotFound(err)
-}
-
-func suppressNotFound(err error) error {
- if err == nil || storage.IsNotFound(err) {
- return nil
- }
- return err
-}
-
-func writeWasmModulesToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string, b *Bundle) error {
- basePath := wasmModulePath(name)
- for _, wm := range b.WasmModules {
- path := append(basePath, wm.Path)
- err := write(ctx, store, txn, path, base64.StdEncoding.EncodeToString(wm.Raw))
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func eraseWasmModulesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error {
- path := wasmModulePath(name)
-
- err := store.Write(ctx, txn, storage.RemoveOp, path, nil)
- return suppressNotFound(err)
-}
-
-// ReadWasmMetadataFromStore will read Wasm module resolver metadata from the store.
-func ReadWasmMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) ([]WasmResolver, error) {
- path := wasmEntrypointsPath(name)
- value, err := read(ctx, store, txn, path)
- if err != nil {
- return nil, err
- }
-
- bs, err := json.Marshal(value)
- if err != nil {
- return nil, fmt.Errorf("corrupt wasm manifest data")
- }
-
- var wasmMetadata []WasmResolver
-
- err = util.UnmarshalJSON(bs, &wasmMetadata)
- if err != nil {
- return nil, fmt.Errorf("corrupt wasm manifest data")
- }
-
- return wasmMetadata, nil
+ return v1.EraseManifestFromStore(ctx, store, txn, name)
}
// ReadWasmModulesFromStore will write Wasm module resolver metadata from the store.
func ReadWasmModulesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string][]byte, error) {
- path := wasmModulePath(name)
- value, err := read(ctx, store, txn, path)
- if err != nil {
- return nil, err
- }
-
- encodedModules, ok := value.(map[string]interface{})
- if !ok {
- return nil, fmt.Errorf("corrupt wasm modules")
- }
-
- rawModules := map[string][]byte{}
- for path, enc := range encodedModules {
- encStr, ok := enc.(string)
- if !ok {
- return nil, fmt.Errorf("corrupt wasm modules")
- }
- bs, err := base64.StdEncoding.DecodeString(encStr)
- if err != nil {
- return nil, err
- }
- rawModules[path] = bs
- }
- return rawModules, nil
+ return v1.ReadWasmModulesFromStore(ctx, store, txn, name)
}
// ReadBundleRootsFromStore returns the roots in the specified bundle.
// If the bundle is not activated, this function will return
// storage NotFound error.
func ReadBundleRootsFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) ([]string, error) {
- value, err := read(ctx, store, txn, rootsPath(name))
- if err != nil {
- return nil, err
- }
-
- sl, ok := value.([]interface{})
- if !ok {
- return nil, fmt.Errorf("corrupt manifest roots")
- }
-
- roots := make([]string, len(sl))
-
- for i := range sl {
- roots[i], ok = sl[i].(string)
- if !ok {
- return nil, fmt.Errorf("corrupt manifest root")
- }
- }
-
- return roots, nil
+ return v1.ReadBundleRootsFromStore(ctx, store, txn, name)
}
// ReadBundleRevisionFromStore returns the revision in the specified bundle.
// If the bundle is not activated, this function will return
// storage NotFound error.
func ReadBundleRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (string, error) {
- return readRevisionFromStore(ctx, store, txn, revisionPath(name))
-}
-
-func readRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (string, error) {
- value, err := read(ctx, store, txn, path)
- if err != nil {
- return "", err
- }
-
- str, ok := value.(string)
- if !ok {
- return "", fmt.Errorf("corrupt manifest revision")
- }
-
- return str, nil
+ return v1.ReadBundleRevisionFromStore(ctx, store, txn, name)
}
// ReadBundleMetadataFromStore returns the metadata in the specified bundle.
// If the bundle is not activated, this function will return
// storage NotFound error.
-func ReadBundleMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string]interface{}, error) {
- return readMetadataFromStore(ctx, store, txn, metadataPath(name))
-}
-
-func readMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (map[string]interface{}, error) {
- value, err := read(ctx, store, txn, path)
- if err != nil {
- return nil, suppressNotFound(err)
- }
-
- data, ok := value.(map[string]interface{})
- if !ok {
- return nil, fmt.Errorf("corrupt manifest metadata")
- }
-
- return data, nil
+func ReadBundleMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string]any, error) {
+ return v1.ReadBundleMetadataFromStore(ctx, store, txn, name)
}
// ReadBundleEtagFromStore returns the etag for the specified bundle.
// If the bundle is not activated, this function will return
// storage NotFound error.
func ReadBundleEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (string, error) {
- return readEtagFromStore(ctx, store, txn, EtagStoragePath(name))
-}
-
-func readEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (string, error) {
- value, err := read(ctx, store, txn, path)
- if err != nil {
- return "", err
- }
-
- str, ok := value.(string)
- if !ok {
- return "", fmt.Errorf("corrupt bundle etag")
- }
-
- return str, nil
+ return v1.ReadBundleEtagFromStore(ctx, store, txn, name)
}
// ActivateOpts defines options for the Activate API call.
-type ActivateOpts struct {
- Ctx context.Context
- Store storage.Store
- Txn storage.Transaction
- TxnCtx *storage.Context
- Compiler *ast.Compiler
- Metrics metrics.Metrics
- Bundles map[string]*Bundle // Optional
- ExtraModules map[string]*ast.Module // Optional
- AuthorizationDecisionRef ast.Ref
- ParserOptions ast.ParserOptions
-
- legacy bool
-}
+type ActivateOpts = v1.ActivateOpts
// Activate the bundle(s) by loading into the given Store. This will load policies, data, and record
// the manifest in storage. The compiler provided will have had the polices compiled on it.
func Activate(opts *ActivateOpts) error {
- opts.legacy = false
- return activateBundles(opts)
+ return v1.Activate(setActivateDefaultRegoVersion(opts))
}
// DeactivateOpts defines options for the Deactivate API call
-type DeactivateOpts struct {
- Ctx context.Context
- Store storage.Store
- Txn storage.Transaction
- BundleNames map[string]struct{}
- ParserOptions ast.ParserOptions
-}
+type DeactivateOpts = v1.DeactivateOpts
// Deactivate the bundle(s). This will erase associated data, policies, and the manifest entry from the store.
func Deactivate(opts *DeactivateOpts) error {
- erase := map[string]struct{}{}
- for name := range opts.BundleNames {
- roots, err := ReadBundleRootsFromStore(opts.Ctx, opts.Store, opts.Txn, name)
- if suppressNotFound(err) != nil {
- return err
- }
- for _, root := range roots {
- erase[root] = struct{}{}
- }
- }
- _, err := eraseBundles(opts.Ctx, opts.Store, opts.Txn, opts.ParserOptions, opts.BundleNames, erase)
- return err
-}
-
-func activateBundles(opts *ActivateOpts) error {
-
- // Build collections of bundle names, modules, and roots to erase
- erase := map[string]struct{}{}
- names := map[string]struct{}{}
- deltaBundles := map[string]*Bundle{}
- snapshotBundles := map[string]*Bundle{}
-
- for name, b := range opts.Bundles {
- if b.Type() == DeltaBundleType {
- deltaBundles[name] = b
- } else {
- snapshotBundles[name] = b
- names[name] = struct{}{}
-
- roots, err := ReadBundleRootsFromStore(opts.Ctx, opts.Store, opts.Txn, name)
- if suppressNotFound(err) != nil {
- return err
- }
- for _, root := range roots {
- erase[root] = struct{}{}
- }
-
- // Erase data at new roots to prepare for writing the new data
- for _, root := range *b.Manifest.Roots {
- erase[root] = struct{}{}
- }
- }
- }
-
- // Before changing anything make sure the roots don't collide with any
- // other bundles that already are activated or other bundles being activated.
- err := hasRootsOverlap(opts.Ctx, opts.Store, opts.Txn, opts.Bundles)
- if err != nil {
- return err
- }
-
- if len(deltaBundles) != 0 {
- err := activateDeltaBundles(opts, deltaBundles)
- if err != nil {
- return err
- }
- }
-
- // Erase data and policies at new + old roots, and remove the old
- // manifests before activating a new snapshot bundle.
- remaining, err := eraseBundles(opts.Ctx, opts.Store, opts.Txn, opts.ParserOptions, names, erase)
- if err != nil {
- return err
- }
-
- // Validate data in bundle does not contain paths outside the bundle's roots.
- for _, b := range snapshotBundles {
-
- if b.lazyLoadingMode {
-
- for _, item := range b.Raw {
- path := filepath.ToSlash(item.Path)
-
- if filepath.Base(path) == dataFile || filepath.Base(path) == yamlDataFile {
- var val map[string]json.RawMessage
- err = util.Unmarshal(item.Value, &val)
- if err == nil {
- err = doDFS(val, filepath.Dir(strings.Trim(path, "/")), *b.Manifest.Roots)
- if err != nil {
- return err
- }
- } else {
- // Build an object for the value
- p := getNormalizedPath(path)
-
- if len(p) == 0 {
- return fmt.Errorf("root value must be object")
- }
-
- // verify valid YAML or JSON value
- var x interface{}
- err := util.Unmarshal(item.Value, &x)
- if err != nil {
- return err
- }
-
- value := item.Value
- dir := map[string]json.RawMessage{}
- for i := len(p) - 1; i > 0; i-- {
- dir[p[i]] = value
-
- bs, err := json.Marshal(dir)
- if err != nil {
- return err
- }
-
- value = bs
- dir = map[string]json.RawMessage{}
- }
- dir[p[0]] = value
-
- err = doDFS(dir, filepath.Dir(strings.Trim(path, "/")), *b.Manifest.Roots)
- if err != nil {
- return err
- }
- }
- }
- }
- }
- }
-
- // Compile the modules all at once to avoid having to re-do work.
- remainingAndExtra := make(map[string]*ast.Module)
- for name, mod := range remaining {
- remainingAndExtra[name] = mod
- }
- for name, mod := range opts.ExtraModules {
- remainingAndExtra[name] = mod
- }
-
- err = compileModules(opts.Compiler, opts.Metrics, snapshotBundles, remainingAndExtra, opts.legacy, opts.AuthorizationDecisionRef)
- if err != nil {
- return err
- }
-
- if err := writeDataAndModules(opts.Ctx, opts.Store, opts.Txn, opts.TxnCtx, snapshotBundles, opts.legacy); err != nil {
- return err
- }
-
- if err := ast.CheckPathConflicts(opts.Compiler, storage.NonEmpty(opts.Ctx, opts.Store, opts.Txn)); len(err) > 0 {
- return err
- }
-
- for name, b := range snapshotBundles {
- if err := writeManifestToStore(opts, name, b.Manifest); err != nil {
- return err
- }
-
- if err := writeEtagToStore(opts, name, b.Etag); err != nil {
- return err
- }
-
- if err := writeWasmModulesToStore(opts.Ctx, opts.Store, opts.Txn, name, b); err != nil {
- return err
- }
- }
-
- return nil
+ return v1.Deactivate(setDeactivateDefaultRegoVersion(opts))
}
-func doDFS(obj map[string]json.RawMessage, path string, roots []string) error {
- if len(roots) == 1 && roots[0] == "" {
- return nil
- }
-
- for key := range obj {
-
- newPath := filepath.Join(strings.Trim(path, "/"), key)
-
- // Note: filepath.Join can return paths with '\' separators, always use
- // filepath.ToSlash to keep them normalized.
- newPath = strings.TrimLeft(normalizePath(newPath), "/.")
-
- contains := false
- prefix := false
- if RootPathsContain(roots, newPath) {
- contains = true
- } else {
- for i := range roots {
- if strings.HasPrefix(strings.Trim(roots[i], "/"), newPath) {
- prefix = true
- break
- }
- }
- }
-
- if !contains && !prefix {
- return fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, newPath)
- }
-
- if contains {
- continue
- }
-
- var next map[string]json.RawMessage
- err := util.Unmarshal(obj[key], &next)
- if err != nil {
- return fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, newPath)
- }
-
- if err := doDFS(next, newPath, roots); err != nil {
- return err
- }
- }
- return nil
-}
-
-func activateDeltaBundles(opts *ActivateOpts, bundles map[string]*Bundle) error {
-
- // Check that the manifest roots and wasm resolvers in the delta bundle
- // match with those currently in the store
- for name, b := range bundles {
- value, err := opts.Store.Read(opts.Ctx, opts.Txn, ManifestStoragePath(name))
- if err != nil {
- if storage.IsNotFound(err) {
- continue
- }
- return err
- }
-
- manifest, err := valueToManifest(value)
- if err != nil {
- return fmt.Errorf("corrupt manifest data: %w", err)
- }
-
- if !b.Manifest.equalWasmResolversAndRoots(manifest) {
- return fmt.Errorf("delta bundle '%s' has wasm resolvers or manifest roots that are different from those in the store", name)
- }
- }
-
- for _, b := range bundles {
- err := applyPatches(opts.Ctx, opts.Store, opts.Txn, b.Patch.Data)
- if err != nil {
- return err
- }
- }
-
- if err := ast.CheckPathConflicts(opts.Compiler, storage.NonEmpty(opts.Ctx, opts.Store, opts.Txn)); len(err) > 0 {
- return err
- }
-
- for name, b := range bundles {
- if err := writeManifestToStore(opts, name, b.Manifest); err != nil {
- return err
- }
-
- if err := writeEtagToStore(opts, name, b.Etag); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func valueToManifest(v interface{}) (Manifest, error) {
- if astV, ok := v.(ast.Value); ok {
- var err error
- v, err = ast.JSON(astV)
- if err != nil {
- return Manifest{}, err
- }
- }
-
- var manifest Manifest
-
- bs, err := json.Marshal(v)
- if err != nil {
- return Manifest{}, err
- }
-
- err = util.UnmarshalJSON(bs, &manifest)
- if err != nil {
- return Manifest{}, err
- }
-
- return manifest, nil
-}
-
-// erase bundles by name and roots. This will clear all policies and data at its roots and remove its
-// manifest from storage.
-func eraseBundles(ctx context.Context, store storage.Store, txn storage.Transaction, parserOpts ast.ParserOptions, names map[string]struct{}, roots map[string]struct{}) (map[string]*ast.Module, error) {
-
- if err := eraseData(ctx, store, txn, roots); err != nil {
- return nil, err
- }
-
- remaining, err := erasePolicies(ctx, store, txn, parserOpts, roots)
- if err != nil {
- return nil, err
- }
-
- for name := range names {
- if err := EraseManifestFromStore(ctx, store, txn, name); suppressNotFound(err) != nil {
- return nil, err
- }
-
- if err := LegacyEraseManifestFromStore(ctx, store, txn); suppressNotFound(err) != nil {
- return nil, err
- }
-
- if err := eraseBundleEtagFromStore(ctx, store, txn, name); suppressNotFound(err) != nil {
- return nil, err
- }
-
- if err := eraseWasmModulesFromStore(ctx, store, txn, name); suppressNotFound(err) != nil {
- return nil, err
- }
- }
-
- return remaining, nil
-}
-
-func eraseData(ctx context.Context, store storage.Store, txn storage.Transaction, roots map[string]struct{}) error {
- for root := range roots {
- path, ok := storage.ParsePathEscaped("/" + root)
- if !ok {
- return fmt.Errorf("manifest root path invalid: %v", root)
- }
-
- if len(path) > 0 {
- if err := store.Write(ctx, txn, storage.RemoveOp, path, nil); suppressNotFound(err) != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func erasePolicies(ctx context.Context, store storage.Store, txn storage.Transaction, parserOpts ast.ParserOptions, roots map[string]struct{}) (map[string]*ast.Module, error) {
-
- ids, err := store.ListPolicies(ctx, txn)
- if err != nil {
- return nil, err
- }
-
- remaining := map[string]*ast.Module{}
-
- for _, id := range ids {
- bs, err := store.GetPolicy(ctx, txn, id)
- if err != nil {
- return nil, err
- }
- module, err := ast.ParseModuleWithOpts(id, string(bs), parserOpts)
- if err != nil {
- return nil, err
- }
- path, err := module.Package.Path.Ptr()
- if err != nil {
- return nil, err
- }
- deleted := false
- for root := range roots {
- if RootPathsContain([]string{root}, path) {
- if err := store.DeletePolicy(ctx, txn, id); err != nil {
- return nil, err
- }
- deleted = true
- break
- }
- }
- if !deleted {
- remaining[id] = module
- }
- }
-
- return remaining, nil
-}
-
-func writeManifestToStore(opts *ActivateOpts, name string, manifest Manifest) error {
- // Always write manifests to the named location. If the plugin is in the older style config
- // then also write to the old legacy unnamed location.
- if err := WriteManifestToStore(opts.Ctx, opts.Store, opts.Txn, name, manifest); err != nil {
- return err
- }
-
- if opts.legacy {
- if err := LegacyWriteManifestToStore(opts.Ctx, opts.Store, opts.Txn, manifest); err != nil {
- return err
- }
- }
-
- return nil
+// LegacyWriteManifestToStore will write the bundle manifest to the older single (unnamed) bundle manifest location.
+// Deprecated: Use WriteManifestToStore and named bundles instead.
+func LegacyWriteManifestToStore(ctx context.Context, store storage.Store, txn storage.Transaction, manifest Manifest) error {
+ return v1.LegacyWriteManifestToStore(ctx, store, txn, manifest)
}
-func writeEtagToStore(opts *ActivateOpts, name, etag string) error {
- if err := WriteEtagToStore(opts.Ctx, opts.Store, opts.Txn, name, etag); err != nil {
- return err
- }
-
- return nil
+// LegacyEraseManifestFromStore will erase the bundle manifest from the older single (unnamed) bundle manifest location.
+// Deprecated: Use WriteManifestToStore and named bundles instead.
+func LegacyEraseManifestFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) error {
+ return v1.LegacyEraseManifestFromStore(ctx, store, txn)
}
-func writeDataAndModules(ctx context.Context, store storage.Store, txn storage.Transaction, txnCtx *storage.Context, bundles map[string]*Bundle, legacy bool) error {
- params := storage.WriteParams
- params.Context = txnCtx
-
- for name, b := range bundles {
- if len(b.Raw) == 0 {
- // Write data from each new bundle into the store. Only write under the
- // roots contained in their manifest.
- if err := writeData(ctx, store, txn, *b.Manifest.Roots, b.Data); err != nil {
- return err
- }
-
- for _, mf := range b.Modules {
- var path string
-
- // For backwards compatibility, in legacy mode, upsert policies to
- // the unprefixed path.
- if legacy {
- path = mf.Path
- } else {
- path = modulePathWithPrefix(name, mf.Path)
- }
-
- if err := store.UpsertPolicy(ctx, txn, path, mf.Raw); err != nil {
- return err
- }
- }
- } else {
- params.BasePaths = *b.Manifest.Roots
-
- err := store.Truncate(ctx, txn, params, NewIterator(b.Raw))
- if err != nil {
- return fmt.Errorf("store truncate failed for bundle '%s': %v", name, err)
- }
- }
- }
-
- return nil
+// LegacyReadRevisionFromStore will read the bundle manifest revision from the older single (unnamed) bundle manifest location.
+// Deprecated: Use ReadBundleRevisionFromStore and named bundles instead.
+func LegacyReadRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) (string, error) {
+ return v1.LegacyReadRevisionFromStore(ctx, store, txn)
}
-func writeData(ctx context.Context, store storage.Store, txn storage.Transaction, roots []string, data map[string]interface{}) error {
- for _, root := range roots {
- path, ok := storage.ParsePathEscaped("/" + root)
- if !ok {
- return fmt.Errorf("manifest root path invalid: %v", root)
- }
- if value, ok := lookup(path, data); ok {
- if len(path) > 0 {
- if err := storage.MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil {
- return err
- }
- }
- if err := store.Write(ctx, txn, storage.AddOp, path, value); err != nil {
- return err
- }
- }
- }
- return nil
+// ActivateLegacy calls Activate for the bundles but will also write their manifest to the older unnamed store location.
+// Deprecated: Use Activate with named bundles instead.
+func ActivateLegacy(opts *ActivateOpts) error {
+ return v1.ActivateLegacy(opts)
}
-func compileModules(compiler *ast.Compiler, m metrics.Metrics, bundles map[string]*Bundle, extraModules map[string]*ast.Module, legacy bool, authorizationDecisionRef ast.Ref) error {
-
- m.Timer(metrics.RegoModuleCompile).Start()
- defer m.Timer(metrics.RegoModuleCompile).Stop()
-
- modules := map[string]*ast.Module{}
-
- // preserve any modules already on the compiler
- for name, module := range compiler.Modules {
- modules[name] = module
- }
-
- // preserve any modules passed in from the store
- for name, module := range extraModules {
- modules[name] = module
- }
-
- // include all the new bundle modules
- for bundleName, b := range bundles {
- if legacy {
- for _, mf := range b.Modules {
- modules[mf.Path] = mf.Parsed
- }
- } else {
- for name, module := range b.ParsedModules(bundleName) {
- modules[name] = module
- }
- }
- }
-
- if compiler.Compile(modules); compiler.Failed() {
- return compiler.Errors
- }
-
- if authorizationDecisionRef.Equal(ast.EmptyRef()) {
+func setActivateDefaultRegoVersion(opts *ActivateOpts) *ActivateOpts {
+ if opts == nil {
return nil
}
- return iCompiler.VerifyAuthorizationPolicySchema(compiler, authorizationDecisionRef)
-}
-
-func writeModules(ctx context.Context, store storage.Store, txn storage.Transaction, compiler *ast.Compiler, m metrics.Metrics, bundles map[string]*Bundle, extraModules map[string]*ast.Module, legacy bool) error {
-
- m.Timer(metrics.RegoModuleCompile).Start()
- defer m.Timer(metrics.RegoModuleCompile).Stop()
-
- modules := map[string]*ast.Module{}
-
- // preserve any modules already on the compiler
- for name, module := range compiler.Modules {
- modules[name] = module
+ if opts.ParserOptions.RegoVersion == ast.RegoUndefined {
+ cpy := *opts
+ cpy.ParserOptions.RegoVersion = ast.DefaultRegoVersion
+ return &cpy
}
- // preserve any modules passed in from the store
- for name, module := range extraModules {
- modules[name] = module
- }
-
- // include all the new bundle modules
- for bundleName, b := range bundles {
- if legacy {
- for _, mf := range b.Modules {
- modules[mf.Path] = mf.Parsed
- }
- } else {
- for name, module := range b.ParsedModules(bundleName) {
- modules[name] = module
- }
- }
- }
-
- if compiler.Compile(modules); compiler.Failed() {
- return compiler.Errors
- }
- for bundleName, b := range bundles {
- for _, mf := range b.Modules {
- var path string
-
- // For backwards compatibility, in legacy mode, upsert policies to
- // the unprefixed path.
- if legacy {
- path = mf.Path
- } else {
- path = modulePathWithPrefix(bundleName, mf.Path)
- }
-
- if err := store.UpsertPolicy(ctx, txn, path, mf.Raw); err != nil {
- return err
- }
- }
- }
- return nil
+ return opts
}
-func lookup(path storage.Path, data map[string]interface{}) (interface{}, bool) {
- if len(path) == 0 {
- return data, true
- }
- for i := 0; i < len(path)-1; i++ {
- value, ok := data[path[i]]
- if !ok {
- return nil, false
- }
- obj, ok := value.(map[string]interface{})
- if !ok {
- return nil, false
- }
- data = obj
- }
- value, ok := data[path[len(path)-1]]
- return value, ok
-}
-
-func hasRootsOverlap(ctx context.Context, store storage.Store, txn storage.Transaction, bundles map[string]*Bundle) error {
- collisions := map[string][]string{}
- allBundles, err := ReadBundleNamesFromStore(ctx, store, txn)
- if suppressNotFound(err) != nil {
- return err
- }
-
- allRoots := map[string][]string{}
-
- // Build a map of roots for existing bundles already in the system
- for _, name := range allBundles {
- roots, err := ReadBundleRootsFromStore(ctx, store, txn, name)
- if suppressNotFound(err) != nil {
- return err
- }
- allRoots[name] = roots
- }
-
- // Add in any bundles that are being activated, overwrite existing roots
- // with new ones where bundles are in both groups.
- for name, bundle := range bundles {
- allRoots[name] = *bundle.Manifest.Roots
- }
-
- // Now check for each new bundle if it conflicts with any of the others
- for name, bundle := range bundles {
- for otherBundle, otherRoots := range allRoots {
- if name == otherBundle {
- // Skip the current bundle being checked
- continue
- }
-
- // Compare the "new" roots with other existing (or a different bundles new roots)
- for _, newRoot := range *bundle.Manifest.Roots {
- for _, otherRoot := range otherRoots {
- if RootPathsOverlap(newRoot, otherRoot) {
- collisions[otherBundle] = append(collisions[otherBundle], newRoot)
- }
- }
- }
- }
- }
-
- if len(collisions) > 0 {
- var bundleNames []string
- for name := range collisions {
- bundleNames = append(bundleNames, name)
- }
- return fmt.Errorf("detected overlapping roots in bundle manifest with: %s", bundleNames)
- }
- return nil
-}
-
-func applyPatches(ctx context.Context, store storage.Store, txn storage.Transaction, patches []PatchOperation) error {
- for _, pat := range patches {
-
- // construct patch path
- path, ok := patch.ParsePatchPathEscaped("/" + strings.Trim(pat.Path, "/"))
- if !ok {
- return fmt.Errorf("error parsing patch path")
- }
-
- var op storage.PatchOp
- switch pat.Op {
- case "upsert":
- op = storage.AddOp
-
- _, err := store.Read(ctx, txn, path[:len(path)-1])
- if err != nil {
- if !storage.IsNotFound(err) {
- return err
- }
-
- if err := storage.MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil {
- return err
- }
- }
- case "remove":
- op = storage.RemoveOp
- case "replace":
- op = storage.ReplaceOp
- default:
- return fmt.Errorf("bad patch operation: %v", pat.Op)
- }
-
- // apply the patch
- if err := store.Write(ctx, txn, op, path, pat.Value); err != nil {
- return err
- }
+func setDeactivateDefaultRegoVersion(opts *DeactivateOpts) *DeactivateOpts {
+ if opts == nil {
+ return nil
}
- return nil
-}
-
-// Helpers for the older single (unnamed) bundle style manifest storage.
-
-// LegacyManifestStoragePath is the older unnamed bundle path for manifests to be stored.
-// Deprecated: Use ManifestStoragePath and named bundles instead.
-var legacyManifestStoragePath = storage.MustParsePath("/system/bundle/manifest")
-var legacyRevisionStoragePath = append(legacyManifestStoragePath, "revision")
-
-// LegacyWriteManifestToStore will write the bundle manifest to the older single (unnamed) bundle manifest location.
-// Deprecated: Use WriteManifestToStore and named bundles instead.
-func LegacyWriteManifestToStore(ctx context.Context, store storage.Store, txn storage.Transaction, manifest Manifest) error {
- return write(ctx, store, txn, legacyManifestStoragePath, manifest)
-}
-
-// LegacyEraseManifestFromStore will erase the bundle manifest from the older single (unnamed) bundle manifest location.
-// Deprecated: Use WriteManifestToStore and named bundles instead.
-func LegacyEraseManifestFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) error {
- err := store.Write(ctx, txn, storage.RemoveOp, legacyManifestStoragePath, nil)
- if err != nil {
- return err
+ if opts.ParserOptions.RegoVersion == ast.RegoUndefined {
+ cpy := *opts
+ cpy.ParserOptions.RegoVersion = ast.DefaultRegoVersion
+ return &cpy
}
- return nil
-}
-// LegacyReadRevisionFromStore will read the bundle manifest revision from the older single (unnamed) bundle manifest location.
-// Deprecated: Use ReadBundleRevisionFromStore and named bundles instead.
-func LegacyReadRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) (string, error) {
- return readRevisionFromStore(ctx, store, txn, legacyRevisionStoragePath)
-}
-
-// ActivateLegacy calls Activate for the bundles but will also write their manifest to the older unnamed store location.
-// Deprecated: Use Activate with named bundles instead.
-func ActivateLegacy(opts *ActivateOpts) error {
- opts.legacy = true
- return activateBundles(opts)
+ return opts
}
diff --git a/vendor/github.com/open-policy-agent/opa/bundle/verify.go b/vendor/github.com/open-policy-agent/opa/bundle/verify.go
index e85be835be..ef2e1e32db 100644
--- a/vendor/github.com/open-policy-agent/opa/bundle/verify.go
+++ b/vendor/github.com/open-policy-agent/opa/bundle/verify.go
@@ -6,26 +6,11 @@
package bundle
import (
- "bytes"
- "encoding/base64"
- "encoding/hex"
- "encoding/json"
- "fmt"
-
- "github.com/open-policy-agent/opa/internal/jwx/jwa"
- "github.com/open-policy-agent/opa/internal/jwx/jws"
- "github.com/open-policy-agent/opa/internal/jwx/jws/verify"
- "github.com/open-policy-agent/opa/util"
+ v1 "github.com/open-policy-agent/opa/v1/bundle"
)
-const defaultVerifierID = "_default"
-
-var verifiers map[string]Verifier
-
// Verifier is the interface expected for implementations that verify bundle signatures.
-type Verifier interface {
- VerifyBundleSignature(SignaturesConfig, *VerificationConfig) (map[string]FileInfo, error)
-}
+type Verifier v1.Verifier
// VerifyBundleSignature will retrieve the Verifier implementation based
// on the Plugin specified in SignaturesConfig, and call its implementation
@@ -33,199 +18,19 @@ type Verifier interface {
// using the given public keys or secret. If a signature is verified, it keeps
// track of the files specified in the JWT payload
func VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) {
- // default implementation does not return a nil for map, so don't
- // do it here either
- files := make(map[string]FileInfo)
- var plugin string
- // for backwards compatibility, check if there is no plugin specified, and use default
- if sc.Plugin == "" {
- plugin = defaultVerifierID
- } else {
- plugin = sc.Plugin
- }
- verifier, err := GetVerifier(plugin)
- if err != nil {
- return files, err
- }
- return verifier.VerifyBundleSignature(sc, bvc)
+ return v1.VerifyBundleSignature(sc, bvc)
}
// DefaultVerifier is the default bundle verification implementation. It verifies bundles by checking
// the JWT signature using a locally-accessible public key.
-type DefaultVerifier struct{}
-
-// VerifyBundleSignature verifies the bundle signature using the given public keys or secret.
-// If a signature is verified, it keeps track of the files specified in the JWT payload
-func (*DefaultVerifier) VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) {
- files := make(map[string]FileInfo)
-
- if len(sc.Signatures) == 0 {
- return files, fmt.Errorf(".signatures.json: missing JWT (expected exactly one)")
- }
-
- if len(sc.Signatures) > 1 {
- return files, fmt.Errorf(".signatures.json: multiple JWTs not supported (expected exactly one)")
- }
-
- for _, token := range sc.Signatures {
- payload, err := verifyJWTSignature(token, bvc)
- if err != nil {
- return files, err
- }
-
- for _, file := range payload.Files {
- files[file.Name] = file
- }
- }
- return files, nil
-}
-
-func verifyJWTSignature(token string, bvc *VerificationConfig) (*DecodedSignature, error) {
- // decode JWT to check if the header specifies the key to use and/or if claims have the scope.
-
- parts, err := jws.SplitCompact(token)
- if err != nil {
- return nil, err
- }
-
- var decodedHeader []byte
- if decodedHeader, err = base64.RawURLEncoding.DecodeString(parts[0]); err != nil {
- return nil, fmt.Errorf("failed to base64 decode JWT headers: %w", err)
- }
-
- var hdr jws.StandardHeaders
- if err := json.Unmarshal(decodedHeader, &hdr); err != nil {
- return nil, fmt.Errorf("failed to parse JWT headers: %w", err)
- }
-
- payload, err := base64.RawURLEncoding.DecodeString(parts[1])
- if err != nil {
- return nil, err
- }
-
- var ds DecodedSignature
- if err := json.Unmarshal(payload, &ds); err != nil {
- return nil, err
- }
-
- // check for the id of the key to use for JWT signature verification
- // first in the OPA config. If not found, then check the JWT kid.
- keyID := bvc.KeyID
- if keyID == "" {
- keyID = hdr.KeyID
- }
- if keyID == "" {
- // If header has no key id, check the deprecated key claim.
- keyID = ds.KeyID
- }
-
- if keyID == "" {
- return nil, fmt.Errorf("verification key ID is empty")
- }
-
- // now that we have the keyID, fetch the actual key
- keyConfig, err := bvc.GetPublicKey(keyID)
- if err != nil {
- return nil, err
- }
-
- // verify JWT signature
- alg := jwa.SignatureAlgorithm(keyConfig.Algorithm)
- key, err := verify.GetSigningKey(keyConfig.Key, alg)
- if err != nil {
- return nil, err
- }
-
- _, err = jws.Verify([]byte(token), alg, key)
- if err != nil {
- return nil, err
- }
-
- // verify the scope
- scope := bvc.Scope
- if scope == "" {
- scope = keyConfig.Scope
- }
-
- if ds.Scope != scope {
- return nil, fmt.Errorf("scope mismatch")
- }
- return &ds, nil
-}
-
-// VerifyBundleFile verifies the hash of a file in the bundle matches to that provided in the bundle's signature
-func VerifyBundleFile(path string, data bytes.Buffer, files map[string]FileInfo) error {
- var file FileInfo
- var ok bool
-
- if file, ok = files[path]; !ok {
- return fmt.Errorf("file %v not included in bundle signature", path)
- }
-
- if file.Algorithm == "" {
- return fmt.Errorf("no hashing algorithm provided for file %v", path)
- }
-
- hash, err := NewSignatureHasher(HashingAlgorithm(file.Algorithm))
- if err != nil {
- return err
- }
-
- // hash the file content
- // For unstructured files, hash the byte stream of the file
- // For structured files, read the byte stream and parse into a JSON structure;
- // then recursively order the fields of all objects alphabetically and then apply
- // the hash function to result to compute the hash. This ensures that the digital signature is
- // independent of whitespace and other non-semantic JSON features.
- var value interface{}
- if IsStructuredDoc(path) {
- err := util.Unmarshal(data.Bytes(), &value)
- if err != nil {
- return err
- }
- } else {
- value = data.Bytes()
- }
-
- bs, err := hash.HashFile(value)
- if err != nil {
- return err
- }
-
- // compare file hash with same file in the JWT payloads
- fb, err := hex.DecodeString(file.Hash)
- if err != nil {
- return err
- }
-
- if !bytes.Equal(fb, bs) {
- return fmt.Errorf("%v: digest mismatch (want: %x, got: %x)", path, fb, bs)
- }
-
- delete(files, path)
- return nil
-}
+type DefaultVerifier = v1.DefaultVerifier
// GetVerifier returns the Verifier registered under the given id
func GetVerifier(id string) (Verifier, error) {
- verifier, ok := verifiers[id]
- if !ok {
- return nil, fmt.Errorf("no verifier exists under id %s", id)
- }
- return verifier, nil
+ return v1.GetVerifier(id)
}
// RegisterVerifier registers a Verifier under the given id
func RegisterVerifier(id string, v Verifier) error {
- if id == defaultVerifierID {
- return fmt.Errorf("verifier id %s is reserved, use a different id", id)
- }
- verifiers[id] = v
- return nil
-}
-
-func init() {
- verifiers = map[string]Verifier{
- defaultVerifierID: &DefaultVerifier{},
- }
+ return v1.RegisterVerifier(id, v)
}
diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/doc.go b/vendor/github.com/open-policy-agent/opa/capabilities/doc.go
new file mode 100644
index 0000000000..189c2e727a
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/capabilities/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2024 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended.
+// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead.
+// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information.
+package capabilities
diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.0.json
new file mode 100644
index 0000000000..48a87b0c35
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.0.json
@@ -0,0 +1,4835 @@
+{
+ "builtins": [
+ {
+ "name": "abs",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "all",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "and",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "\u0026"
+ },
+ {
+ "name": "any",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.concat",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.reverse",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.slice",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "assign",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": ":="
+ },
+ {
+ "name": "base64.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode_no_pad",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.and",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.lsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.negate",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.or",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.rsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.xor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "null"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "ceil",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "concat",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "count",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.equal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.parse_private_keys",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificate_request",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_keypair",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_rsa_private_key",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "div",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "/"
+ },
+ {
+ "name": "endswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "eq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "="
+ },
+ {
+ "name": "equal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "=="
+ },
+ {
+ "name": "floor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "format_int",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.quote_meta",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable_paths",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_and_verify",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_query",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_schema",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.schema_is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "gt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e"
+ },
+ {
+ "name": "gte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e="
+ },
+ {
+ "name": "hex.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "hex.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "http.send",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "indexof",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "indexof_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.member_2",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.member_3",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.print",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "intersection",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode_verify",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign_raw",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.verify_es256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_number",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "indent",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "prefix",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "pretty",
+ "value": {
+ "type": "boolean"
+ }
+ }
+ ],
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.match_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "static": [
+ {
+ "key": "desc",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "error",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "field",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "type",
+ "value": {
+ "type": "string"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.patch",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "op",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "path",
+ "value": {
+ "type": "any"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.verify_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lower",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c"
+ },
+ {
+ "name": "lte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c="
+ },
+ {
+ "name": "max",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "min",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "minus",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "function"
+ },
+ "infix": "-"
+ },
+ {
+ "name": "mul",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "*"
+ },
+ {
+ "name": "neq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "!="
+ },
+ {
+ "name": "net.cidr_contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_contains_matches",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "static": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_expand",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_intersects",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_merge",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_overlap",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.lookup_ip_addr",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "numbers.range",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "numbers.range_step",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.get",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.keys",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.subset",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "opa.runtime",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "or",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "|"
+ },
+ {
+ "name": "plus",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "+"
+ },
+ {
+ "name": "print",
+ "decl": {
+ "type": "function",
+ "variadic": {
+ "type": "any"
+ }
+ }
+ },
+ {
+ "name": "product",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "providers.aws.sign_req",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rand.intn",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "re_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_all_string_submatch_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.globs_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.template_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.chain",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.rule",
+ "decl": {
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.parse_module",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rem",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "%"
+ },
+ {
+ "name": "replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "round",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.compare",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "set_diff",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sort",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sprintf",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "startswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_prefix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_suffix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.count",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.render_template",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.replace_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.reverse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "substring",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sum",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.add_date",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.clock",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.date",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.diff",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.format",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.now_ns",
+ "decl": {
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "time.parse_duration_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_rfc3339_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.weekday",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "to_number",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_left",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_prefix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_right",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_space",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_suffix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "type_name",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "union",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse_bytes",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "upper",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode_object",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode_object",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.rfc4122",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "walk",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "relation": true
+ },
+ {
+ "name": "yaml.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ }
+ ],
+ "wasm_abi_versions": [
+ {
+ "version": 1,
+ "minor_version": 1
+ },
+ {
+ "version": 1,
+ "minor_version": 2
+ }
+ ],
+ "features": [
+ "rego_v1"
+ ]
+}
diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.1.json
new file mode 100644
index 0000000000..48a87b0c35
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.1.json
@@ -0,0 +1,4835 @@
+{
+ "builtins": [
+ {
+ "name": "abs",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "all",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "and",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "\u0026"
+ },
+ {
+ "name": "any",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.concat",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.reverse",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.slice",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "assign",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": ":="
+ },
+ {
+ "name": "base64.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode_no_pad",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.and",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.lsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.negate",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.or",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.rsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.xor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "null"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "ceil",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "concat",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "count",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.equal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.parse_private_keys",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificate_request",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_keypair",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_rsa_private_key",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "div",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "/"
+ },
+ {
+ "name": "endswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "eq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "="
+ },
+ {
+ "name": "equal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "=="
+ },
+ {
+ "name": "floor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "format_int",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.quote_meta",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable_paths",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_and_verify",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_query",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_schema",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.schema_is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "gt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e"
+ },
+ {
+ "name": "gte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e="
+ },
+ {
+ "name": "hex.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "hex.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "http.send",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "indexof",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "indexof_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.member_2",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.member_3",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.print",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "intersection",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode_verify",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign_raw",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.verify_es256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_number",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "indent",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "prefix",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "pretty",
+ "value": {
+ "type": "boolean"
+ }
+ }
+ ],
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.match_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "static": [
+ {
+ "key": "desc",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "error",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "field",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "type",
+ "value": {
+ "type": "string"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.patch",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "op",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "path",
+ "value": {
+ "type": "any"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.verify_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lower",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c"
+ },
+ {
+ "name": "lte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c="
+ },
+ {
+ "name": "max",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "min",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "minus",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "function"
+ },
+ "infix": "-"
+ },
+ {
+ "name": "mul",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "*"
+ },
+ {
+ "name": "neq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "!="
+ },
+ {
+ "name": "net.cidr_contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_contains_matches",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "static": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_expand",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_intersects",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_merge",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_overlap",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.lookup_ip_addr",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "numbers.range",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "numbers.range_step",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.get",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.keys",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.subset",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "opa.runtime",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "or",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "|"
+ },
+ {
+ "name": "plus",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "+"
+ },
+ {
+ "name": "print",
+ "decl": {
+ "type": "function",
+ "variadic": {
+ "type": "any"
+ }
+ }
+ },
+ {
+ "name": "product",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "providers.aws.sign_req",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rand.intn",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "re_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_all_string_submatch_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.globs_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.template_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.chain",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.rule",
+ "decl": {
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.parse_module",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rem",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "%"
+ },
+ {
+ "name": "replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "round",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.compare",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "set_diff",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sort",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sprintf",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "startswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_prefix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_suffix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.count",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.render_template",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.replace_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.reverse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "substring",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sum",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.add_date",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.clock",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.date",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.diff",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.format",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.now_ns",
+ "decl": {
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "time.parse_duration_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_rfc3339_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.weekday",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "to_number",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_left",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_prefix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_right",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_space",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_suffix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "type_name",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "union",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse_bytes",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "upper",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode_object",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode_object",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.rfc4122",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "walk",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "relation": true
+ },
+ {
+ "name": "yaml.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ }
+ ],
+ "wasm_abi_versions": [
+ {
+ "version": 1,
+ "minor_version": 1
+ },
+ {
+ "version": 1,
+ "minor_version": 2
+ }
+ ],
+ "features": [
+ "rego_v1"
+ ]
+}
diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.1.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.1.0.json
new file mode 100644
index 0000000000..48a87b0c35
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.1.0.json
@@ -0,0 +1,4835 @@
+{
+ "builtins": [
+ {
+ "name": "abs",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "all",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "and",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "\u0026"
+ },
+ {
+ "name": "any",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.concat",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.reverse",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.slice",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "assign",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": ":="
+ },
+ {
+ "name": "base64.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode_no_pad",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.and",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.lsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.negate",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.or",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.rsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.xor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "null"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "ceil",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "concat",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "count",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.equal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.parse_private_keys",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificate_request",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_keypair",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_rsa_private_key",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "div",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "/"
+ },
+ {
+ "name": "endswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "eq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "="
+ },
+ {
+ "name": "equal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "=="
+ },
+ {
+ "name": "floor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "format_int",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.quote_meta",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable_paths",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_and_verify",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_query",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_schema",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.schema_is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "gt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e"
+ },
+ {
+ "name": "gte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e="
+ },
+ {
+ "name": "hex.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "hex.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "http.send",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "indexof",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "indexof_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.member_2",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.member_3",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.print",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "intersection",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode_verify",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign_raw",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.verify_es256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_number",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "indent",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "prefix",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "pretty",
+ "value": {
+ "type": "boolean"
+ }
+ }
+ ],
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.match_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "static": [
+ {
+ "key": "desc",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "error",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "field",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "type",
+ "value": {
+ "type": "string"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.patch",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "op",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "path",
+ "value": {
+ "type": "any"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.verify_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lower",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c"
+ },
+ {
+ "name": "lte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c="
+ },
+ {
+ "name": "max",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "min",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "minus",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "function"
+ },
+ "infix": "-"
+ },
+ {
+ "name": "mul",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "*"
+ },
+ {
+ "name": "neq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "!="
+ },
+ {
+ "name": "net.cidr_contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_contains_matches",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "static": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_expand",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_intersects",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_merge",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_overlap",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.lookup_ip_addr",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "numbers.range",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "numbers.range_step",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.get",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.keys",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.subset",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "opa.runtime",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "or",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "|"
+ },
+ {
+ "name": "plus",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "+"
+ },
+ {
+ "name": "print",
+ "decl": {
+ "type": "function",
+ "variadic": {
+ "type": "any"
+ }
+ }
+ },
+ {
+ "name": "product",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "providers.aws.sign_req",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rand.intn",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "re_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_all_string_submatch_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.globs_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.template_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.chain",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.rule",
+ "decl": {
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.parse_module",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rem",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "%"
+ },
+ {
+ "name": "replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "round",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.compare",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "set_diff",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sort",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sprintf",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "startswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_prefix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_suffix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.count",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.render_template",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.replace_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.reverse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "substring",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sum",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.add_date",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.clock",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.date",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.diff",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.format",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.now_ns",
+ "decl": {
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "time.parse_duration_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_rfc3339_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.weekday",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "to_number",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_left",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_prefix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_right",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_space",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_suffix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "type_name",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "union",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse_bytes",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "upper",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode_object",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode_object",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.rfc4122",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "walk",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "relation": true
+ },
+ {
+ "name": "yaml.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ }
+ ],
+ "wasm_abi_versions": [
+ {
+ "version": 1,
+ "minor_version": 1
+ },
+ {
+ "version": 1,
+ "minor_version": 2
+ }
+ ],
+ "features": [
+ "rego_v1"
+ ]
+}
diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.10.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.10.0.json
new file mode 100644
index 0000000000..0a37621d0c
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.10.0.json
@@ -0,0 +1,4867 @@
+{
+ "builtins": [
+ {
+ "name": "abs",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "all",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "and",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "\u0026"
+ },
+ {
+ "name": "any",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.concat",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.reverse",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.slice",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "assign",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": ":="
+ },
+ {
+ "name": "base64.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode_no_pad",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.and",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.lsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.negate",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.or",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.rsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.xor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "null"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "ceil",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "concat",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "count",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.equal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.parse_private_keys",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificate_request",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_keypair",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_rsa_private_key",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "div",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "/"
+ },
+ {
+ "name": "endswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "eq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "="
+ },
+ {
+ "name": "equal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "=="
+ },
+ {
+ "name": "floor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "format_int",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.quote_meta",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable_paths",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_and_verify",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_query",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_schema",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.schema_is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "gt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e"
+ },
+ {
+ "name": "gte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e="
+ },
+ {
+ "name": "hex.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "hex.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "http.send",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "indexof",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "indexof_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.member_2",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.member_3",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.print",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.test_case",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "intersection",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode_verify",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign_raw",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.verify_eddsa",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_number",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "indent",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "prefix",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "pretty",
+ "value": {
+ "type": "boolean"
+ }
+ }
+ ],
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.match_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "static": [
+ {
+ "key": "desc",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "error",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "field",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "type",
+ "value": {
+ "type": "string"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.patch",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "op",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "path",
+ "value": {
+ "type": "any"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.verify_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lower",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c"
+ },
+ {
+ "name": "lte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c="
+ },
+ {
+ "name": "max",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "min",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "minus",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "function"
+ },
+ "infix": "-"
+ },
+ {
+ "name": "mul",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "*"
+ },
+ {
+ "name": "neq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "!="
+ },
+ {
+ "name": "net.cidr_contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_contains_matches",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "static": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_expand",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_intersects",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_merge",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_overlap",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.lookup_ip_addr",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "numbers.range",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "numbers.range_step",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.get",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.keys",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.subset",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "opa.runtime",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "or",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "|"
+ },
+ {
+ "name": "plus",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "+"
+ },
+ {
+ "name": "print",
+ "decl": {
+ "type": "function",
+ "variadic": {
+ "type": "any"
+ }
+ }
+ },
+ {
+ "name": "product",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "providers.aws.sign_req",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rand.intn",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "re_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_all_string_submatch_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.globs_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.template_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.chain",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.rule",
+ "decl": {
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.parse_module",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rem",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "%"
+ },
+ {
+ "name": "replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "round",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.compare",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "set_diff",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sort",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sprintf",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "startswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_prefix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_suffix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.count",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.render_template",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.replace_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.reverse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "substring",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sum",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.add_date",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.clock",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.date",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.diff",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.format",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.now_ns",
+ "decl": {
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "time.parse_duration_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_rfc3339_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.weekday",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "to_number",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_left",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_prefix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_right",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_space",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_suffix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "type_name",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "union",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse_bytes",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "upper",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode_object",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode_object",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.rfc4122",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "walk",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "relation": true
+ },
+ {
+ "name": "yaml.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ }
+ ],
+ "wasm_abi_versions": [
+ {
+ "version": 1,
+ "minor_version": 1
+ },
+ {
+ "version": 1,
+ "minor_version": 2
+ }
+ ],
+ "features": [
+ "keywords_in_refs",
+ "rego_v1"
+ ]
+}
diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.10.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.10.1.json
new file mode 100644
index 0000000000..0a37621d0c
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.10.1.json
@@ -0,0 +1,4867 @@
+{
+ "builtins": [
+ {
+ "name": "abs",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "all",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "and",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "\u0026"
+ },
+ {
+ "name": "any",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.concat",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.reverse",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.slice",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "assign",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": ":="
+ },
+ {
+ "name": "base64.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode_no_pad",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.and",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.lsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.negate",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.or",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.rsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.xor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "null"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "ceil",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "concat",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "count",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.equal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.parse_private_keys",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificate_request",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_keypair",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_rsa_private_key",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "div",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "/"
+ },
+ {
+ "name": "endswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "eq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "="
+ },
+ {
+ "name": "equal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "=="
+ },
+ {
+ "name": "floor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "format_int",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.quote_meta",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable_paths",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_and_verify",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_query",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_schema",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.schema_is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "gt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e"
+ },
+ {
+ "name": "gte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e="
+ },
+ {
+ "name": "hex.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "hex.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "http.send",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "indexof",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "indexof_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.member_2",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.member_3",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.print",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.test_case",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "intersection",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode_verify",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign_raw",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.verify_eddsa",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_number",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "indent",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "prefix",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "pretty",
+ "value": {
+ "type": "boolean"
+ }
+ }
+ ],
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.match_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "static": [
+ {
+ "key": "desc",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "error",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "field",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "type",
+ "value": {
+ "type": "string"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.patch",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "op",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "path",
+ "value": {
+ "type": "any"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.verify_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lower",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c"
+ },
+ {
+ "name": "lte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c="
+ },
+ {
+ "name": "max",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "min",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "minus",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "function"
+ },
+ "infix": "-"
+ },
+ {
+ "name": "mul",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "*"
+ },
+ {
+ "name": "neq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "!="
+ },
+ {
+ "name": "net.cidr_contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_contains_matches",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "static": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_expand",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_intersects",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_merge",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_overlap",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.lookup_ip_addr",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "numbers.range",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "numbers.range_step",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.get",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.keys",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.subset",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "opa.runtime",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "or",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "|"
+ },
+ {
+ "name": "plus",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "+"
+ },
+ {
+ "name": "print",
+ "decl": {
+ "type": "function",
+ "variadic": {
+ "type": "any"
+ }
+ }
+ },
+ {
+ "name": "product",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "providers.aws.sign_req",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rand.intn",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "re_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_all_string_submatch_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.globs_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.template_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.chain",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.rule",
+ "decl": {
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.parse_module",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rem",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "%"
+ },
+ {
+ "name": "replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "round",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.compare",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "set_diff",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sort",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sprintf",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "startswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_prefix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_suffix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.count",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.render_template",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.replace_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.reverse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "substring",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sum",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.add_date",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.clock",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.date",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.diff",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.format",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.now_ns",
+ "decl": {
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "time.parse_duration_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_rfc3339_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.weekday",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "to_number",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_left",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_prefix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_right",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_space",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_suffix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "type_name",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "union",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse_bytes",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "upper",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode_object",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode_object",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.rfc4122",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "walk",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "relation": true
+ },
+ {
+ "name": "yaml.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ }
+ ],
+ "wasm_abi_versions": [
+ {
+ "version": 1,
+ "minor_version": 1
+ },
+ {
+ "version": 1,
+ "minor_version": 2
+ }
+ ],
+ "features": [
+ "keywords_in_refs",
+ "rego_v1"
+ ]
+}
diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.2.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.2.0.json
new file mode 100644
index 0000000000..1253c88b30
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.2.0.json
@@ -0,0 +1,4849 @@
+{
+ "builtins": [
+ {
+ "name": "abs",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "all",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "and",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "\u0026"
+ },
+ {
+ "name": "any",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.concat",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.reverse",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.slice",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "assign",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": ":="
+ },
+ {
+ "name": "base64.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode_no_pad",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.and",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.lsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.negate",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.or",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.rsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.xor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "null"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "ceil",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "concat",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "count",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.equal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.parse_private_keys",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificate_request",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_keypair",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_rsa_private_key",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "div",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "/"
+ },
+ {
+ "name": "endswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "eq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "="
+ },
+ {
+ "name": "equal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "=="
+ },
+ {
+ "name": "floor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "format_int",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.quote_meta",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable_paths",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_and_verify",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_query",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_schema",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.schema_is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "gt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e"
+ },
+ {
+ "name": "gte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e="
+ },
+ {
+ "name": "hex.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "hex.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "http.send",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "indexof",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "indexof_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.member_2",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.member_3",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.print",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.test_case",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "intersection",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode_verify",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign_raw",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.verify_es256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_number",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "indent",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "prefix",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "pretty",
+ "value": {
+ "type": "boolean"
+ }
+ }
+ ],
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.match_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "static": [
+ {
+ "key": "desc",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "error",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "field",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "type",
+ "value": {
+ "type": "string"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.patch",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "op",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "path",
+ "value": {
+ "type": "any"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.verify_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lower",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c"
+ },
+ {
+ "name": "lte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c="
+ },
+ {
+ "name": "max",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "min",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "minus",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "function"
+ },
+ "infix": "-"
+ },
+ {
+ "name": "mul",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "*"
+ },
+ {
+ "name": "neq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "!="
+ },
+ {
+ "name": "net.cidr_contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_contains_matches",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "static": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_expand",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_intersects",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_merge",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_overlap",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.lookup_ip_addr",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "numbers.range",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "numbers.range_step",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.get",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.keys",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.subset",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "opa.runtime",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "or",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "|"
+ },
+ {
+ "name": "plus",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "+"
+ },
+ {
+ "name": "print",
+ "decl": {
+ "type": "function",
+ "variadic": {
+ "type": "any"
+ }
+ }
+ },
+ {
+ "name": "product",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "providers.aws.sign_req",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rand.intn",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "re_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_all_string_submatch_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.globs_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.template_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.chain",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.rule",
+ "decl": {
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.parse_module",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rem",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "%"
+ },
+ {
+ "name": "replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "round",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.compare",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "set_diff",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sort",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sprintf",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "startswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_prefix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_suffix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.count",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.render_template",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.replace_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.reverse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "substring",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sum",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.add_date",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.clock",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.date",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.diff",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.format",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.now_ns",
+ "decl": {
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "time.parse_duration_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_rfc3339_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.weekday",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "to_number",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_left",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_prefix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_right",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_space",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_suffix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "type_name",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "union",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse_bytes",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "upper",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode_object",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode_object",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.rfc4122",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "walk",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "relation": true
+ },
+ {
+ "name": "yaml.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ }
+ ],
+ "wasm_abi_versions": [
+ {
+ "version": 1,
+ "minor_version": 1
+ },
+ {
+ "version": 1,
+ "minor_version": 2
+ }
+ ],
+ "features": [
+ "rego_v1"
+ ]
+}
diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.3.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.3.0.json
new file mode 100644
index 0000000000..1253c88b30
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.3.0.json
@@ -0,0 +1,4849 @@
+{
+ "builtins": [
+ {
+ "name": "abs",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "all",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "and",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "\u0026"
+ },
+ {
+ "name": "any",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.concat",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.reverse",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.slice",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "assign",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": ":="
+ },
+ {
+ "name": "base64.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode_no_pad",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.and",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.lsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.negate",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.or",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.rsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.xor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "null"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "ceil",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "concat",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "count",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.equal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.parse_private_keys",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificate_request",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_keypair",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_rsa_private_key",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "div",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "/"
+ },
+ {
+ "name": "endswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "eq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "="
+ },
+ {
+ "name": "equal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "=="
+ },
+ {
+ "name": "floor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "format_int",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.quote_meta",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable_paths",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_and_verify",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_query",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_schema",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.schema_is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "gt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e"
+ },
+ {
+ "name": "gte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e="
+ },
+ {
+ "name": "hex.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "hex.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "http.send",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "indexof",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "indexof_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.member_2",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.member_3",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.print",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.test_case",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "intersection",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode_verify",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign_raw",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.verify_es256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_number",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "indent",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "prefix",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "pretty",
+ "value": {
+ "type": "boolean"
+ }
+ }
+ ],
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.match_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "static": [
+ {
+ "key": "desc",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "error",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "field",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "type",
+ "value": {
+ "type": "string"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.patch",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "op",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "path",
+ "value": {
+ "type": "any"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.verify_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lower",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c"
+ },
+ {
+ "name": "lte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c="
+ },
+ {
+ "name": "max",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "min",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "minus",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "function"
+ },
+ "infix": "-"
+ },
+ {
+ "name": "mul",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "*"
+ },
+ {
+ "name": "neq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "!="
+ },
+ {
+ "name": "net.cidr_contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_contains_matches",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "static": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_expand",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_intersects",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_merge",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_overlap",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.lookup_ip_addr",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "numbers.range",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "numbers.range_step",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.get",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.keys",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.subset",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "opa.runtime",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "or",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "|"
+ },
+ {
+ "name": "plus",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "+"
+ },
+ {
+ "name": "print",
+ "decl": {
+ "type": "function",
+ "variadic": {
+ "type": "any"
+ }
+ }
+ },
+ {
+ "name": "product",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "providers.aws.sign_req",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rand.intn",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "re_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_all_string_submatch_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.globs_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.template_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.chain",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.rule",
+ "decl": {
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.parse_module",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rem",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "%"
+ },
+ {
+ "name": "replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "round",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.compare",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "set_diff",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sort",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sprintf",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "startswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_prefix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_suffix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.count",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.render_template",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.replace_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.reverse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "substring",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sum",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.add_date",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.clock",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.date",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.diff",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.format",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.now_ns",
+ "decl": {
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "time.parse_duration_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_rfc3339_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.weekday",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "to_number",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_left",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_prefix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_right",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_space",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_suffix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "type_name",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "union",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse_bytes",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "upper",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode_object",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode_object",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.rfc4122",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "walk",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "relation": true
+ },
+ {
+ "name": "yaml.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ }
+ ],
+ "wasm_abi_versions": [
+ {
+ "version": 1,
+ "minor_version": 1
+ },
+ {
+ "version": 1,
+ "minor_version": 2
+ }
+ ],
+ "features": [
+ "rego_v1"
+ ]
+}
diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.0.json
new file mode 100644
index 0000000000..1253c88b30
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.0.json
@@ -0,0 +1,4849 @@
+{
+ "builtins": [
+ {
+ "name": "abs",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "all",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "and",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "\u0026"
+ },
+ {
+ "name": "any",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.concat",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.reverse",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.slice",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "assign",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": ":="
+ },
+ {
+ "name": "base64.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode_no_pad",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.and",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.lsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.negate",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.or",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.rsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.xor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "null"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "ceil",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "concat",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "count",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.equal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.parse_private_keys",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificate_request",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_keypair",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_rsa_private_key",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "div",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "/"
+ },
+ {
+ "name": "endswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "eq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "="
+ },
+ {
+ "name": "equal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "=="
+ },
+ {
+ "name": "floor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "format_int",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.quote_meta",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable_paths",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_and_verify",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_query",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_schema",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.schema_is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "gt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e"
+ },
+ {
+ "name": "gte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e="
+ },
+ {
+ "name": "hex.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "hex.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "http.send",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "indexof",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "indexof_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.member_2",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.member_3",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.print",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.test_case",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "intersection",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode_verify",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign_raw",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.verify_es256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_number",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "indent",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "prefix",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "pretty",
+ "value": {
+ "type": "boolean"
+ }
+ }
+ ],
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.match_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "static": [
+ {
+ "key": "desc",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "error",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "field",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "type",
+ "value": {
+ "type": "string"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.patch",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "op",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "path",
+ "value": {
+ "type": "any"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.verify_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lower",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c"
+ },
+ {
+ "name": "lte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c="
+ },
+ {
+ "name": "max",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "min",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "minus",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "function"
+ },
+ "infix": "-"
+ },
+ {
+ "name": "mul",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "*"
+ },
+ {
+ "name": "neq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "!="
+ },
+ {
+ "name": "net.cidr_contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_contains_matches",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "static": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_expand",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_intersects",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_merge",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_overlap",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.lookup_ip_addr",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "numbers.range",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "numbers.range_step",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.get",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.keys",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.subset",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "opa.runtime",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "or",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "|"
+ },
+ {
+ "name": "plus",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "+"
+ },
+ {
+ "name": "print",
+ "decl": {
+ "type": "function",
+ "variadic": {
+ "type": "any"
+ }
+ }
+ },
+ {
+ "name": "product",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "providers.aws.sign_req",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rand.intn",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "re_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_all_string_submatch_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.globs_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.template_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.chain",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.rule",
+ "decl": {
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.parse_module",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rem",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "%"
+ },
+ {
+ "name": "replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "round",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.compare",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "set_diff",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sort",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sprintf",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "startswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_prefix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_suffix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.count",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.render_template",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.replace_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.reverse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "substring",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sum",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.add_date",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.clock",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.date",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.diff",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.format",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.now_ns",
+ "decl": {
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "time.parse_duration_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_rfc3339_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.weekday",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "to_number",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_left",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_prefix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_right",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_space",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_suffix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "type_name",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "union",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse_bytes",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "upper",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode_object",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode_object",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.rfc4122",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "walk",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "relation": true
+ },
+ {
+ "name": "yaml.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ }
+ ],
+ "wasm_abi_versions": [
+ {
+ "version": 1,
+ "minor_version": 1
+ },
+ {
+ "version": 1,
+ "minor_version": 2
+ }
+ ],
+ "features": [
+ "rego_v1"
+ ]
+}
diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.1.json
new file mode 100644
index 0000000000..1253c88b30
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.1.json
@@ -0,0 +1,4849 @@
+{
+ "builtins": [
+ {
+ "name": "abs",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "all",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "and",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "\u0026"
+ },
+ {
+ "name": "any",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.concat",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.reverse",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.slice",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "assign",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": ":="
+ },
+ {
+ "name": "base64.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode_no_pad",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.and",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.lsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.negate",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.or",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.rsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.xor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "null"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "ceil",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "concat",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "count",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.equal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.parse_private_keys",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificate_request",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_keypair",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_rsa_private_key",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "div",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "/"
+ },
+ {
+ "name": "endswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "eq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "="
+ },
+ {
+ "name": "equal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "=="
+ },
+ {
+ "name": "floor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "format_int",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.quote_meta",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable_paths",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_and_verify",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_query",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_schema",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.schema_is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "gt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e"
+ },
+ {
+ "name": "gte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e="
+ },
+ {
+ "name": "hex.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "hex.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "http.send",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "indexof",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "indexof_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.member_2",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.member_3",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.print",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.test_case",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "intersection",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode_verify",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign_raw",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.verify_es256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_number",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "indent",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "prefix",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "pretty",
+ "value": {
+ "type": "boolean"
+ }
+ }
+ ],
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.match_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "static": [
+ {
+ "key": "desc",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "error",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "field",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "type",
+ "value": {
+ "type": "string"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.patch",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "op",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "path",
+ "value": {
+ "type": "any"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.verify_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lower",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c"
+ },
+ {
+ "name": "lte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c="
+ },
+ {
+ "name": "max",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "min",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "minus",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "function"
+ },
+ "infix": "-"
+ },
+ {
+ "name": "mul",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "*"
+ },
+ {
+ "name": "neq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "!="
+ },
+ {
+ "name": "net.cidr_contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_contains_matches",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "static": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_expand",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_intersects",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_merge",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_overlap",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.lookup_ip_addr",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "numbers.range",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "numbers.range_step",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.get",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.keys",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.subset",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "opa.runtime",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "or",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "|"
+ },
+ {
+ "name": "plus",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "+"
+ },
+ {
+ "name": "print",
+ "decl": {
+ "type": "function",
+ "variadic": {
+ "type": "any"
+ }
+ }
+ },
+ {
+ "name": "product",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "providers.aws.sign_req",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rand.intn",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "re_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_all_string_submatch_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.globs_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.template_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.chain",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.rule",
+ "decl": {
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.parse_module",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rem",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "%"
+ },
+ {
+ "name": "replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "round",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.compare",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "set_diff",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sort",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sprintf",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "startswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_prefix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_suffix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.count",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.render_template",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.replace_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.reverse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "substring",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sum",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.add_date",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.clock",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.date",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.diff",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.format",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.now_ns",
+ "decl": {
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "time.parse_duration_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_rfc3339_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.weekday",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "to_number",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_left",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_prefix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_right",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_space",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_suffix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "type_name",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "union",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse_bytes",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "upper",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode_object",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode_object",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.rfc4122",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "walk",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "relation": true
+ },
+ {
+ "name": "yaml.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ }
+ ],
+ "wasm_abi_versions": [
+ {
+ "version": 1,
+ "minor_version": 1
+ },
+ {
+ "version": 1,
+ "minor_version": 2
+ }
+ ],
+ "features": [
+ "rego_v1"
+ ]
+}
diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.2.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.2.json
new file mode 100644
index 0000000000..1253c88b30
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.2.json
@@ -0,0 +1,4849 @@
+{
+ "builtins": [
+ {
+ "name": "abs",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "all",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "and",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "\u0026"
+ },
+ {
+ "name": "any",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.concat",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.reverse",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.slice",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "assign",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": ":="
+ },
+ {
+ "name": "base64.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode_no_pad",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.and",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.lsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.negate",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.or",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.rsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.xor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "null"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "ceil",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "concat",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "count",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.equal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.parse_private_keys",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificate_request",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_keypair",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_rsa_private_key",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "div",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "/"
+ },
+ {
+ "name": "endswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "eq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "="
+ },
+ {
+ "name": "equal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "=="
+ },
+ {
+ "name": "floor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "format_int",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.quote_meta",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable_paths",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_and_verify",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_query",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_schema",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.schema_is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "gt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e"
+ },
+ {
+ "name": "gte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e="
+ },
+ {
+ "name": "hex.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "hex.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "http.send",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "indexof",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "indexof_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.member_2",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.member_3",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.print",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.test_case",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "intersection",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode_verify",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign_raw",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.verify_es256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_number",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "indent",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "prefix",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "pretty",
+ "value": {
+ "type": "boolean"
+ }
+ }
+ ],
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.match_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "static": [
+ {
+ "key": "desc",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "error",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "field",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "type",
+ "value": {
+ "type": "string"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.patch",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "op",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "path",
+ "value": {
+ "type": "any"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.verify_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lower",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c"
+ },
+ {
+ "name": "lte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c="
+ },
+ {
+ "name": "max",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "min",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "minus",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "function"
+ },
+ "infix": "-"
+ },
+ {
+ "name": "mul",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "*"
+ },
+ {
+ "name": "neq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "!="
+ },
+ {
+ "name": "net.cidr_contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_contains_matches",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "static": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_expand",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_intersects",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_merge",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_overlap",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.lookup_ip_addr",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "numbers.range",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "numbers.range_step",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.get",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.keys",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.subset",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "opa.runtime",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "or",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "|"
+ },
+ {
+ "name": "plus",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "+"
+ },
+ {
+ "name": "print",
+ "decl": {
+ "type": "function",
+ "variadic": {
+ "type": "any"
+ }
+ }
+ },
+ {
+ "name": "product",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "providers.aws.sign_req",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rand.intn",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "re_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_all_string_submatch_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.globs_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.template_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.chain",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.rule",
+ "decl": {
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.parse_module",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rem",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "%"
+ },
+ {
+ "name": "replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "round",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.compare",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "set_diff",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sort",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sprintf",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "startswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_prefix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_suffix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.count",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.render_template",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.replace_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.reverse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "substring",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sum",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.add_date",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.clock",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.date",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.diff",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.format",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.now_ns",
+ "decl": {
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "time.parse_duration_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_rfc3339_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.weekday",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "to_number",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_left",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_prefix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_right",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_space",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_suffix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "type_name",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "union",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse_bytes",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "upper",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode_object",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode_object",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.rfc4122",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "walk",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "relation": true
+ },
+ {
+ "name": "yaml.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ }
+ ],
+ "wasm_abi_versions": [
+ {
+ "version": 1,
+ "minor_version": 1
+ },
+ {
+ "version": 1,
+ "minor_version": 2
+ }
+ ],
+ "features": [
+ "rego_v1"
+ ]
+}
diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.5.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.5.0.json
new file mode 100644
index 0000000000..1253c88b30
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.5.0.json
@@ -0,0 +1,4849 @@
+{
+ "builtins": [
+ {
+ "name": "abs",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "all",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "and",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "\u0026"
+ },
+ {
+ "name": "any",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.concat",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.reverse",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.slice",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "assign",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": ":="
+ },
+ {
+ "name": "base64.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode_no_pad",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.and",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.lsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.negate",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.or",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.rsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.xor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "null"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "ceil",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "concat",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "count",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.equal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.parse_private_keys",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificate_request",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_keypair",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_rsa_private_key",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "div",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "/"
+ },
+ {
+ "name": "endswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "eq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "="
+ },
+ {
+ "name": "equal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "=="
+ },
+ {
+ "name": "floor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "format_int",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.quote_meta",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable_paths",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_and_verify",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_query",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_schema",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.schema_is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "gt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e"
+ },
+ {
+ "name": "gte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e="
+ },
+ {
+ "name": "hex.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "hex.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "http.send",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "indexof",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "indexof_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.member_2",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.member_3",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.print",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.test_case",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "intersection",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode_verify",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign_raw",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.verify_es256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_number",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "indent",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "prefix",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "pretty",
+ "value": {
+ "type": "boolean"
+ }
+ }
+ ],
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.match_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "static": [
+ {
+ "key": "desc",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "error",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "field",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "type",
+ "value": {
+ "type": "string"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.patch",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "op",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "path",
+ "value": {
+ "type": "any"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.verify_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lower",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c"
+ },
+ {
+ "name": "lte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c="
+ },
+ {
+ "name": "max",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "min",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "minus",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "function"
+ },
+ "infix": "-"
+ },
+ {
+ "name": "mul",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "*"
+ },
+ {
+ "name": "neq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "!="
+ },
+ {
+ "name": "net.cidr_contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_contains_matches",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "static": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_expand",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_intersects",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_merge",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_overlap",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.lookup_ip_addr",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "numbers.range",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "numbers.range_step",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.get",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.keys",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.subset",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "opa.runtime",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "or",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "|"
+ },
+ {
+ "name": "plus",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "+"
+ },
+ {
+ "name": "print",
+ "decl": {
+ "type": "function",
+ "variadic": {
+ "type": "any"
+ }
+ }
+ },
+ {
+ "name": "product",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "providers.aws.sign_req",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rand.intn",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "re_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_all_string_submatch_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.globs_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.template_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.chain",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.rule",
+ "decl": {
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.parse_module",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rem",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "%"
+ },
+ {
+ "name": "replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "round",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.compare",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "set_diff",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sort",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sprintf",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "startswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_prefix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_suffix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.count",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.render_template",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.replace_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.reverse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "substring",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sum",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.add_date",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.clock",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.date",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.diff",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.format",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.now_ns",
+ "decl": {
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "time.parse_duration_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_rfc3339_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.weekday",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "to_number",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_left",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_prefix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_right",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_space",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_suffix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "type_name",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "union",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse_bytes",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "upper",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode_object",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode_object",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.rfc4122",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "walk",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "relation": true
+ },
+ {
+ "name": "yaml.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ }
+ ],
+ "wasm_abi_versions": [
+ {
+ "version": 1,
+ "minor_version": 1
+ },
+ {
+ "version": 1,
+ "minor_version": 2
+ }
+ ],
+ "features": [
+ "rego_v1"
+ ]
+}
diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.5.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.5.1.json
new file mode 100644
index 0000000000..1253c88b30
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.5.1.json
@@ -0,0 +1,4849 @@
+{
+ "builtins": [
+ {
+ "name": "abs",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "all",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "and",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "\u0026"
+ },
+ {
+ "name": "any",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.concat",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.reverse",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.slice",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "assign",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": ":="
+ },
+ {
+ "name": "base64.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode_no_pad",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.and",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.lsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.negate",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.or",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.rsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.xor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "null"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "ceil",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "concat",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "count",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.equal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.parse_private_keys",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificate_request",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_keypair",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_rsa_private_key",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "div",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "/"
+ },
+ {
+ "name": "endswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "eq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "="
+ },
+ {
+ "name": "equal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "=="
+ },
+ {
+ "name": "floor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "format_int",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.quote_meta",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable_paths",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_and_verify",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_query",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_schema",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.schema_is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "gt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e"
+ },
+ {
+ "name": "gte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e="
+ },
+ {
+ "name": "hex.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "hex.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "http.send",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "indexof",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "indexof_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.member_2",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.member_3",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.print",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.test_case",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "intersection",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode_verify",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign_raw",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.verify_es256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_number",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "indent",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "prefix",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "pretty",
+ "value": {
+ "type": "boolean"
+ }
+ }
+ ],
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.match_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "static": [
+ {
+ "key": "desc",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "error",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "field",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "type",
+ "value": {
+ "type": "string"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.patch",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "op",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "path",
+ "value": {
+ "type": "any"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.verify_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lower",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c"
+ },
+ {
+ "name": "lte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c="
+ },
+ {
+ "name": "max",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "min",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "minus",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "function"
+ },
+ "infix": "-"
+ },
+ {
+ "name": "mul",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "*"
+ },
+ {
+ "name": "neq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "!="
+ },
+ {
+ "name": "net.cidr_contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_contains_matches",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "static": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_expand",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_intersects",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_merge",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_overlap",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.lookup_ip_addr",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "numbers.range",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "numbers.range_step",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.get",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.keys",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.subset",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "opa.runtime",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "or",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "|"
+ },
+ {
+ "name": "plus",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "+"
+ },
+ {
+ "name": "print",
+ "decl": {
+ "type": "function",
+ "variadic": {
+ "type": "any"
+ }
+ }
+ },
+ {
+ "name": "product",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "providers.aws.sign_req",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rand.intn",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "re_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_all_string_submatch_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.globs_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.template_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.chain",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.rule",
+ "decl": {
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.parse_module",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rem",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "%"
+ },
+ {
+ "name": "replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "round",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.compare",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "set_diff",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sort",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sprintf",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "startswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_prefix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_suffix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.count",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.render_template",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.replace_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.reverse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "substring",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sum",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.add_date",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.clock",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.date",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.diff",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.format",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.now_ns",
+ "decl": {
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "time.parse_duration_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_rfc3339_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.weekday",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "to_number",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_left",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_prefix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_right",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_space",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_suffix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "type_name",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "union",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse_bytes",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "upper",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode_object",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode_object",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.rfc4122",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "walk",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "relation": true
+ },
+ {
+ "name": "yaml.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ }
+ ],
+ "wasm_abi_versions": [
+ {
+ "version": 1,
+ "minor_version": 1
+ },
+ {
+ "version": 1,
+ "minor_version": 2
+ }
+ ],
+ "features": [
+ "rego_v1"
+ ]
+}
diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.6.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.6.0.json
new file mode 100644
index 0000000000..110c3eca91
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.6.0.json
@@ -0,0 +1,4850 @@
+{
+ "builtins": [
+ {
+ "name": "abs",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "all",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "and",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "\u0026"
+ },
+ {
+ "name": "any",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.concat",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.reverse",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.slice",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "assign",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": ":="
+ },
+ {
+ "name": "base64.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode_no_pad",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.and",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.lsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.negate",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.or",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.rsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.xor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "null"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "ceil",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "concat",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "count",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.equal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.parse_private_keys",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificate_request",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_keypair",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_rsa_private_key",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "div",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "/"
+ },
+ {
+ "name": "endswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "eq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "="
+ },
+ {
+ "name": "equal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "=="
+ },
+ {
+ "name": "floor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "format_int",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.quote_meta",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable_paths",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_and_verify",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_query",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_schema",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.schema_is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "gt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e"
+ },
+ {
+ "name": "gte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e="
+ },
+ {
+ "name": "hex.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "hex.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "http.send",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "indexof",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "indexof_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.member_2",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.member_3",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.print",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.test_case",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "intersection",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode_verify",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign_raw",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.verify_es256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_number",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "indent",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "prefix",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "pretty",
+ "value": {
+ "type": "boolean"
+ }
+ }
+ ],
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.match_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "static": [
+ {
+ "key": "desc",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "error",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "field",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "type",
+ "value": {
+ "type": "string"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.patch",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "op",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "path",
+ "value": {
+ "type": "any"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.verify_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lower",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c"
+ },
+ {
+ "name": "lte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c="
+ },
+ {
+ "name": "max",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "min",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "minus",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "function"
+ },
+ "infix": "-"
+ },
+ {
+ "name": "mul",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "*"
+ },
+ {
+ "name": "neq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "!="
+ },
+ {
+ "name": "net.cidr_contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_contains_matches",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "static": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_expand",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_intersects",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_merge",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_overlap",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.lookup_ip_addr",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "numbers.range",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "numbers.range_step",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.get",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.keys",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.subset",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "opa.runtime",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "or",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "|"
+ },
+ {
+ "name": "plus",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "+"
+ },
+ {
+ "name": "print",
+ "decl": {
+ "type": "function",
+ "variadic": {
+ "type": "any"
+ }
+ }
+ },
+ {
+ "name": "product",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "providers.aws.sign_req",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rand.intn",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "re_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_all_string_submatch_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.globs_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.template_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.chain",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.rule",
+ "decl": {
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.parse_module",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rem",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "%"
+ },
+ {
+ "name": "replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "round",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.compare",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "set_diff",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sort",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sprintf",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "startswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_prefix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_suffix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.count",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.render_template",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.replace_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.reverse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "substring",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sum",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.add_date",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.clock",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.date",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.diff",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.format",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.now_ns",
+ "decl": {
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "time.parse_duration_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_rfc3339_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.weekday",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "to_number",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_left",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_prefix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_right",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_space",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_suffix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "type_name",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "union",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse_bytes",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "upper",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode_object",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode_object",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.rfc4122",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "walk",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "relation": true
+ },
+ {
+ "name": "yaml.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ }
+ ],
+ "wasm_abi_versions": [
+ {
+ "version": 1,
+ "minor_version": 1
+ },
+ {
+ "version": 1,
+ "minor_version": 2
+ }
+ ],
+ "features": [
+ "keywords_in_refs",
+ "rego_v1"
+ ]
+}
diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.7.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.7.0.json
new file mode 100644
index 0000000000..110c3eca91
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.7.0.json
@@ -0,0 +1,4850 @@
+{
+ "builtins": [
+ {
+ "name": "abs",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "all",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "and",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "\u0026"
+ },
+ {
+ "name": "any",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.concat",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.reverse",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.slice",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "assign",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": ":="
+ },
+ {
+ "name": "base64.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode_no_pad",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.and",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.lsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.negate",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.or",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.rsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.xor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "null"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "ceil",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "concat",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "count",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.equal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.parse_private_keys",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificate_request",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_keypair",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_rsa_private_key",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "div",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "/"
+ },
+ {
+ "name": "endswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "eq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "="
+ },
+ {
+ "name": "equal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "=="
+ },
+ {
+ "name": "floor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "format_int",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.quote_meta",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable_paths",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_and_verify",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_query",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_schema",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.schema_is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "gt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e"
+ },
+ {
+ "name": "gte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e="
+ },
+ {
+ "name": "hex.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "hex.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "http.send",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "indexof",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "indexof_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.member_2",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.member_3",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.print",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.test_case",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "intersection",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode_verify",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign_raw",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.verify_es256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_number",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "indent",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "prefix",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "pretty",
+ "value": {
+ "type": "boolean"
+ }
+ }
+ ],
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.match_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "static": [
+ {
+ "key": "desc",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "error",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "field",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "type",
+ "value": {
+ "type": "string"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.patch",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "op",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "path",
+ "value": {
+ "type": "any"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.verify_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lower",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c"
+ },
+ {
+ "name": "lte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c="
+ },
+ {
+ "name": "max",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "min",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "minus",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "function"
+ },
+ "infix": "-"
+ },
+ {
+ "name": "mul",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "*"
+ },
+ {
+ "name": "neq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "!="
+ },
+ {
+ "name": "net.cidr_contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_contains_matches",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "static": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_expand",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_intersects",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_merge",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_overlap",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.lookup_ip_addr",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "numbers.range",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "numbers.range_step",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.get",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.keys",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.subset",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "opa.runtime",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "or",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "|"
+ },
+ {
+ "name": "plus",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "+"
+ },
+ {
+ "name": "print",
+ "decl": {
+ "type": "function",
+ "variadic": {
+ "type": "any"
+ }
+ }
+ },
+ {
+ "name": "product",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "providers.aws.sign_req",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rand.intn",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "re_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_all_string_submatch_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.globs_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.template_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.chain",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.rule",
+ "decl": {
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.parse_module",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rem",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "%"
+ },
+ {
+ "name": "replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "round",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.compare",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "set_diff",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sort",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sprintf",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "startswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_prefix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_suffix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.count",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.render_template",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.replace_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.reverse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "substring",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sum",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.add_date",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.clock",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.date",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.diff",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.format",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.now_ns",
+ "decl": {
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "time.parse_duration_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_rfc3339_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.weekday",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "to_number",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_left",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_prefix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_right",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_space",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_suffix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "type_name",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "union",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse_bytes",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "upper",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode_object",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode_object",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.rfc4122",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "walk",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "relation": true
+ },
+ {
+ "name": "yaml.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ }
+ ],
+ "wasm_abi_versions": [
+ {
+ "version": 1,
+ "minor_version": 1
+ },
+ {
+ "version": 1,
+ "minor_version": 2
+ }
+ ],
+ "features": [
+ "keywords_in_refs",
+ "rego_v1"
+ ]
+}
diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.8.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.8.0.json
new file mode 100644
index 0000000000..0a37621d0c
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.8.0.json
@@ -0,0 +1,4867 @@
+{
+ "builtins": [
+ {
+ "name": "abs",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "all",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "and",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "\u0026"
+ },
+ {
+ "name": "any",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.concat",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.reverse",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.slice",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "assign",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": ":="
+ },
+ {
+ "name": "base64.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode_no_pad",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.and",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.lsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.negate",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.or",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.rsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.xor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "null"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "ceil",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "concat",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "count",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.equal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.parse_private_keys",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificate_request",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_keypair",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_rsa_private_key",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "div",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "/"
+ },
+ {
+ "name": "endswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "eq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "="
+ },
+ {
+ "name": "equal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "=="
+ },
+ {
+ "name": "floor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "format_int",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.quote_meta",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable_paths",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_and_verify",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_query",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_schema",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.schema_is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "gt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e"
+ },
+ {
+ "name": "gte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e="
+ },
+ {
+ "name": "hex.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "hex.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "http.send",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "indexof",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "indexof_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.member_2",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.member_3",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.print",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.test_case",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "intersection",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode_verify",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign_raw",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.verify_eddsa",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_number",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "indent",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "prefix",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "pretty",
+ "value": {
+ "type": "boolean"
+ }
+ }
+ ],
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.match_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "static": [
+ {
+ "key": "desc",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "error",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "field",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "type",
+ "value": {
+ "type": "string"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.patch",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "op",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "path",
+ "value": {
+ "type": "any"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.verify_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lower",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c"
+ },
+ {
+ "name": "lte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c="
+ },
+ {
+ "name": "max",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "min",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "minus",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "function"
+ },
+ "infix": "-"
+ },
+ {
+ "name": "mul",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "*"
+ },
+ {
+ "name": "neq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "!="
+ },
+ {
+ "name": "net.cidr_contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_contains_matches",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "static": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_expand",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_intersects",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_merge",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_overlap",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.lookup_ip_addr",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "numbers.range",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "numbers.range_step",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.get",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.keys",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.subset",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "opa.runtime",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "or",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "|"
+ },
+ {
+ "name": "plus",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "+"
+ },
+ {
+ "name": "print",
+ "decl": {
+ "type": "function",
+ "variadic": {
+ "type": "any"
+ }
+ }
+ },
+ {
+ "name": "product",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "providers.aws.sign_req",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rand.intn",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "re_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_all_string_submatch_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.globs_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.template_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.chain",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.rule",
+ "decl": {
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.parse_module",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rem",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "%"
+ },
+ {
+ "name": "replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "round",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.compare",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "set_diff",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sort",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sprintf",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "startswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_prefix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_suffix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.count",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.render_template",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.replace_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.reverse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "substring",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sum",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.add_date",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.clock",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.date",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.diff",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.format",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.now_ns",
+ "decl": {
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "time.parse_duration_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_rfc3339_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.weekday",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "to_number",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_left",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_prefix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_right",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_space",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_suffix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "type_name",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "union",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse_bytes",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "upper",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode_object",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode_object",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.rfc4122",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "walk",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "relation": true
+ },
+ {
+ "name": "yaml.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ }
+ ],
+ "wasm_abi_versions": [
+ {
+ "version": 1,
+ "minor_version": 1
+ },
+ {
+ "version": 1,
+ "minor_version": 2
+ }
+ ],
+ "features": [
+ "keywords_in_refs",
+ "rego_v1"
+ ]
+}
diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.9.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.9.0.json
new file mode 100644
index 0000000000..0a37621d0c
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.9.0.json
@@ -0,0 +1,4867 @@
+{
+ "builtins": [
+ {
+ "name": "abs",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "all",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "and",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "\u0026"
+ },
+ {
+ "name": "any",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.concat",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.reverse",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "array.slice",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "assign",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": ":="
+ },
+ {
+ "name": "base64.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "base64url.encode_no_pad",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.and",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.lsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.negate",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.or",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.rsh",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "bits.xor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "null"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "cast_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "ceil",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "concat",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "count",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.equal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.hmac.sha512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.md5",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.parse_private_keys",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha1",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.sha256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_and_verify_certificates_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificate_request",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_certificates",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_keypair",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "crypto.x509.parse_rsa_private_key",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "div",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "/"
+ },
+ {
+ "name": "endswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "eq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "="
+ },
+ {
+ "name": "equal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "=="
+ },
+ {
+ "name": "floor",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "format_int",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "glob.quote_meta",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graph.reachable_paths",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_and_verify",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_query",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.parse_schema",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "graphql.schema_is_valid",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "gt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e"
+ },
+ {
+ "name": "gte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003e="
+ },
+ {
+ "name": "hex.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "hex.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "http.send",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "indexof",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "indexof_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.member_2",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.member_3",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "in"
+ },
+ {
+ "name": "internal.print",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "internal.test_case",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "function"
+ }
+ },
+ {
+ "name": "intersection",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.decode_verify",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.encode_sign_raw",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "io.jwt.verify_eddsa",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_es512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_hs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_ps512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs256",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs384",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "io.jwt.verify_rs512",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_array",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_boolean",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_null",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_number",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_object",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_set",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "is_string",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.marshal_with_options",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "indent",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "prefix",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "pretty",
+ "value": {
+ "type": "boolean"
+ }
+ }
+ ],
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.match_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "dynamic": {
+ "static": [
+ {
+ "key": "desc",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "error",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "field",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "type",
+ "value": {
+ "type": "string"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.patch",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "static": [
+ {
+ "key": "op",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "key": "path",
+ "value": {
+ "type": "any"
+ }
+ }
+ ],
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "json.verify_schema",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "boolean"
+ },
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lower",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "lt",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c"
+ },
+ {
+ "name": "lte",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "\u003c="
+ },
+ {
+ "name": "max",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "min",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "minus",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "function"
+ },
+ "infix": "-"
+ },
+ {
+ "name": "mul",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "*"
+ },
+ {
+ "name": "neq",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ },
+ "infix": "!="
+ },
+ {
+ "name": "net.cidr_contains",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_contains_matches",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "static": [
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_expand",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_intersects",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_merge",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "of": [
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.cidr_overlap",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "net.lookup_ip_addr",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "numbers.range",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "numbers.range_step",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.filter",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.get",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "any"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.keys",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.remove",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.subset",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "object.union_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "opa.runtime",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "or",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ },
+ "infix": "|"
+ },
+ {
+ "name": "plus",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "+"
+ },
+ {
+ "name": "print",
+ "decl": {
+ "type": "function",
+ "variadic": {
+ "type": "any"
+ }
+ }
+ },
+ {
+ "name": "product",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "providers.aws.sign_req",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "any"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rand.intn",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "re_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_all_string_submatch_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.find_n",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.globs_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "regex.template_match",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.chain",
+ "decl": {
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.metadata.rule",
+ "decl": {
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rego.parse_module",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "rem",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "infix": "%"
+ },
+ {
+ "name": "replace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "round",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.compare",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "semver.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "set_diff",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sort",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "split",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sprintf",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "startswith",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_prefix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.any_suffix_match",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.count",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.render_template",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.replace_n",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "strings.reverse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "substring",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "sum",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "dynamic": {
+ "type": "number"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "number"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.add_date",
+ "decl": {
+ "args": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.clock",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.date",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.diff",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ },
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "number"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.format",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.now_ns",
+ "decl": {
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "time.parse_duration_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.parse_rfc3339_ns",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "time.weekday",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "number"
+ },
+ {
+ "static": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "array"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "to_number",
+ "decl": {
+ "args": [
+ {
+ "of": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trace",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_left",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_prefix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_right",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_space",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "trim_suffix",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "type_name",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "union",
+ "decl": {
+ "args": [
+ {
+ "of": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "set"
+ }
+ ],
+ "result": {
+ "of": {
+ "type": "any"
+ },
+ "type": "set"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "units.parse_bytes",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "number"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "upper",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.decode_object",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "urlquery.encode_object",
+ "decl": {
+ "args": [
+ {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "of": [
+ {
+ "type": "string"
+ },
+ {
+ "dynamic": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "of": {
+ "type": "string"
+ },
+ "type": "set"
+ }
+ ],
+ "type": "any"
+ }
+ },
+ "type": "object"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.parse",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "dynamic": {
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "any"
+ }
+ },
+ "type": "object"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "uuid.rfc4122",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ },
+ "nondeterministic": true
+ },
+ {
+ "name": "walk",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "static": [
+ {
+ "dynamic": {
+ "type": "any"
+ },
+ "type": "array"
+ },
+ {
+ "type": "any"
+ }
+ ],
+ "type": "array"
+ },
+ "type": "function"
+ },
+ "relation": true
+ },
+ {
+ "name": "yaml.is_valid",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "boolean"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.marshal",
+ "decl": {
+ "args": [
+ {
+ "type": "any"
+ }
+ ],
+ "result": {
+ "type": "string"
+ },
+ "type": "function"
+ }
+ },
+ {
+ "name": "yaml.unmarshal",
+ "decl": {
+ "args": [
+ {
+ "type": "string"
+ }
+ ],
+ "result": {
+ "type": "any"
+ },
+ "type": "function"
+ }
+ }
+ ],
+ "wasm_abi_versions": [
+ {
+ "version": 1,
+ "minor_version": 1
+ },
+ {
+ "version": 1,
+ "minor_version": 2
+ }
+ ],
+ "features": [
+ "keywords_in_refs",
+ "rego_v1"
+ ]
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go b/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go
index 064649733a..98093b774e 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go
@@ -6,15 +6,16 @@ package bundle
import (
"context"
+ "errors"
"fmt"
"io"
"os"
"path/filepath"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/bundle"
- "github.com/open-policy-agent/opa/resolver/wasm"
- "github.com/open-policy-agent/opa/storage"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/bundle"
+ "github.com/open-policy-agent/opa/v1/resolver/wasm"
+ "github.com/open-policy-agent/opa/v1/storage"
)
// LoadWasmResolversFromStore will lookup all Wasm modules from the store along with the
@@ -71,7 +72,7 @@ func LoadWasmResolversFromStore(ctx context.Context, store storage.Store, txn st
var resolvers []*wasm.Resolver
if len(resolversToLoad) > 0 {
// Get a full snapshot of the current data (including any from "outside" the bundles)
- data, err := store.Read(ctx, txn, storage.Path{})
+ data, err := store.Read(ctx, txn, storage.RootPath)
if err != nil {
return nil, fmt.Errorf("failed to initialize wasm runtime: %s", err)
}
@@ -97,7 +98,7 @@ func LoadBundleFromDiskForRegoVersion(regoVersion ast.RegoVersion, path, name st
_, err := os.Stat(bundlePath)
if err == nil {
- f, err := os.Open(filepath.Join(bundlePath))
+ f, err := os.Open(bundlePath)
if err != nil {
return nil, err
}
@@ -132,7 +133,7 @@ func SaveBundleToDisk(path string, raw io.Reader) (string, error) {
}
if raw == nil {
- return "", fmt.Errorf("no raw bundle bytes to persist to disk")
+ return "", errors.New("no raw bundle bytes to persist to disk")
}
dest, err := os.CreateTemp(path, ".bundle.tar.gz.*.tmp")
diff --git a/vendor/github.com/open-policy-agent/opa/internal/cidr/merge/merge.go b/vendor/github.com/open-policy-agent/opa/internal/cidr/merge/merge.go
index a019cde128..c2392b6775 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/cidr/merge/merge.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/cidr/merge/merge.go
@@ -114,7 +114,7 @@ func GetAddressRange(ipNet net.IPNet) (net.IP, net.IP) {
copy(lastIPMask, ipNet.Mask)
for i := range lastIPMask {
lastIPMask[len(lastIPMask)-i-1] = ^lastIPMask[len(lastIPMask)-i-1]
- lastIP[net.IPv6len-i-1] = lastIP[net.IPv6len-i-1] | lastIPMask[len(lastIPMask)-i-1]
+ lastIP[net.IPv6len-i-1] |= lastIPMask[len(lastIPMask)-i-1]
}
return firstIP, lastIP
diff --git a/vendor/github.com/open-policy-agent/opa/internal/compiler/utils.go b/vendor/github.com/open-policy-agent/opa/internal/compiler/utils.go
index 4d80aeeef9..5d2e778b13 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/compiler/utils.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/compiler/utils.go
@@ -5,9 +5,12 @@
package compiler
import (
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/schemas"
- "github.com/open-policy-agent/opa/util"
+ "errors"
+ "sync"
+
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/schemas"
+ "github.com/open-policy-agent/opa/v1/util"
)
type SchemaFile string
@@ -16,12 +19,35 @@ const (
AuthorizationPolicySchema SchemaFile = "authorizationPolicy.json"
)
-var schemaDefinitions = map[SchemaFile]interface{}{}
+var schemaDefinitions = map[SchemaFile]any{}
+
+var loadOnce = sync.OnceValue(func() error {
+ cont, err := schemas.FS.ReadFile(string(AuthorizationPolicySchema))
+ if err != nil {
+ return err
+ }
+
+ if len(cont) == 0 {
+ return errors.New("expected authorization policy schema file to be present")
+ }
+
+ var schema any
+ if err := util.Unmarshal(cont, &schema); err != nil {
+ return err
+ }
+
+ schemaDefinitions[AuthorizationPolicySchema] = schema
+
+ return nil
+})
// VerifyAuthorizationPolicySchema performs type checking on rules against the schema for the Authorization Policy
// Input document.
// NOTE: The provided compiler should have already run the compilation process on the input modules
func VerifyAuthorizationPolicySchema(compiler *ast.Compiler, ref ast.Ref) error {
+ if err := loadOnce(); err != nil {
+ panic(err)
+ }
rules := getRulesWithDependencies(compiler, ref)
@@ -32,7 +58,10 @@ func VerifyAuthorizationPolicySchema(compiler *ast.Compiler, ref ast.Ref) error
schemaSet := ast.NewSchemaSet()
schemaSet.Put(ast.SchemaRootRef, schemaDefinitions[AuthorizationPolicySchema])
- errs := ast.NewCompiler().WithSchemas(schemaSet).PassesTypeCheckRules(rules)
+ errs := ast.NewCompiler().
+ WithDefaultRegoVersion(compiler.DefaultRegoVersion()).
+ WithSchemas(schemaSet).
+ PassesTypeCheckRules(rules)
if len(errs) > 0 {
return errs
@@ -64,26 +93,3 @@ func transitiveDependencies(compiler *ast.Compiler, rule *ast.Rule, deps map[*as
transitiveDependencies(compiler, other, deps)
}
}
-
-func loadAuthorizationPolicySchema() {
-
- cont, err := schemas.FS.ReadFile(string(AuthorizationPolicySchema))
- if err != nil {
- panic(err)
- }
-
- if len(cont) == 0 {
- panic("expected authorization policy schema file to be present")
- }
-
- var schema interface{}
- if err := util.Unmarshal(cont, &schema); err != nil {
- panic(err)
- }
-
- schemaDefinitions[AuthorizationPolicySchema] = schema
-}
-
-func init() {
- loadAuthorizationPolicySchema()
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go
index 9a5cebec54..25cbc13b47 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go
@@ -12,7 +12,6 @@ import (
"fmt"
"io"
- "github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/internal/compiler/wasm/opa"
"github.com/open-policy-agent/opa/internal/debug"
"github.com/open-policy-agent/opa/internal/wasm/encoding"
@@ -20,8 +19,9 @@ import (
"github.com/open-policy-agent/opa/internal/wasm/module"
"github.com/open-policy-agent/opa/internal/wasm/types"
"github.com/open-policy-agent/opa/internal/wasm/util"
- "github.com/open-policy-agent/opa/ir"
- opatypes "github.com/open-policy-agent/opa/types"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/ir"
+ opatypes "github.com/open-policy-agent/opa/v1/types"
)
// Record Wasm ABI version in exported global variable
@@ -340,7 +340,7 @@ func (c *Compiler) initModule() error {
// two times. But let's deal with that when it happens.
if _, ok := c.funcs[name]; ok { // already seen
c.debug.Printf("function name duplicate: %s (%d)", name, fn.Index)
- name = name + ".1"
+ name += ".1"
}
c.funcs[name] = fn.Index
}
@@ -348,7 +348,7 @@ func (c *Compiler) initModule() error {
for _, fn := range c.policy.Funcs.Funcs {
params := make([]types.ValueType, len(fn.Params))
- for i := 0; i < len(params); i++ {
+ for i := range params {
params[i] = types.I32
}
@@ -827,7 +827,7 @@ func (c *Compiler) compileFunc(fn *ir.Func) error {
memoize := len(fn.Params) == 2
if len(fn.Params) == 0 {
- return fmt.Errorf("illegal function: zero args")
+ return errors.New("illegal function: zero args")
}
c.nextLocal = 0
@@ -996,12 +996,16 @@ func (c *Compiler) compileBlock(block *ir.Block) ([]instruction.Instruction, err
for _, stmt := range block.Stmts {
switch stmt := stmt.(type) {
case *ir.ResultSetAddStmt:
- instrs = append(instrs, instruction.GetLocal{Index: c.lrs})
- instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Value)})
- instrs = append(instrs, instruction.Call{Index: c.function(opaSetAdd)})
+ instrs = append(instrs,
+ instruction.GetLocal{Index: c.lrs},
+ instruction.GetLocal{Index: c.local(stmt.Value)},
+ instruction.Call{Index: c.function(opaSetAdd)},
+ )
case *ir.ReturnLocalStmt:
- instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Source)})
- instrs = append(instrs, instruction.Return{})
+ instrs = append(instrs,
+ instruction.GetLocal{Index: c.local(stmt.Source)},
+ instruction.Return{},
+ )
case *ir.BlockStmt:
for i := range stmt.Blocks {
block, err := c.compileBlock(stmt.Blocks[i])
@@ -1029,8 +1033,10 @@ func (c *Compiler) compileBlock(block *ir.Block) ([]instruction.Instruction, err
return instrs, err
}
case *ir.AssignVarStmt:
- instrs = append(instrs, c.instrRead(stmt.Source))
- instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)})
+ instrs = append(instrs,
+ c.instrRead(stmt.Source),
+ instruction.SetLocal{Index: c.local(stmt.Target)},
+ )
case *ir.AssignVarOnceStmt:
instrs = append(instrs, instruction.Block{
Instrs: []instruction.Instruction{
@@ -1360,7 +1366,7 @@ func (c *Compiler) compileUpsert(local ir.Local, path []int, value ir.Operand, _
// Initialize the locals that specify the path of the upsert operation.
lpath := make(map[int]uint32, len(path))
- for i := 0; i < len(path); i++ {
+ for i := range path {
lpath[i] = c.genLocal()
instrs = append(instrs, instruction.I32Const{Value: c.opaStringAddr(path[i])})
instrs = append(instrs, instruction.SetLocal{Index: lpath[i]})
@@ -1369,10 +1375,10 @@ func (c *Compiler) compileUpsert(local ir.Local, path []int, value ir.Operand, _
// Generate a block that traverses the path of the upsert operation,
// shallowing copying values at each step as needed. Stop before the final
// segment that will only be inserted.
- var inner []instruction.Instruction
+ inner := make([]instruction.Instruction, 0, len(path)*21+1)
ltemp := c.genLocal()
- for i := 0; i < len(path)-1; i++ {
+ for i := range len(path) - 1 {
// Lookup the next part of the path.
inner = append(inner, instruction.GetLocal{Index: lcopy})
@@ -1408,10 +1414,10 @@ func (c *Compiler) compileUpsert(local ir.Local, path []int, value ir.Operand, _
inner = append(inner, instruction.Br{Index: uint32(len(path) - 1)})
// Generate blocks that handle missing nodes during traversal.
- var block []instruction.Instruction
+ block := make([]instruction.Instruction, 0, len(path)*10)
lval := c.genLocal()
- for i := 0; i < len(path)-1; i++ {
+ for i := range len(path) - 1 {
block = append(block, instruction.Block{Instrs: inner})
block = append(block, instruction.Call{Index: c.function(opaObject)})
block = append(block, instruction.SetLocal{Index: lval})
@@ -1535,8 +1541,7 @@ func (c *Compiler) compileExternalCall(stmt *ir.CallStmt, ef externalFunc, resul
}
instrs := *result
- instrs = append(instrs, instruction.I32Const{Value: ef.ID})
- instrs = append(instrs, instruction.I32Const{Value: 0}) // unused context parameter
+ instrs = append(instrs, instruction.I32Const{Value: ef.ID}, instruction.I32Const{Value: 0}) // unused context parameter
for _, arg := range stmt.Args {
instrs = append(instrs, c.instrRead(arg))
@@ -1545,9 +1550,11 @@ func (c *Compiler) compileExternalCall(stmt *ir.CallStmt, ef externalFunc, resul
instrs = append(instrs, instruction.Call{Index: c.function(builtinDispatchers[len(stmt.Args)])})
if ef.Decl.Result() != nil {
- instrs = append(instrs, instruction.TeeLocal{Index: c.local(stmt.Result)})
- instrs = append(instrs, instruction.I32Eqz{})
- instrs = append(instrs, instruction.BrIf{Index: 0})
+ instrs = append(instrs,
+ instruction.TeeLocal{Index: c.local(stmt.Result)},
+ instruction.I32Eqz{},
+ instruction.BrIf{Index: 0},
+ )
} else {
instrs = append(instrs, instruction.Drop{})
}
@@ -1678,7 +1685,7 @@ func (c *Compiler) genLocal() uint32 {
func (c *Compiler) function(name string) uint32 {
fidx, ok := c.funcs[name]
if !ok {
- panic(fmt.Sprintf("function not found: %s", name))
+ panic("function not found: " + name)
}
return fidx
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/config/config.go b/vendor/github.com/open-policy-agent/opa/internal/config/config.go
index b1a9731f65..53dfc6d6cb 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/config/config.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/config/config.go
@@ -15,11 +15,11 @@ import (
"sigs.k8s.io/yaml"
"github.com/open-policy-agent/opa/internal/strvals"
- "github.com/open-policy-agent/opa/keys"
- "github.com/open-policy-agent/opa/logging"
- "github.com/open-policy-agent/opa/plugins/rest"
- "github.com/open-policy-agent/opa/tracing"
- "github.com/open-policy-agent/opa/util"
+ "github.com/open-policy-agent/opa/v1/keys"
+ "github.com/open-policy-agent/opa/v1/logging"
+ "github.com/open-policy-agent/opa/v1/plugins/rest"
+ "github.com/open-policy-agent/opa/v1/tracing"
+ "github.com/open-policy-agent/opa/v1/util"
)
// ServiceOptions stores the options passed to ParseServicesConfig
@@ -70,7 +70,7 @@ func ParseServicesConfig(opts ServiceOptions) (map[string]rest.Client, error) {
// read from disk (if specified) and overrides will be applied. If no config file is
// specified, the overrides can still be applied to an empty config.
func Load(configFile string, overrides []string, overrideFiles []string) ([]byte, error) {
- baseConf := map[string]interface{}{}
+ baseConf := map[string]any{}
// User specified config file
if configFile != "" {
@@ -88,7 +88,7 @@ func Load(configFile string, overrides []string, overrideFiles []string) ([]byte
}
}
- overrideConf := map[string]interface{}{}
+ overrideConf := map[string]any{}
// User specified a config override via --set
for _, override := range overrides {
@@ -100,7 +100,7 @@ func Load(configFile string, overrides []string, overrideFiles []string) ([]byte
// User specified a config override value via --set-file
for _, override := range overrideFiles {
- reader := func(rs []rune) (interface{}, error) {
+ reader := func(rs []rune) (any, error) {
bytes, err := os.ReadFile(string(rs))
value := strings.TrimSpace(string(bytes))
return value, err
@@ -120,8 +120,12 @@ func Load(configFile string, overrides []string, overrideFiles []string) ([]byte
// regex looking for ${...} notation strings
var envRegex = regexp.MustCompile(`(?U:\${.*})`)
-// subEnvVars will look for any environment variables in the passed in string
+// SubEnvVars will look for any environment variables in the passed in string
// with the syntax of ${VAR_NAME} and replace that string with ENV[VAR_NAME]
+func SubEnvVars(s string) string {
+ return subEnvVars(s)
+}
+
func subEnvVars(s string) string {
updatedConfig := envRegex.ReplaceAllStringFunc(s, func(s string) string {
// Trim off the '${' and '}'
@@ -131,31 +135,35 @@ func subEnvVars(s string) string {
}
varName := s[2 : len(s)-1]
- // Lookup the variable in the environment. We play by
- // bash rules.. if its undefined we'll treat it as an
- // empty string instead of raising an error.
- return os.Getenv(varName)
+ // Lookup the variable in the environment. We do not
+ // play by bash rules: if its undefined we'll keep it
+ // as-is, it could be replaced somewhere down the line.
+ // If it's set to "", we'll return that.
+ if lu, ok := os.LookupEnv(varName); ok {
+ return lu
+ }
+ return s
})
return updatedConfig
}
// mergeValues will merge source and destination map, preferring values from the source map
-func mergeValues(dest map[string]interface{}, src map[string]interface{}) map[string]interface{} {
+func mergeValues(dest map[string]any, src map[string]any) map[string]any {
for k, v := range src {
// If the key doesn't exist already, then just set the key to that value
if _, exists := dest[k]; !exists {
dest[k] = v
continue
}
- nextMap, ok := v.(map[string]interface{})
+ nextMap, ok := v.(map[string]any)
// If it isn't another map, overwrite the value
if !ok {
dest[k] = v
continue
}
// Edge case: If the key exists in the destination, but isn't a map
- destMap, isMap := dest[k].(map[string]interface{})
+ destMap, isMap := dest[k].(map[string]any)
// If the source map has a map for this key, prefer it
if !isMap {
dest[k] = v
diff --git a/vendor/github.com/open-policy-agent/opa/internal/debug/debug.go b/vendor/github.com/open-policy-agent/opa/internal/debug/debug.go
index 7b90bd1bb0..9448aeb288 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/debug/debug.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/debug/debug.go
@@ -8,7 +8,7 @@ import (
// Debug allows printing debug messages.
type Debug interface {
// Printf prints, with a short file:line-number prefix
- Printf(format string, args ...interface{})
+ Printf(format string, args ...any)
// Writer returns the writer being written to, which may be
// `io.Discard` if no debug output is requested.
Writer() io.Writer
diff --git a/vendor/github.com/open-policy-agent/opa/internal/deepcopy/deepcopy.go b/vendor/github.com/open-policy-agent/opa/internal/deepcopy/deepcopy.go
index 00e8df6f88..dc3a231bc1 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/deepcopy/deepcopy.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/deepcopy/deepcopy.go
@@ -5,25 +5,25 @@
package deepcopy
// DeepCopy performs a recursive deep copy for nested slices/maps and
-// returns the copied object. Supports []interface{}
-// and map[string]interface{} only
-func DeepCopy(val interface{}) interface{} {
+// returns the copied object. Supports []any
+// and map[string]any only
+func DeepCopy(val any) any {
switch val := val.(type) {
- case []interface{}:
- cpy := make([]interface{}, len(val))
+ case []any:
+ cpy := make([]any, len(val))
for i := range cpy {
cpy[i] = DeepCopy(val[i])
}
return cpy
- case map[string]interface{}:
+ case map[string]any:
return Map(val)
default:
return val
}
}
-func Map(val map[string]interface{}) map[string]interface{} {
- cpy := make(map[string]interface{}, len(val))
+func Map(val map[string]any) map[string]any {
+ cpy := make(map[string]any, len(val))
for k := range val {
cpy[k] = DeepCopy(val[k])
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/edittree/bitvector/bitvector.go b/vendor/github.com/open-policy-agent/opa/internal/edittree/bitvector/bitvector.go
index 89e7e137b7..bfacf3bcea 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/edittree/bitvector/bitvector.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/edittree/bitvector/bitvector.go
@@ -36,7 +36,7 @@ func (vector *BitVector) Length() int {
// position of the last byte in the slice.
// This returns the bit that was shifted off of the last byte.
func shiftLower(bit byte, b []byte) byte {
- bit = bit << 7
+ bit <<= 7
for i := len(b) - 1; i >= 0; i-- {
newByte := b[i] >> 1
newByte |= bit
@@ -51,7 +51,7 @@ func shiftLower(bit byte, b []byte) byte {
// position of the first byte in the slice.
// This returns the bit that was shifted off the last byte.
func shiftHigher(bit byte, b []byte) byte {
- for i := 0; i < len(b); i++ {
+ for i := range b {
newByte := b[i] << 1
newByte |= bit
bit = (b[i] & 0x80) >> 7
diff --git a/vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go b/vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go
index 9cfaee8baf..ebfa875d75 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go
@@ -146,14 +146,13 @@
package edittree
import (
- "encoding/json"
+ "errors"
"fmt"
- "math/big"
"sort"
"strings"
- "github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/internal/edittree/bitvector"
+ "github.com/open-policy-agent/opa/v1/ast"
)
// Deletions are encoded with a nil value pointer.
@@ -203,89 +202,13 @@ func NewEditTree(term *ast.Term) *EditTree {
// it was found in the table already.
func (e *EditTree) getKeyHash(key *ast.Term) (int, bool) {
hash := key.Hash()
- // This `equal` utility is duplicated and manually inlined a number of
- // time in this file. Inlining it avoids heap allocations, so it makes
- // a big performance difference: some operations like lookup become twice
- // as slow without it.
- var equal func(v ast.Value) bool
-
- switch x := key.Value.(type) {
- case ast.Null, ast.Boolean, ast.String, ast.Var:
- equal = func(y ast.Value) bool { return x == y }
- case ast.Number:
- if xi, err := json.Number(x).Int64(); err == nil {
- equal = func(y ast.Value) bool {
- if y, ok := y.(ast.Number); ok {
- if yi, err := json.Number(y).Int64(); err == nil {
- return xi == yi
- }
- }
-
- return false
- }
- break
- }
-
- // We use big.Rat for comparing big numbers.
- // It replaces big.Float due to following reason:
- // big.Float comes with a default precision of 64, and setting a
- // larger precision results in more memory being allocated
- // (regardless of the actual number we are parsing with SetString).
- //
- // Note: If we're so close to zero that big.Float says we are zero, do
- // *not* big.Rat).SetString on the original string it'll potentially
- // take very long.
- var a *big.Rat
- fa, ok := new(big.Float).SetString(string(x))
- if !ok {
- panic("illegal value")
- }
- if fa.IsInt() {
- if i, _ := fa.Int64(); i == 0 {
- a = new(big.Rat).SetInt64(0)
- }
- }
- if a == nil {
- a, ok = new(big.Rat).SetString(string(x))
- if !ok {
- panic("illegal value")
- }
- }
-
- equal = func(b ast.Value) bool {
- if bNum, ok := b.(ast.Number); ok {
- var b *big.Rat
- fb, ok := new(big.Float).SetString(string(bNum))
- if !ok {
- panic("illegal value")
- }
- if fb.IsInt() {
- if i, _ := fb.Int64(); i == 0 {
- b = new(big.Rat).SetInt64(0)
- }
- }
- if b == nil {
- b, ok = new(big.Rat).SetString(string(bNum))
- if !ok {
- panic("illegal value")
- }
- }
-
- return a.Cmp(b) == 0
- }
- return false
- }
-
- default:
- equal = func(y ast.Value) bool { return ast.Compare(x, y) == 0 }
- }
// Look through childKeys, looking up the original hash
// value first, and then use linear-probing to iter
// through the keys until we either find the Term we're
// after, or run out of candidates.
for curr, ok := e.childKeys[hash]; ok; {
- if equal(curr.Value) {
+ if ast.KeyHashEqual(curr.Value, key.Value) {
return hash, true
}
@@ -336,13 +259,13 @@ func (e *EditTree) deleteChildValue(hash int) {
// Insert creates a new child of e, and returns the new child EditTree node.
func (e *EditTree) Insert(key, value *ast.Term) (*EditTree, error) {
if e.value == nil {
- return nil, fmt.Errorf("deleted node encountered during insert operation")
+ return nil, errors.New("deleted node encountered during insert operation")
}
if key == nil {
- return nil, fmt.Errorf("nil key provided for insert operation")
+ return nil, errors.New("nil key provided for insert operation")
}
if value == nil {
- return nil, fmt.Errorf("nil value provided for insert operation")
+ return nil, errors.New("nil value provided for insert operation")
}
switch x := e.value.Value.(type) {
@@ -368,7 +291,7 @@ func (e *EditTree) Insert(key, value *ast.Term) (*EditTree, error) {
return nil, err
}
if idx < 0 || idx > e.insertions.Length() {
- return nil, fmt.Errorf("index for array insertion out of bounds")
+ return nil, errors.New("index for array insertion out of bounds")
}
return e.unsafeInsertArray(idx, value), nil
default:
@@ -458,10 +381,10 @@ func (e *EditTree) unsafeInsertArray(idx int, value *ast.Term) *EditTree {
// already present in e. It then returns the deleted child EditTree node.
func (e *EditTree) Delete(key *ast.Term) (*EditTree, error) {
if e.value == nil {
- return nil, fmt.Errorf("deleted node encountered during delete operation")
+ return nil, errors.New("deleted node encountered during delete operation")
}
if key == nil {
- return nil, fmt.Errorf("nil key provided for delete operation")
+ return nil, errors.New("nil key provided for delete operation")
}
switch e.value.Value.(type) {
@@ -532,7 +455,7 @@ func (e *EditTree) Delete(key *ast.Term) (*EditTree, error) {
return nil, err
}
if idx < 0 || idx > e.insertions.Length()-1 {
- return nil, fmt.Errorf("index for array delete out of bounds")
+ return nil, errors.New("index for array delete out of bounds")
}
// Collect insertion indexes above the delete site for rewriting.
@@ -553,14 +476,14 @@ func (e *EditTree) Delete(key *ast.Term) (*EditTree, error) {
}
// Do rewrites to clear out the newly-removed element.
e.deleteChildValue(idx)
- for i := 0; i < len(rewritesScalars); i++ {
+ for i := range rewritesScalars {
originalIdx := rewritesScalars[i]
rewriteIdx := rewritesScalars[i] - 1
v := e.childScalarValues[originalIdx]
e.deleteChildValue(originalIdx)
e.setChildScalarValue(rewriteIdx, v)
}
- for i := 0; i < len(rewritesComposites); i++ {
+ for i := range rewritesComposites {
originalIdx := rewritesComposites[i]
rewriteIdx := rewritesComposites[i] - 1
v := e.childCompositeValues[originalIdx]
@@ -592,7 +515,7 @@ func (e *EditTree) Delete(key *ast.Term) (*EditTree, error) {
//gcassert:inline
func sumZeroesBelowIndex(index int, bv *bitvector.BitVector) int {
zeroesSeen := 0
- for i := 0; i < index; i++ {
+ for i := range index {
if bv.Element(i) == 0 {
zeroesSeen++
}
@@ -602,7 +525,7 @@ func sumZeroesBelowIndex(index int, bv *bitvector.BitVector) int {
func findIndexOfNthZero(n int, bv *bitvector.BitVector) (int, bool) {
zeroesSeen := 0
- for i := 0; i < bv.Length(); i++ {
+ for i := range bv.Length() {
if bv.Element(i) == 0 {
zeroesSeen++
}
@@ -638,7 +561,7 @@ func (e *EditTree) Unfold(path ast.Ref) (*EditTree, error) {
}
// 1+ path segment case.
if e.value == nil {
- return nil, fmt.Errorf("nil value encountered where composite value was expected")
+ return nil, errors.New("nil value encountered where composite value was expected")
}
// Switch behavior based on types.
@@ -723,15 +646,17 @@ func (e *EditTree) Unfold(path ast.Ref) (*EditTree, error) {
return child.Unfold(path[1:])
}
+ idxt := ast.InternedTerm(idx)
+
// Fall back to looking up the key in e.value.
// Extend the tree if key is present. Error otherwise.
- if v, err := x.Find(ast.Ref{ast.IntNumberTerm(idx)}); err == nil {
+ if v, err := x.Find(ast.Ref{idxt}); err == nil {
// TODO: Consider a more efficient "Replace" function that special-cases this for arrays instead?
- _, err := e.Delete(ast.IntNumberTerm(idx))
+ _, err := e.Delete(idxt)
if err != nil {
return nil, err
}
- child, err := e.Insert(ast.IntNumberTerm(idx), ast.NewTerm(v))
+ child, err := e.Insert(idxt, ast.NewTerm(v))
if err != nil {
return nil, err
}
@@ -832,7 +757,7 @@ func (e *EditTree) Render() *ast.Term {
// original array. We build a new Array with modified/deleted keys.
out := make([]*ast.Term, 0, e.insertions.Length())
eIdx := 0
- for i := 0; i < e.insertions.Length(); i++ {
+ for i := range e.insertions.Length() {
// If the index == 0, that indicates we should look up the next
// surviving original element.
// If the index == 1, that indicates we should look up that
@@ -880,7 +805,7 @@ func (e *EditTree) Render() *ast.Term {
// Returns the inserted EditTree node.
func (e *EditTree) InsertAtPath(path ast.Ref, value *ast.Term) (*EditTree, error) {
if value == nil {
- return nil, fmt.Errorf("cannot insert nil value into EditTree")
+ return nil, errors.New("cannot insert nil value into EditTree")
}
if len(path) == 0 {
@@ -911,7 +836,7 @@ func (e *EditTree) DeleteAtPath(path ast.Ref) (*EditTree, error) {
// Root document case:
if len(path) == 0 {
if e.value == nil {
- return nil, fmt.Errorf("deleted node encountered during delete operation")
+ return nil, errors.New("deleted node encountered during delete operation")
}
e.value = nil
e.childKeys = nil
@@ -1026,8 +951,7 @@ func (e *EditTree) Exists(path ast.Ref) bool {
}
// Fallback if child lookup failed.
// We have to ensure that the lookup term is a number here, or Find will fail.
- k := ast.Ref{ast.IntNumberTerm(idx)}.Concat(path[1:])
- _, err = x.Find(k)
+ _, err = x.Find(ast.Ref{ast.InternedTerm(idx)}.Concat(path[1:]))
return err == nil
default:
// Catch all primitive types.
@@ -1048,7 +972,7 @@ func toIndex(arrayLength int, term *ast.Term) (int, error) {
switch v := term.Value.(type) {
case ast.Number:
if i, ok = v.Int(); !ok {
- return 0, fmt.Errorf("invalid number type for indexing")
+ return 0, errors.New("invalid number type for indexing")
}
case ast.String:
if v == "-" {
@@ -1056,13 +980,13 @@ func toIndex(arrayLength int, term *ast.Term) (int, error) {
}
num := ast.Number(v)
if i, ok = num.Int(); !ok {
- return 0, fmt.Errorf("invalid string for indexing")
+ return 0, errors.New("invalid string for indexing")
}
if v != "0" && strings.HasPrefix(string(v), "0") {
- return 0, fmt.Errorf("leading zeros are not allowed in JSON paths")
+ return 0, errors.New("leading zeros are not allowed in JSON paths")
}
default:
- return 0, fmt.Errorf("invalid type for indexing")
+ return 0, errors.New("invalid type for indexing")
}
return i, nil
@@ -1181,5 +1105,5 @@ func (e *EditTree) Filter(paths []ast.Ref) *ast.Term {
type termSlice []*ast.Term
func (s termSlice) Less(i, j int) bool { return ast.Compare(s[i].Value, s[j].Value) < 0 }
-func (s termSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x }
+func (s termSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s termSlice) Len() int { return len(s) }
diff --git a/vendor/github.com/open-policy-agent/opa/internal/future/filter_imports.go b/vendor/github.com/open-policy-agent/opa/internal/future/filter_imports.go
index cf5721101a..27ca5559f1 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/future/filter_imports.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/future/filter_imports.go
@@ -4,7 +4,7 @@
package future
-import "github.com/open-policy-agent/opa/ast"
+import "github.com/open-policy-agent/opa/v1/ast"
// FilterFutureImports filters OUT any future imports from the passed slice of
// `*ast.Import`s.
@@ -24,7 +24,7 @@ func IsAllFutureKeywords(imp *ast.Import) bool {
path := imp.Path.Value.(ast.Ref)
return len(path) == 2 &&
ast.FutureRootDocument.Equal(path[0]) &&
- path[1].Equal(ast.StringTerm("keywords"))
+ path[1].Equal(ast.InternedTerm("keywords"))
}
// IsFutureKeyword returns true if the passed *ast.Import is `future.keywords.{kw}`
@@ -32,7 +32,7 @@ func IsFutureKeyword(imp *ast.Import, kw string) bool {
path := imp.Path.Value.(ast.Ref)
return len(path) == 3 &&
ast.FutureRootDocument.Equal(path[0]) &&
- path[1].Equal(ast.StringTerm("keywords")) &&
+ path[1].Equal(ast.InternedTerm("keywords")) &&
path[2].Equal(ast.StringTerm(kw))
}
@@ -40,7 +40,7 @@ func WhichFutureKeyword(imp *ast.Import) (string, bool) {
path := imp.Path.Value.(ast.Ref)
if len(path) == 3 &&
ast.FutureRootDocument.Equal(path[0]) &&
- path[1].Equal(ast.StringTerm("keywords")) {
+ path[1].Equal(ast.InternedTerm("keywords")) {
if str, ok := path[2].Value.(ast.String); ok {
return string(str), true
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/future/parser_opts.go b/vendor/github.com/open-policy-agent/opa/internal/future/parser_opts.go
index 804702b945..eaeb87e296 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/future/parser_opts.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/future/parser_opts.go
@@ -5,9 +5,10 @@
package future
import (
+ "errors"
"fmt"
- "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/v1/ast"
)
// ParserOptionsFromFutureImports transforms a slice of `ast.Import`s into the
@@ -33,7 +34,7 @@ func ParserOptionsFromFutureImports(imports []*ast.Import) (ast.ParserOptions, e
}
if len(path) == 3 {
if imp.Alias != "" {
- return popts, fmt.Errorf("alias not supported")
+ return popts, errors.New("alias not supported")
}
popts.FutureKeywords = append(popts.FutureKeywords, string(path[2].Value.(ast.String)))
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/draft.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/draft.go
index dac1aafdac..656804acb7 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/draft.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/draft.go
@@ -86,12 +86,12 @@ func (dc draftConfigs) GetSchemaURL(draft Draft) string {
return ""
}
-func parseSchemaURL(documentNode interface{}) (string, *Draft, error) {
+func parseSchemaURL(documentNode any) (string, *Draft, error) {
if _, ok := documentNode.(bool); ok {
return "", nil, nil
}
- m, ok := documentNode.(map[string]interface{})
+ m, ok := documentNode.(map[string]any)
if !ok {
return "", nil, errors.New("schema is invalid")
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/errors.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/errors.go
index f7aaf90306..a937d9b3b9 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/errors.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/errors.go
@@ -212,7 +212,7 @@ type (
)
// newError takes a ResultError type and sets the type, context, description, details, value, and field
-func newError(err ResultError, context *JSONContext, value interface{}, locale locale, details ErrorDetails) {
+func newError(err ResultError, context *JSONContext, value any, locale locale, details ErrorDetails) {
var t string
var d string
switch err.(type) {
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/format_checkers.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/format_checkers.go
index 1e770464e8..c078e9862f 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/format_checkers.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/format_checkers.go
@@ -14,7 +14,7 @@ type (
// FormatChecker is the interface all formatters added to FormatCheckerChain must implement
FormatChecker interface {
// IsFormat checks if input has the correct format
- IsFormat(input interface{}) bool
+ IsFormat(input any) bool
}
// FormatCheckerChain holds the formatters
@@ -174,7 +174,7 @@ func (c *FormatCheckerChain) Has(name string) bool {
// IsFormat will check an input against a FormatChecker with the given name
// to see if it is the correct format
-func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool {
+func (c *FormatCheckerChain) IsFormat(name string, input any) bool {
lock.RLock()
f, ok := c.formatters[name]
lock.RUnlock()
@@ -188,7 +188,7 @@ func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted e-mail address
-func (f EmailFormatChecker) IsFormat(input interface{}) bool {
+func (f EmailFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -199,7 +199,7 @@ func (f EmailFormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted IPv4-address
-func (f IPV4FormatChecker) IsFormat(input interface{}) bool {
+func (f IPV4FormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -211,7 +211,7 @@ func (f IPV4FormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted IPv6=address
-func (f IPV6FormatChecker) IsFormat(input interface{}) bool {
+func (f IPV6FormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -223,7 +223,7 @@ func (f IPV6FormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted date/time per RFC3339 5.6
-func (f DateTimeFormatChecker) IsFormat(input interface{}) bool {
+func (f DateTimeFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -247,7 +247,7 @@ func (f DateTimeFormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted date (YYYY-MM-DD)
-func (f DateFormatChecker) IsFormat(input interface{}) bool {
+func (f DateFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -257,7 +257,7 @@ func (f DateFormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input correctly formatted time (HH:MM:SS or HH:MM:SSZ-07:00)
-func (f TimeFormatChecker) IsFormat(input interface{}) bool {
+func (f TimeFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -272,7 +272,7 @@ func (f TimeFormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is correctly formatted URI with a valid Scheme per RFC3986
-func (f URIFormatChecker) IsFormat(input interface{}) bool {
+func (f URIFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -288,7 +288,7 @@ func (f URIFormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted URI or relative-reference per RFC3986
-func (f URIReferenceFormatChecker) IsFormat(input interface{}) bool {
+func (f URIReferenceFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -299,7 +299,7 @@ func (f URIReferenceFormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted URI template per RFC6570
-func (f URITemplateFormatChecker) IsFormat(input interface{}) bool {
+func (f URITemplateFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -314,7 +314,7 @@ func (f URITemplateFormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted hostname
-func (f HostnameFormatChecker) IsFormat(input interface{}) bool {
+func (f HostnameFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -324,7 +324,7 @@ func (f HostnameFormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted UUID
-func (f UUIDFormatChecker) IsFormat(input interface{}) bool {
+func (f UUIDFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -334,7 +334,7 @@ func (f UUIDFormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted regular expression
-func (f RegexFormatChecker) IsFormat(input interface{}) bool {
+func (f RegexFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -348,7 +348,7 @@ func (f RegexFormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted JSON Pointer per RFC6901
-func (f JSONPointerFormatChecker) IsFormat(input interface{}) bool {
+func (f JSONPointerFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -358,7 +358,7 @@ func (f JSONPointerFormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted relative JSON Pointer
-func (f RelativeJSONPointerFormatChecker) IsFormat(input interface{}) bool {
+func (f RelativeJSONPointerFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/internalLog.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/internalLog.go
index 4ef7a8d03e..bab75112eb 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/internalLog.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/internalLog.go
@@ -32,6 +32,6 @@ import (
const internalLogEnabled = false
-func internalLog(format string, v ...interface{}) {
+func internalLog(format string, v ...any) {
log.Printf(format, v...)
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/jsonLoader.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/jsonLoader.go
index 1011552dee..73f25e3b7f 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/jsonLoader.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/jsonLoader.go
@@ -77,8 +77,8 @@ var osFS = osFileSystem(os.Open)
// JSONLoader defines the JSON loader interface
type JSONLoader interface {
- JSONSource() interface{}
- LoadJSON() (interface{}, error)
+ JSONSource() any
+ LoadJSON() (any, error)
JSONReference() (gojsonreference.JsonReference, error)
LoaderFactory() JSONLoaderFactory
}
@@ -130,7 +130,7 @@ type jsonReferenceLoader struct {
source string
}
-func (l *jsonReferenceLoader) JSONSource() interface{} {
+func (l *jsonReferenceLoader) JSONSource() any {
return l.source
}
@@ -160,7 +160,7 @@ func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) JSONLoader
}
}
-func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) {
+func (l *jsonReferenceLoader) LoadJSON() (any, error) {
var err error
@@ -207,7 +207,7 @@ func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) {
return nil, fmt.Errorf("remote reference loading disabled: %s", reference.String())
}
-func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) {
+func (l *jsonReferenceLoader) loadFromHTTP(address string) (any, error) {
resp, err := http.Get(address)
if err != nil {
@@ -227,7 +227,7 @@ func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error)
return decodeJSONUsingNumber(bytes.NewReader(bodyBuff))
}
-func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) {
+func (l *jsonReferenceLoader) loadFromFile(path string) (any, error) {
f, err := l.fs.Open(path)
if err != nil {
return nil, err
@@ -249,7 +249,7 @@ type jsonStringLoader struct {
source string
}
-func (l *jsonStringLoader) JSONSource() interface{} {
+func (l *jsonStringLoader) JSONSource() any {
return l.source
}
@@ -266,7 +266,7 @@ func NewStringLoader(source string) JSONLoader {
return &jsonStringLoader{source: source}
}
-func (l *jsonStringLoader) LoadJSON() (interface{}, error) {
+func (l *jsonStringLoader) LoadJSON() (any, error) {
return decodeJSONUsingNumber(strings.NewReader(l.JSONSource().(string)))
@@ -278,7 +278,7 @@ type jsonBytesLoader struct {
source []byte
}
-func (l *jsonBytesLoader) JSONSource() interface{} {
+func (l *jsonBytesLoader) JSONSource() any {
return l.source
}
@@ -295,18 +295,18 @@ func NewBytesLoader(source []byte) JSONLoader {
return &jsonBytesLoader{source: source}
}
-func (l *jsonBytesLoader) LoadJSON() (interface{}, error) {
+func (l *jsonBytesLoader) LoadJSON() (any, error) {
return decodeJSONUsingNumber(bytes.NewReader(l.JSONSource().([]byte)))
}
// JSON Go (types) loader
-// used to load JSONs from the code as maps, interface{}, structs ...
+// used to load JSONs from the code as maps, any, structs ...
type jsonGoLoader struct {
- source interface{}
+ source any
}
-func (l *jsonGoLoader) JSONSource() interface{} {
+func (l *jsonGoLoader) JSONSource() any {
return l.source
}
@@ -319,11 +319,11 @@ func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory {
}
// NewGoLoader creates a new JSONLoader from a given Go struct
-func NewGoLoader(source interface{}) JSONLoader {
+func NewGoLoader(source any) JSONLoader {
return &jsonGoLoader{source: source}
}
-func (l *jsonGoLoader) LoadJSON() (interface{}, error) {
+func (l *jsonGoLoader) LoadJSON() (any, error) {
// convert it to a compliant JSON first to avoid types "mismatches"
@@ -352,11 +352,11 @@ func NewWriterLoader(source io.Writer) (JSONLoader, io.Writer) {
return &jsonIOLoader{buf: buf}, io.MultiWriter(source, buf)
}
-func (l *jsonIOLoader) JSONSource() interface{} {
+func (l *jsonIOLoader) JSONSource() any {
return l.buf.String()
}
-func (l *jsonIOLoader) LoadJSON() (interface{}, error) {
+func (l *jsonIOLoader) LoadJSON() (any, error) {
return decodeJSONUsingNumber(l.buf)
}
@@ -369,21 +369,21 @@ func (l *jsonIOLoader) LoaderFactory() JSONLoaderFactory {
}
// JSON raw loader
-// In case the JSON is already marshalled to interface{} use this loader
+// In case the JSON is already marshalled to any use this loader
// This is used for testing as otherwise there is no guarantee the JSON is marshalled
// "properly" by using https://golang.org/pkg/encoding/json/#Decoder.UseNumber
type jsonRawLoader struct {
- source interface{}
+ source any
}
// NewRawLoader creates a new JSON raw loader for the given source
-func NewRawLoader(source interface{}) JSONLoader {
+func NewRawLoader(source any) JSONLoader {
return &jsonRawLoader{source: source}
}
-func (l *jsonRawLoader) JSONSource() interface{} {
+func (l *jsonRawLoader) JSONSource() any {
return l.source
}
-func (l *jsonRawLoader) LoadJSON() (interface{}, error) {
+func (l *jsonRawLoader) LoadJSON() (any, error) {
return l.source, nil
}
func (l *jsonRawLoader) JSONReference() (gojsonreference.JsonReference, error) {
@@ -393,9 +393,9 @@ func (l *jsonRawLoader) LoaderFactory() JSONLoaderFactory {
return &DefaultJSONLoaderFactory{}
}
-func decodeJSONUsingNumber(r io.Reader) (interface{}, error) {
+func decodeJSONUsingNumber(r io.Reader) (any, error) {
- var document interface{}
+ var document any
decoder := json.NewDecoder(r)
decoder.UseNumber()
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/result.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/result.go
index 8baff07179..0329721c20 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/result.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/result.go
@@ -33,7 +33,7 @@ import (
type (
// ErrorDetails is a map of details specific to each error.
// While the values will vary, every error will contain a "field" value
- ErrorDetails map[string]interface{}
+ ErrorDetails map[string]any
// ResultError is the interface that library errors must implement
ResultError interface {
@@ -57,9 +57,9 @@ type (
// DescriptionFormat returns the format for the description in the default text/template format
DescriptionFormat() string
// SetValue sets the value related to the error
- SetValue(interface{})
+ SetValue(any)
// Value returns the value related to the error
- Value() interface{}
+ Value() any
// SetDetails sets the details specific to the error
SetDetails(ErrorDetails)
// Details returns details about the error
@@ -76,7 +76,7 @@ type (
context *JSONContext // Tree like notation of the part that failed the validation. ex (root).a.b ...
description string // A human readable error message
descriptionFormat string // A format for human readable error message
- value interface{} // Value given by the JSON file that is the source of the error
+ value any // Value given by the JSON file that is the source of the error
details ErrorDetails
}
@@ -136,12 +136,12 @@ func (v *ResultErrorFields) DescriptionFormat() string {
}
// SetValue sets the value related to the error
-func (v *ResultErrorFields) SetValue(value interface{}) {
+func (v *ResultErrorFields) SetValue(value any) {
v.value = value
}
// Value returns the value related to the error
-func (v *ResultErrorFields) Value() interface{} {
+func (v *ResultErrorFields) Value() any {
return v.value
}
@@ -203,7 +203,7 @@ func (v *Result) AddError(err ResultError, details ErrorDetails) {
v.errors = append(v.errors, err)
}
-func (v *Result) addInternalError(err ResultError, context *JSONContext, value interface{}, details ErrorDetails) {
+func (v *Result) addInternalError(err ResultError, context *JSONContext, value any, details ErrorDetails) {
newError(err, context, value, Locale, details)
v.errors = append(v.errors, err)
v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schema.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schema.go
index 8e035013c2..e8007ee2b6 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schema.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schema.go
@@ -58,7 +58,7 @@ type Schema struct {
ReferencePool *schemaReferencePool
}
-func (d *Schema) parse(document interface{}, draft Draft) error {
+func (d *Schema) parse(document any, draft Draft) error {
d.RootSchema = &SubSchema{Property: StringRootSchemaProperty, Draft: &draft}
return d.parseSchema(document, d.RootSchema)
}
@@ -73,7 +73,7 @@ func (d *Schema) SetRootSchemaName(name string) {
// Pretty long function ( sorry :) )... but pretty straight forward, repetitive and boring
// Not much magic involved here, most of the job is to validate the key names and their values,
// then the values are copied into SubSchema struct
-func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema) error {
+func (d *Schema) parseSchema(documentNode any, currentSchema *SubSchema) error {
if currentSchema.Draft == nil {
if currentSchema.Parent == nil {
@@ -90,7 +90,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
}
}
- m, isMap := documentNode.(map[string]interface{})
+ m, isMap := documentNode.(map[string]any)
if !isMap {
return errors.New(formatErrorDescription(
Locale.ParseError(),
@@ -146,10 +146,10 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
// definitions
if v, ok := m[KeyDefinitions]; ok {
switch mt := v.(type) {
- case map[string]interface{}:
+ case map[string]any:
for _, dv := range mt {
switch dv.(type) {
- case bool, map[string]interface{}:
+ case bool, map[string]any:
newSchema := &SubSchema{Property: KeyDefinitions, Parent: currentSchema}
err := d.parseSchema(dv, newSchema)
if err != nil {
@@ -203,7 +203,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
if err != nil {
return err
}
- case []interface{}:
+ case []any:
for _, typeInArray := range t {
s, isString := typeInArray.(string)
if !isString {
@@ -231,7 +231,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
switch v := additionalProperties.(type) {
case bool:
currentSchema.additionalProperties = v
- case map[string]interface{}:
+ case map[string]any:
newSchema := &SubSchema{Property: KeyAdditionalProperties, Parent: currentSchema, Ref: currentSchema.Ref}
currentSchema.additionalProperties = newSchema
err := d.parseSchema(v, newSchema)
@@ -270,7 +270,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
// propertyNames
if propertyNames, found := m[KeyPropertyNames]; found && *currentSchema.Draft >= Draft6 {
switch propertyNames.(type) {
- case bool, map[string]interface{}:
+ case bool, map[string]any:
newSchema := &SubSchema{Property: KeyPropertyNames, Parent: currentSchema, Ref: currentSchema.Ref}
currentSchema.propertyNames = newSchema
err := d.parseSchema(propertyNames, newSchema)
@@ -299,10 +299,10 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
// items
if items, found := m[KeyItems]; found {
switch i := items.(type) {
- case []interface{}:
+ case []any:
for _, itemElement := range i {
switch itemElement.(type) {
- case map[string]interface{}, bool:
+ case map[string]any, bool:
newSchema := &SubSchema{Parent: currentSchema, Property: KeyItems}
newSchema.Ref = currentSchema.Ref
currentSchema.ItemsChildren = append(currentSchema.ItemsChildren, newSchema)
@@ -315,7 +315,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
}
currentSchema.ItemsChildrenIsSingleSchema = false
}
- case map[string]interface{}, bool:
+ case map[string]any, bool:
newSchema := &SubSchema{Parent: currentSchema, Property: KeyItems}
newSchema.Ref = currentSchema.Ref
currentSchema.ItemsChildren = append(currentSchema.ItemsChildren, newSchema)
@@ -334,7 +334,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
switch i := additionalItems.(type) {
case bool:
currentSchema.additionalItems = i
- case map[string]interface{}:
+ case map[string]any:
newSchema := &SubSchema{Property: KeyAdditionalItems, Parent: currentSchema, Ref: currentSchema.Ref}
currentSchema.additionalItems = newSchema
err := d.parseSchema(additionalItems, newSchema)
@@ -717,7 +717,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
if vNot, found := m[KeyNot]; found {
switch vNot.(type) {
- case bool, map[string]interface{}:
+ case bool, map[string]any:
newSchema := &SubSchema{Property: KeyNot, Parent: currentSchema, Ref: currentSchema.Ref}
currentSchema.not = newSchema
err := d.parseSchema(vNot, newSchema)
@@ -735,7 +735,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
if *currentSchema.Draft >= Draft7 {
if vIf, found := m[KeyIf]; found {
switch vIf.(type) {
- case bool, map[string]interface{}:
+ case bool, map[string]any:
newSchema := &SubSchema{Property: KeyIf, Parent: currentSchema, Ref: currentSchema.Ref}
currentSchema._if = newSchema
err := d.parseSchema(vIf, newSchema)
@@ -752,7 +752,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
if then, found := m[KeyThen]; found {
switch then.(type) {
- case bool, map[string]interface{}:
+ case bool, map[string]any:
newSchema := &SubSchema{Property: KeyThen, Parent: currentSchema, Ref: currentSchema.Ref}
currentSchema._then = newSchema
err := d.parseSchema(then, newSchema)
@@ -769,7 +769,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
if vElse, found := m[KeyElse]; found {
switch vElse.(type) {
- case bool, map[string]interface{}:
+ case bool, map[string]any:
newSchema := &SubSchema{Property: KeyElse, Parent: currentSchema, Ref: currentSchema.Ref}
currentSchema._else = newSchema
err := d.parseSchema(vElse, newSchema)
@@ -788,9 +788,9 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
return nil
}
-func (d *Schema) parseReference(_ interface{}, currentSchema *SubSchema) error {
+func (d *Schema) parseReference(_ any, currentSchema *SubSchema) error {
var (
- refdDocumentNode interface{}
+ refdDocumentNode any
dsp *schemaPoolDocument
err error
)
@@ -809,7 +809,7 @@ func (d *Schema) parseReference(_ interface{}, currentSchema *SubSchema) error {
newSchema.Draft = dsp.Draft
switch refdDocumentNode.(type) {
- case bool, map[string]interface{}:
+ case bool, map[string]any:
// expected
default:
return errors.New(formatErrorDescription(
@@ -829,8 +829,8 @@ func (d *Schema) parseReference(_ interface{}, currentSchema *SubSchema) error {
}
-func (d *Schema) parseProperties(documentNode interface{}, currentSchema *SubSchema) error {
- m, isMap := documentNode.(map[string]interface{})
+func (d *Schema) parseProperties(documentNode any, currentSchema *SubSchema) error {
+ m, isMap := documentNode.(map[string]any)
if !isMap {
return errors.New(formatErrorDescription(
Locale.MustBeOfType(),
@@ -851,19 +851,19 @@ func (d *Schema) parseProperties(documentNode interface{}, currentSchema *SubSch
return nil
}
-func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *SubSchema) error {
- m, isMap := documentNode.(map[string]interface{})
+func (d *Schema) parseDependencies(documentNode any, currentSchema *SubSchema) error {
+ m, isMap := documentNode.(map[string]any)
if !isMap {
return errors.New(formatErrorDescription(
Locale.MustBeOfType(),
ErrorDetails{"key": KeyDependencies, "type": TypeObject},
))
}
- currentSchema.dependencies = make(map[string]interface{})
+ currentSchema.dependencies = make(map[string]any)
for k := range m {
switch values := m[k].(type) {
- case []interface{}:
+ case []any:
var valuesToRegister []string
for _, value := range values {
str, isString := value.(string)
@@ -880,7 +880,7 @@ func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *SubS
currentSchema.dependencies[k] = valuesToRegister
}
- case bool, map[string]interface{}:
+ case bool, map[string]any:
depSchema := &SubSchema{Property: k, Parent: currentSchema, Ref: currentSchema.Ref}
err := d.parseSchema(m[k], depSchema)
if err != nil {
@@ -913,7 +913,7 @@ func invalidType(expected, given string) error {
))
}
-func getString(m map[string]interface{}, key string) (*string, error) {
+func getString(m map[string]any, key string) (*string, error) {
v, found := m[key]
if !found {
// not found
@@ -927,13 +927,13 @@ func getString(m map[string]interface{}, key string) (*string, error) {
return &s, nil
}
-func getMap(m map[string]interface{}, key string) (map[string]interface{}, error) {
+func getMap(m map[string]any, key string) (map[string]any, error) {
v, found := m[key]
if !found {
// not found
return nil, nil
}
- s, isMap := v.(map[string]interface{})
+ s, isMap := v.(map[string]any)
if !isMap {
// wrong type
return nil, invalidType(StringSchema, key)
@@ -941,12 +941,12 @@ func getMap(m map[string]interface{}, key string) (map[string]interface{}, error
return s, nil
}
-func getSlice(m map[string]interface{}, key string) ([]interface{}, error) {
+func getSlice(m map[string]any, key string) ([]any, error) {
v, found := m[key]
if !found {
return nil, nil
}
- s, isArray := v.([]interface{})
+ s, isArray := v.([]any)
if !isArray {
return nil, errors.New(formatErrorDescription(
Locale.MustBeOfAn(),
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaLoader.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaLoader.go
index 8cc6dc03b8..88caa65de2 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaLoader.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaLoader.go
@@ -45,7 +45,7 @@ func NewSchemaLoader() *SchemaLoader {
return ps
}
-func (sl *SchemaLoader) validateMetaschema(documentNode interface{}) error {
+func (sl *SchemaLoader) validateMetaschema(documentNode any) error {
var (
schema string
@@ -158,7 +158,7 @@ func (sl *SchemaLoader) Compile(rootSchema JSONLoader) (*Schema, error) {
d.DocumentReference = ref
d.ReferencePool = newSchemaReferencePool()
- var doc interface{}
+ var doc any
if ref.String() != "" {
// Get document from schema pool
spd, err := d.Pool.GetDocument(d.DocumentReference)
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaPool.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaPool.go
index ed8ff688b5..513f8df2cc 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaPool.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaPool.go
@@ -34,7 +34,7 @@ import (
)
type schemaPoolDocument struct {
- Document interface{}
+ Document any
Draft *Draft
}
@@ -44,7 +44,7 @@ type schemaPool struct {
autoDetect *bool
}
-func (p *schemaPool) parseReferences(document interface{}, ref gojsonreference.JsonReference, pooled bool) error {
+func (p *schemaPool) parseReferences(document any, ref gojsonreference.JsonReference, pooled bool) error {
var (
draft *Draft
@@ -72,7 +72,7 @@ func (p *schemaPool) parseReferences(document interface{}, ref gojsonreference.J
return err
}
-func (p *schemaPool) parseReferencesRecursive(document interface{}, ref gojsonreference.JsonReference, draft *Draft) error {
+func (p *schemaPool) parseReferencesRecursive(document any, ref gojsonreference.JsonReference, draft *Draft) error {
// parseReferencesRecursive parses a JSON document and resolves all $id and $ref references.
// For $ref references it takes into account the $id scope it is in and replaces
// the reference by the absolute resolved reference
@@ -80,14 +80,14 @@ func (p *schemaPool) parseReferencesRecursive(document interface{}, ref gojsonre
// When encountering errors it fails silently. Error handling is done when the schema
// is syntactically parsed and any error encountered here should also come up there.
switch m := document.(type) {
- case []interface{}:
+ case []any:
for _, v := range m {
err := p.parseReferencesRecursive(v, ref, draft)
if err != nil {
return err
}
}
- case map[string]interface{}:
+ case map[string]any:
localRef := &ref
keyID := KeyIDNew
@@ -129,7 +129,7 @@ func (p *schemaPool) parseReferencesRecursive(document interface{}, ref gojsonre
// Something like a property or a dependency is not a valid schema, as it might describe properties named "$ref", "$id" or "const", etc
// Therefore don't treat it like a schema.
if k == KeyProperties || k == KeyDependencies || k == KeyPatternProperties {
- if child, ok := v.(map[string]interface{}); ok {
+ if child, ok := v.(map[string]any); ok {
for _, v := range child {
err := p.parseReferencesRecursive(v, *localRef, draft)
if err != nil {
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaReferencePool.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaReferencePool.go
index 876419f56c..515702095b 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaReferencePool.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaReferencePool.go
@@ -25,10 +25,6 @@
package gojsonschema
-import (
- "fmt"
-)
-
type schemaReferencePool struct {
documents map[string]*SubSchema
}
@@ -44,7 +40,7 @@ func newSchemaReferencePool() *schemaReferencePool {
func (p *schemaReferencePool) Get(ref string) (r *SubSchema, o bool) {
if internalLogEnabled {
- internalLog(fmt.Sprintf("Schema Reference ( %s )", ref))
+ internalLog("Schema Reference ( %s )", ref)
}
if sch, ok := p.documents[ref]; ok {
@@ -60,7 +56,7 @@ func (p *schemaReferencePool) Get(ref string) (r *SubSchema, o bool) {
func (p *schemaReferencePool) Add(ref string, sch *SubSchema) {
if internalLogEnabled {
- internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref))
+ internalLog("Add Schema Reference %s to pool", ref)
}
if _, ok := p.documents[ref]; !ok {
p.documents[ref] = sch
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaType.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaType.go
index 271832d334..4abcc6814e 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaType.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaType.go
@@ -28,6 +28,7 @@ package gojsonschema
import (
"errors"
"fmt"
+ "slices"
"strings"
)
@@ -58,13 +59,7 @@ func (t *jsonSchemaType) Add(etype string) error {
func (t *jsonSchemaType) Contains(etype string) bool {
- for _, v := range t.types {
- if v == etype {
- return true
- }
- }
-
- return false
+ return slices.Contains(t.types, etype)
}
func (t *jsonSchemaType) String() string {
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/subSchema.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/subSchema.go
index d8bc0cb568..b7ceb3136e 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/subSchema.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/subSchema.go
@@ -123,8 +123,8 @@ type SubSchema struct {
maxProperties *int
required []string
- dependencies map[string]interface{}
- additionalProperties interface{}
+ dependencies map[string]any
+ additionalProperties any
patternProperties map[string]*SubSchema
propertyNames *SubSchema
@@ -134,7 +134,7 @@ type SubSchema struct {
uniqueItems bool
contains *SubSchema
- additionalItems interface{}
+ additionalItems any
// validation : all
_const *string //const is a golang keyword
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/utils.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/utils.go
index fd0f1870f9..ca071930f2 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/utils.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/utils.go
@@ -29,18 +29,14 @@ package gojsonschema
import (
"encoding/json"
"math/big"
+ "slices"
)
func isStringInSlice(s []string, what string) bool {
- for i := range s {
- if s[i] == what {
- return true
- }
- }
- return false
+ return slices.Contains(s, what)
}
-func marshalToJSONString(value interface{}) (*string, error) {
+func marshalToJSONString(value any) (*string, error) {
mBytes, err := json.Marshal(value)
if err != nil {
@@ -51,7 +47,7 @@ func marshalToJSONString(value interface{}) (*string, error) {
return &sBytes, nil
}
-func marshalWithoutNumber(value interface{}) (*string, error) {
+func marshalWithoutNumber(value any) (*string, error) {
// The JSON is decoded using https://golang.org/pkg/encoding/json/#Decoder.UseNumber
// This means the numbers are internally still represented as strings and therefore 1.00 is unequal to 1
@@ -63,7 +59,7 @@ func marshalWithoutNumber(value interface{}) (*string, error) {
return nil, err
}
- var document interface{}
+ var document any
err = json.Unmarshal([]byte(*jsonString), &document)
if err != nil {
@@ -73,7 +69,7 @@ func marshalWithoutNumber(value interface{}) (*string, error) {
return marshalToJSONString(document)
}
-func isJSONNumber(what interface{}) bool {
+func isJSONNumber(what any) bool {
switch what.(type) {
@@ -84,7 +80,7 @@ func isJSONNumber(what interface{}) bool {
return false
}
-func checkJSONInteger(what interface{}) (isInt bool) {
+func checkJSONInteger(what any) (isInt bool) {
jsonNumber := what.(json.Number)
@@ -100,7 +96,7 @@ const (
minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1
)
-func mustBeInteger(what interface{}) *int {
+func mustBeInteger(what any) *int {
number, ok := what.(json.Number)
if !ok {
return nil
@@ -123,7 +119,7 @@ func mustBeInteger(what interface{}) *int {
return &int32Value
}
-func mustBeNumber(what interface{}) *big.Rat {
+func mustBeNumber(what any) *big.Rat {
number, ok := what.(json.Number)
if !ok {
return nil
@@ -136,11 +132,11 @@ func mustBeNumber(what interface{}) *big.Rat {
return nil
}
-func convertDocumentNode(val interface{}) interface{} {
+func convertDocumentNode(val any) any {
- if lval, ok := val.([]interface{}); ok {
+ if lval, ok := val.([]any); ok {
- res := []interface{}{}
+ res := []any{}
for _, v := range lval {
res = append(res, convertDocumentNode(v))
}
@@ -149,9 +145,9 @@ func convertDocumentNode(val interface{}) interface{} {
}
- if mval, ok := val.(map[interface{}]interface{}); ok {
+ if mval, ok := val.(map[any]any); ok {
- res := map[string]interface{}{}
+ res := map[string]any{}
for k, v := range mval {
res[k.(string)] = convertDocumentNode(v)
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go
index 7c86e37245..e33a0f3d27 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go
@@ -54,21 +54,21 @@ func (v *Schema) Validate(l JSONLoader) (*Result, error) {
return v.validateDocument(root), nil
}
-func (v *Schema) validateDocument(root interface{}) *Result {
+func (v *Schema) validateDocument(root any) *Result {
result := &Result{}
context := NewJSONContext(StringContextRoot, nil)
v.RootSchema.validateRecursive(v.RootSchema, root, result, context)
return result
}
-func (v *SubSchema) subValidateWithContext(document interface{}, context *JSONContext) *Result {
+func (v *SubSchema) subValidateWithContext(document any, context *JSONContext) *Result {
result := &Result{}
v.validateRecursive(v, document, result, context)
return result
}
// Walker function to validate the json recursively against the SubSchema
-func (v *SubSchema) validateRecursive(currentSubSchema *SubSchema, currentNode interface{}, result *Result, context *JSONContext) {
+func (v *SubSchema) validateRecursive(currentSubSchema *SubSchema, currentNode any, result *Result, context *JSONContext) {
if internalLogEnabled {
internalLog("validateRecursive %s", context.String())
@@ -167,7 +167,7 @@ func (v *SubSchema) validateRecursive(currentSubSchema *SubSchema, currentNode i
return
}
- castCurrentNode := currentNode.([]interface{})
+ castCurrentNode := currentNode.([]any)
currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context)
@@ -190,9 +190,9 @@ func (v *SubSchema) validateRecursive(currentSubSchema *SubSchema, currentNode i
return
}
- castCurrentNode, ok := currentNode.(map[string]interface{})
+ castCurrentNode, ok := currentNode.(map[string]any)
if !ok {
- castCurrentNode = convertDocumentNode(currentNode).(map[string]interface{})
+ castCurrentNode = convertDocumentNode(currentNode).(map[string]any)
}
currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context)
@@ -264,7 +264,7 @@ func (v *SubSchema) validateRecursive(currentSubSchema *SubSchema, currentNode i
}
// Different kinds of validation there, SubSchema / common / array / object / string...
-func (v *SubSchema) validateSchema(currentSubSchema *SubSchema, currentNode interface{}, result *Result, context *JSONContext) {
+func (v *SubSchema) validateSchema(currentSubSchema *SubSchema, currentNode any, result *Result, context *JSONContext) {
if internalLogEnabled {
internalLog("validateSchema %s", context.String())
@@ -348,15 +348,15 @@ func (v *SubSchema) validateSchema(currentSubSchema *SubSchema, currentNode inte
}
}
- if currentSubSchema.dependencies != nil && len(currentSubSchema.dependencies) > 0 {
- if currentNodeMap, ok := currentNode.(map[string]interface{}); ok {
+ if len(currentSubSchema.dependencies) > 0 {
+ if currentNodeMap, ok := currentNode.(map[string]any); ok {
for elementKey := range currentNodeMap {
if dependency, ok := currentSubSchema.dependencies[elementKey]; ok {
switch dependency := dependency.(type) {
case []string:
for _, dependOnKey := range dependency {
- if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved {
+ if _, dependencyResolved := currentNode.(map[string]any)[dependOnKey]; !dependencyResolved {
result.addInternalError(
new(MissingDependencyError),
context,
@@ -395,7 +395,7 @@ func (v *SubSchema) validateSchema(currentSubSchema *SubSchema, currentNode inte
result.incrementScore()
}
-func (v *SubSchema) validateCommon(currentSubSchema *SubSchema, value interface{}, result *Result, context *JSONContext) {
+func (v *SubSchema) validateCommon(currentSubSchema *SubSchema, value any, result *Result, context *JSONContext) {
if internalLogEnabled {
internalLog("validateCommon %s", context.String())
@@ -452,7 +452,7 @@ func (v *SubSchema) validateCommon(currentSubSchema *SubSchema, value interface{
result.incrementScore()
}
-func (v *SubSchema) validateArray(currentSubSchema *SubSchema, value []interface{}, result *Result, context *JSONContext) {
+func (v *SubSchema) validateArray(currentSubSchema *SubSchema, value []any, result *Result, context *JSONContext) {
if internalLogEnabled {
internalLog("validateArray %s", context.String())
@@ -469,7 +469,7 @@ func (v *SubSchema) validateArray(currentSubSchema *SubSchema, value []interface
result.mergeErrors(validationResult)
}
} else {
- if currentSubSchema.ItemsChildren != nil && len(currentSubSchema.ItemsChildren) > 0 {
+ if len(currentSubSchema.ItemsChildren) > 0 {
nbItems := len(currentSubSchema.ItemsChildren)
@@ -578,7 +578,7 @@ func (v *SubSchema) validateArray(currentSubSchema *SubSchema, value []interface
result.incrementScore()
}
-func (v *SubSchema) validateObject(currentSubSchema *SubSchema, value map[string]interface{}, result *Result, context *JSONContext) {
+func (v *SubSchema) validateObject(currentSubSchema *SubSchema, value map[string]any, result *Result, context *JSONContext) {
if internalLogEnabled {
internalLog("validateObject %s", context.String())
@@ -675,7 +675,7 @@ func (v *SubSchema) validateObject(currentSubSchema *SubSchema, value map[string
result.incrementScore()
}
-func (v *SubSchema) validatePatternProperty(currentSubSchema *SubSchema, key string, value interface{}, result *Result, context *JSONContext) bool {
+func (v *SubSchema) validatePatternProperty(currentSubSchema *SubSchema, key string, value any, result *Result, context *JSONContext) bool {
if internalLogEnabled {
internalLog("validatePatternProperty %s", context.String())
@@ -701,7 +701,7 @@ func (v *SubSchema) validatePatternProperty(currentSubSchema *SubSchema, key str
return true
}
-func (v *SubSchema) validateString(currentSubSchema *SubSchema, value interface{}, result *Result, context *JSONContext) {
+func (v *SubSchema) validateString(currentSubSchema *SubSchema, value any, result *Result, context *JSONContext) {
// Ignore JSON numbers
stringValue, isString := value.(string)
@@ -752,7 +752,7 @@ func (v *SubSchema) validateString(currentSubSchema *SubSchema, value interface{
result.incrementScore()
}
-func (v *SubSchema) validateNumber(currentSubSchema *SubSchema, value interface{}, result *Result, context *JSONContext) {
+func (v *SubSchema) validateNumber(currentSubSchema *SubSchema, value any, result *Result, context *JSONContext) {
// Ignore non numbers
number, isNumber := value.(json.Number)
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/error.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/error.go
deleted file mode 100644
index f31f180a2e..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/error.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package validator
-
-import (
- "fmt"
-
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
- "github.com/open-policy-agent/opa/internal/gqlparser/gqlerror"
-)
-
-type ErrorOption func(err *gqlerror.Error)
-
-func Message(msg string, args ...interface{}) ErrorOption {
- return func(err *gqlerror.Error) {
- err.Message += fmt.Sprintf(msg, args...)
- }
-}
-
-func At(position *ast.Position) ErrorOption {
- return func(err *gqlerror.Error) {
- if position == nil {
- return
- }
- err.Locations = append(err.Locations, gqlerror.Location{
- Line: position.Line,
- Column: position.Column,
- })
- if position.Src.Name != "" {
- err.SetFile(position.Src.Name)
- }
- }
-}
-
-func SuggestListQuoted(prefix string, typed string, suggestions []string) ErrorOption {
- suggested := SuggestionList(typed, suggestions)
- return func(err *gqlerror.Error) {
- if len(suggested) > 0 {
- err.Message += " " + prefix + " " + QuotedOrList(suggested...) + "?"
- }
- }
-}
-
-func SuggestListUnquoted(prefix string, typed string, suggestions []string) ErrorOption {
- suggested := SuggestionList(typed, suggestions)
- return func(err *gqlerror.Error) {
- if len(suggested) > 0 {
- err.Message += " " + prefix + " " + OrList(suggested...) + "?"
- }
- }
-}
-
-func Suggestf(suggestion string, args ...interface{}) ErrorOption {
- return func(err *gqlerror.Error) {
- err.Message += " Did you mean " + fmt.Sprintf(suggestion, args...) + "?"
- }
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/messaging.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/messaging.go
deleted file mode 100644
index f1ab5873f3..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/messaging.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package validator
-
-import "bytes"
-
-// Given [ A, B, C ] return '"A", "B", or "C"'.
-func QuotedOrList(items ...string) string {
- itemsQuoted := make([]string, len(items))
- for i, item := range items {
- itemsQuoted[i] = `"` + item + `"`
- }
- return OrList(itemsQuoted...)
-}
-
-// Given [ A, B, C ] return 'A, B, or C'.
-func OrList(items ...string) string {
- var buf bytes.Buffer
-
- if len(items) > 5 {
- items = items[:5]
- }
- if len(items) == 2 {
- buf.WriteString(items[0])
- buf.WriteString(" or ")
- buf.WriteString(items[1])
- return buf.String()
- }
-
- for i, item := range items {
- if i != 0 {
- if i == len(items)-1 {
- buf.WriteString(", or ")
- } else {
- buf.WriteString(", ")
- }
- }
- buf.WriteString(item)
- }
- return buf.String()
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/prelude.graphql b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/prelude.graphql
deleted file mode 100644
index bdca0096a5..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/prelude.graphql
+++ /dev/null
@@ -1,121 +0,0 @@
-# This file defines all the implicitly declared types that are required by the graphql spec. It is implicitly included by calls to LoadSchema
-
-"The `Int` scalar type represents non-fractional signed whole numeric values. Int can represent values between -(2^31) and 2^31 - 1."
-scalar Int
-
-"The `Float` scalar type represents signed double-precision fractional values as specified by [IEEE 754](http://en.wikipedia.org/wiki/IEEE_floating_point)."
-scalar Float
-
-"The `String`scalar type represents textual data, represented as UTF-8 character sequences. The String type is most often used by GraphQL to represent free-form human-readable text."
-scalar String
-
-"The `Boolean` scalar type represents `true` or `false`."
-scalar Boolean
-
-"""The `ID` scalar type represents a unique identifier, often used to refetch an object or as key for a cache. The ID type appears in a JSON response as a String; however, it is not intended to be human-readable. When expected as an input type, any string (such as "4") or integer (such as 4) input value will be accepted as an ID."""
-scalar ID
-
-"The @include directive may be provided for fields, fragment spreads, and inline fragments, and allows for conditional inclusion during execution as described by the if argument."
-directive @include(if: Boolean!) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
-
-"The @skip directive may be provided for fields, fragment spreads, and inline fragments, and allows for conditional exclusion during execution as described by the if argument."
-directive @skip(if: Boolean!) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
-
-"The @deprecated built-in directive is used within the type system definition language to indicate deprecated portions of a GraphQL service's schema, such as deprecated fields on a type, arguments on a field, input fields on an input type, or values of an enum type."
-directive @deprecated(reason: String = "No longer supported") on FIELD_DEFINITION | ARGUMENT_DEFINITION | INPUT_FIELD_DEFINITION | ENUM_VALUE
-
-"The @specifiedBy built-in directive is used within the type system definition language to provide a scalar specification URL for specifying the behavior of custom scalar types."
-directive @specifiedBy(url: String!) on SCALAR
-
-type __Schema {
- description: String
- types: [__Type!]!
- queryType: __Type!
- mutationType: __Type
- subscriptionType: __Type
- directives: [__Directive!]!
-}
-
-type __Type {
- kind: __TypeKind!
- name: String
- description: String
- # must be non-null for OBJECT and INTERFACE, otherwise null.
- fields(includeDeprecated: Boolean = false): [__Field!]
- # must be non-null for OBJECT and INTERFACE, otherwise null.
- interfaces: [__Type!]
- # must be non-null for INTERFACE and UNION, otherwise null.
- possibleTypes: [__Type!]
- # must be non-null for ENUM, otherwise null.
- enumValues(includeDeprecated: Boolean = false): [__EnumValue!]
- # must be non-null for INPUT_OBJECT, otherwise null.
- inputFields: [__InputValue!]
- # must be non-null for NON_NULL and LIST, otherwise null.
- ofType: __Type
- # may be non-null for custom SCALAR, otherwise null.
- specifiedByURL: String
-}
-
-type __Field {
- name: String!
- description: String
- args: [__InputValue!]!
- type: __Type!
- isDeprecated: Boolean!
- deprecationReason: String
-}
-
-type __InputValue {
- name: String!
- description: String
- type: __Type!
- defaultValue: String
-}
-
-type __EnumValue {
- name: String!
- description: String
- isDeprecated: Boolean!
- deprecationReason: String
-}
-
-enum __TypeKind {
- SCALAR
- OBJECT
- INTERFACE
- UNION
- ENUM
- INPUT_OBJECT
- LIST
- NON_NULL
-}
-
-type __Directive {
- name: String!
- description: String
- locations: [__DirectiveLocation!]!
- args: [__InputValue!]!
- isRepeatable: Boolean!
-}
-
-enum __DirectiveLocation {
- QUERY
- MUTATION
- SUBSCRIPTION
- FIELD
- FRAGMENT_DEFINITION
- FRAGMENT_SPREAD
- INLINE_FRAGMENT
- VARIABLE_DEFINITION
- SCHEMA
- SCALAR
- OBJECT
- FIELD_DEFINITION
- ARGUMENT_DEFINITION
- INTERFACE
- UNION
- ENUM
- ENUM_VALUE
- INPUT_OBJECT
- INPUT_FIELD_DEFINITION
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_argument_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_argument_names.go
deleted file mode 100644
index 36b2d057c9..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_argument_names.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package validator
-
-import (
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
-
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
-)
-
-func init() {
- AddRule("KnownArgumentNames", func(observers *Events, addError AddErrFunc) {
- // A GraphQL field is only valid if all supplied arguments are defined by that field.
- observers.OnField(func(_ *Walker, field *ast.Field) {
- if field.Definition == nil || field.ObjectDefinition == nil {
- return
- }
- for _, arg := range field.Arguments {
- def := field.Definition.Arguments.ForName(arg.Name)
- if def != nil {
- continue
- }
-
- var suggestions []string
- for _, argDef := range field.Definition.Arguments {
- suggestions = append(suggestions, argDef.Name)
- }
-
- addError(
- Message(`Unknown argument "%s" on field "%s.%s".`, arg.Name, field.ObjectDefinition.Name, field.Name),
- SuggestListQuoted("Did you mean", arg.Name, suggestions),
- At(field.Position),
- )
- }
- })
-
- observers.OnDirective(func(_ *Walker, directive *ast.Directive) {
- if directive.Definition == nil {
- return
- }
- for _, arg := range directive.Arguments {
- def := directive.Definition.Arguments.ForName(arg.Name)
- if def != nil {
- continue
- }
-
- var suggestions []string
- for _, argDef := range directive.Definition.Arguments {
- suggestions = append(suggestions, argDef.Name)
- }
-
- addError(
- Message(`Unknown argument "%s" on directive "@%s".`, arg.Name, directive.Name),
- SuggestListQuoted("Did you mean", arg.Name, suggestions),
- At(directive.Position),
- )
- }
- })
- })
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_fragment_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_fragment_names.go
deleted file mode 100644
index 8ae1fc33f4..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_fragment_names.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package validator
-
-import (
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
-
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
-)
-
-func init() {
- AddRule("KnownFragmentNames", func(observers *Events, addError AddErrFunc) {
- observers.OnFragmentSpread(func(_ *Walker, fragmentSpread *ast.FragmentSpread) {
- if fragmentSpread.Definition == nil {
- addError(
- Message(`Unknown fragment "%s".`, fragmentSpread.Name),
- At(fragmentSpread.Position),
- )
- }
- })
- })
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_type_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_type_names.go
deleted file mode 100644
index aa9809be34..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_type_names.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package validator
-
-import (
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
-
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
-)
-
-func init() {
- AddRule("KnownTypeNames", func(observers *Events, addError AddErrFunc) {
- observers.OnVariable(func(walker *Walker, variable *ast.VariableDefinition) {
- typeName := variable.Type.Name()
- typdef := walker.Schema.Types[typeName]
- if typdef != nil {
- return
- }
-
- addError(
- Message(`Unknown type "%s".`, typeName),
- At(variable.Position),
- )
- })
-
- observers.OnInlineFragment(func(walker *Walker, inlineFragment *ast.InlineFragment) {
- typedName := inlineFragment.TypeCondition
- if typedName == "" {
- return
- }
-
- def := walker.Schema.Types[typedName]
- if def != nil {
- return
- }
-
- addError(
- Message(`Unknown type "%s".`, typedName),
- At(inlineFragment.Position),
- )
- })
-
- observers.OnFragment(func(walker *Walker, fragment *ast.FragmentDefinition) {
- typeName := fragment.TypeCondition
- def := walker.Schema.Types[typeName]
- if def != nil {
- return
- }
-
- var possibleTypes []string
- for _, t := range walker.Schema.Types {
- possibleTypes = append(possibleTypes, t.Name)
- }
-
- addError(
- Message(`Unknown type "%s".`, typeName),
- SuggestListQuoted("Did you mean", typeName, possibleTypes),
- At(fragment.Position),
- )
- })
- })
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/lone_anonymous_operation.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/lone_anonymous_operation.go
deleted file mode 100644
index 2af7b5a038..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/lone_anonymous_operation.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package validator
-
-import (
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
-
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
-)
-
-func init() {
- AddRule("LoneAnonymousOperation", func(observers *Events, addError AddErrFunc) {
- observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) {
- if operation.Name == "" && len(walker.Document.Operations) > 1 {
- addError(
- Message(`This anonymous operation must be the only defined operation.`),
- At(operation.Position),
- )
- }
- })
- })
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_unused_fragments.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_unused_fragments.go
deleted file mode 100644
index f6ba046a1c..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_unused_fragments.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package validator
-
-import (
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
-
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
-)
-
-func init() {
- AddRule("NoUnusedFragments", func(observers *Events, addError AddErrFunc) {
-
- inFragmentDefinition := false
- fragmentNameUsed := make(map[string]bool)
-
- observers.OnFragmentSpread(func(_ *Walker, fragmentSpread *ast.FragmentSpread) {
- if !inFragmentDefinition {
- fragmentNameUsed[fragmentSpread.Name] = true
- }
- })
-
- observers.OnFragment(func(_ *Walker, fragment *ast.FragmentDefinition) {
- inFragmentDefinition = true
- if !fragmentNameUsed[fragment.Name] {
- addError(
- Message(`Fragment "%s" is never used.`, fragment.Name),
- At(fragment.Position),
- )
- }
- })
- })
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_argument_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_argument_names.go
deleted file mode 100644
index 7458c5f6cb..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_argument_names.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package validator
-
-import (
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
-
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
-)
-
-func init() {
- AddRule("UniqueArgumentNames", func(observers *Events, addError AddErrFunc) {
- observers.OnField(func(_ *Walker, field *ast.Field) {
- checkUniqueArgs(field.Arguments, addError)
- })
-
- observers.OnDirective(func(_ *Walker, directive *ast.Directive) {
- checkUniqueArgs(directive.Arguments, addError)
- })
- })
-}
-
-func checkUniqueArgs(args ast.ArgumentList, addError AddErrFunc) {
- knownArgNames := map[string]int{}
-
- for _, arg := range args {
- if knownArgNames[arg.Name] == 1 {
- addError(
- Message(`There can be only one argument named "%s".`, arg.Name),
- At(arg.Position),
- )
- }
-
- knownArgNames[arg.Name]++
- }
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_directives_per_location.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_directives_per_location.go
deleted file mode 100644
index ecf5a0a82e..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_directives_per_location.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package validator
-
-import (
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
-
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
-)
-
-func init() {
- AddRule("UniqueDirectivesPerLocation", func(observers *Events, addError AddErrFunc) {
- observers.OnDirectiveList(func(_ *Walker, directives []*ast.Directive) {
- seen := map[string]bool{}
-
- for _, dir := range directives {
- if dir.Name != "repeatable" && seen[dir.Name] {
- addError(
- Message(`The directive "@%s" can only be used once at this location.`, dir.Name),
- At(dir.Position),
- )
- }
- seen[dir.Name] = true
- }
- })
- })
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_fragment_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_fragment_names.go
deleted file mode 100644
index c94f3ad27c..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_fragment_names.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package validator
-
-import (
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
-
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
-)
-
-func init() {
- AddRule("UniqueFragmentNames", func(observers *Events, addError AddErrFunc) {
- seenFragments := map[string]bool{}
-
- observers.OnFragment(func(_ *Walker, fragment *ast.FragmentDefinition) {
- if seenFragments[fragment.Name] {
- addError(
- Message(`There can be only one fragment named "%s".`, fragment.Name),
- At(fragment.Position),
- )
- }
- seenFragments[fragment.Name] = true
- })
- })
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_input_field_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_input_field_names.go
deleted file mode 100644
index a93d63bd1e..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_input_field_names.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package validator
-
-import (
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
-
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
-)
-
-func init() {
- AddRule("UniqueInputFieldNames", func(observers *Events, addError AddErrFunc) {
- observers.OnValue(func(_ *Walker, value *ast.Value) {
- if value.Kind != ast.ObjectValue {
- return
- }
-
- seen := map[string]bool{}
- for _, field := range value.Children {
- if seen[field.Name] {
- addError(
- Message(`There can be only one input field named "%s".`, field.Name),
- At(field.Position),
- )
- }
- seen[field.Name] = true
- }
- })
- })
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_operation_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_operation_names.go
deleted file mode 100644
index dcd404dadf..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_operation_names.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package validator
-
-import (
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
-
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
-)
-
-func init() {
- AddRule("UniqueOperationNames", func(observers *Events, addError AddErrFunc) {
- seen := map[string]bool{}
-
- observers.OnOperation(func(_ *Walker, operation *ast.OperationDefinition) {
- if seen[operation.Name] {
- addError(
- Message(`There can be only one operation named "%s".`, operation.Name),
- At(operation.Position),
- )
- }
- seen[operation.Name] = true
- })
- })
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_variable_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_variable_names.go
deleted file mode 100644
index 7a214dbe4c..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_variable_names.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package validator
-
-import (
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
-
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
-)
-
-func init() {
- AddRule("UniqueVariableNames", func(observers *Events, addError AddErrFunc) {
- observers.OnOperation(func(_ *Walker, operation *ast.OperationDefinition) {
- seen := map[string]int{}
- for _, def := range operation.VariableDefinitions {
- // add the same error only once per a variable.
- if seen[def.Variable] == 1 {
- addError(
- Message(`There can be only one variable named "$%s".`, def.Variable),
- At(def.Position),
- )
- }
- seen[def.Variable]++
- }
- })
- })
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/values_of_correct_type.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/values_of_correct_type.go
deleted file mode 100644
index 8858023d4e..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/values_of_correct_type.go
+++ /dev/null
@@ -1,170 +0,0 @@
-package validator
-
-import (
- "errors"
- "fmt"
- "strconv"
-
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
-
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
-)
-
-func init() {
- AddRule("ValuesOfCorrectType", func(observers *Events, addError AddErrFunc) {
- observers.OnValue(func(_ *Walker, value *ast.Value) {
- if value.Definition == nil || value.ExpectedType == nil {
- return
- }
-
- if value.Kind == ast.NullValue && value.ExpectedType.NonNull {
- addError(
- Message(`Expected value of type "%s", found %s.`, value.ExpectedType.String(), value.String()),
- At(value.Position),
- )
- }
-
- if value.Definition.Kind == ast.Scalar {
- // Skip custom validating scalars
- if !value.Definition.OneOf("Int", "Float", "String", "Boolean", "ID") {
- return
- }
- }
-
- var possibleEnums []string
- if value.Definition.Kind == ast.Enum {
- for _, val := range value.Definition.EnumValues {
- possibleEnums = append(possibleEnums, val.Name)
- }
- }
-
- rawVal, err := value.Value(nil)
- if err != nil {
- unexpectedTypeMessage(addError, value)
- }
-
- switch value.Kind {
- case ast.NullValue:
- return
- case ast.ListValue:
- if value.ExpectedType.Elem == nil {
- unexpectedTypeMessage(addError, value)
- return
- }
-
- case ast.IntValue:
- if !value.Definition.OneOf("Int", "Float", "ID") {
- unexpectedTypeMessage(addError, value)
- }
-
- case ast.FloatValue:
- if !value.Definition.OneOf("Float") {
- unexpectedTypeMessage(addError, value)
- }
-
- case ast.StringValue, ast.BlockValue:
- if value.Definition.Kind == ast.Enum {
- rawValStr := fmt.Sprint(rawVal)
- addError(
- Message(`Enum "%s" cannot represent non-enum value: %s.`, value.ExpectedType.String(), value.String()),
- SuggestListQuoted("Did you mean the enum value", rawValStr, possibleEnums),
- At(value.Position),
- )
- } else if !value.Definition.OneOf("String", "ID") {
- unexpectedTypeMessage(addError, value)
- }
-
- case ast.EnumValue:
- if value.Definition.Kind != ast.Enum {
- rawValStr := fmt.Sprint(rawVal)
- addError(
- unexpectedTypeMessageOnly(value),
- SuggestListUnquoted("Did you mean the enum value", rawValStr, possibleEnums),
- At(value.Position),
- )
- } else if value.Definition.EnumValues.ForName(value.Raw) == nil {
- rawValStr := fmt.Sprint(rawVal)
- addError(
- Message(`Value "%s" does not exist in "%s" enum.`, value.String(), value.ExpectedType.String()),
- SuggestListQuoted("Did you mean the enum value", rawValStr, possibleEnums),
- At(value.Position),
- )
- }
-
- case ast.BooleanValue:
- if !value.Definition.OneOf("Boolean") {
- unexpectedTypeMessage(addError, value)
- }
-
- case ast.ObjectValue:
-
- for _, field := range value.Definition.Fields {
- if field.Type.NonNull {
- fieldValue := value.Children.ForName(field.Name)
- if fieldValue == nil && field.DefaultValue == nil {
- addError(
- Message(`Field "%s.%s" of required type "%s" was not provided.`, value.Definition.Name, field.Name, field.Type.String()),
- At(value.Position),
- )
- continue
- }
- }
- }
-
- for _, fieldValue := range value.Children {
- if value.Definition.Fields.ForName(fieldValue.Name) == nil {
- var suggestions []string
- for _, fieldValue := range value.Definition.Fields {
- suggestions = append(suggestions, fieldValue.Name)
- }
-
- addError(
- Message(`Field "%s" is not defined by type "%s".`, fieldValue.Name, value.Definition.Name),
- SuggestListQuoted("Did you mean", fieldValue.Name, suggestions),
- At(fieldValue.Position),
- )
- }
- }
-
- case ast.Variable:
- return
-
- default:
- panic(fmt.Errorf("unhandled %T", value))
- }
- })
- })
-}
-
-func unexpectedTypeMessage(addError AddErrFunc, v *ast.Value) {
- addError(
- unexpectedTypeMessageOnly(v),
- At(v.Position),
- )
-}
-
-func unexpectedTypeMessageOnly(v *ast.Value) ErrorOption {
- switch v.ExpectedType.String() {
- case "Int", "Int!":
- if _, err := strconv.ParseInt(v.Raw, 10, 32); err != nil && errors.Is(err, strconv.ErrRange) {
- return Message(`Int cannot represent non 32-bit signed integer value: %s`, v.String())
- }
- return Message(`Int cannot represent non-integer value: %s`, v.String())
- case "String", "String!", "[String]":
- return Message(`String cannot represent a non string value: %s`, v.String())
- case "Boolean", "Boolean!":
- return Message(`Boolean cannot represent a non boolean value: %s`, v.String())
- case "Float", "Float!":
- return Message(`Float cannot represent non numeric value: %s`, v.String())
- case "ID", "ID!":
- return Message(`ID cannot represent a non-string and non-integer value: %s`, v.String())
- //case "Enum":
- // return Message(`Enum "%s" cannot represent non-enum value: %s`, v.ExpectedType.String(), v.String())
- default:
- if v.Definition.Kind == ast.Enum {
- return Message(`Enum "%s" cannot represent non-enum value: %s.`, v.ExpectedType.String(), v.String())
- }
- return Message(`Expected value of type "%s", found %s.`, v.ExpectedType.String(), v.String())
- }
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/variables_are_input_types.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/variables_are_input_types.go
deleted file mode 100644
index ea4dfcc5ab..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/variables_are_input_types.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package validator
-
-import (
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
-
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
-)
-
-func init() {
- AddRule("VariablesAreInputTypes", func(observers *Events, addError AddErrFunc) {
- observers.OnOperation(func(_ *Walker, operation *ast.OperationDefinition) {
- for _, def := range operation.VariableDefinitions {
- if def.Definition == nil {
- continue
- }
- if !def.Definition.IsInputType() {
- addError(
- Message(
- `Variable "$%s" cannot be non-input type "%s".`,
- def.Variable,
- def.Type.String(),
- ),
- At(def.Position),
- )
- }
- }
- })
- })
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/suggestionList.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/suggestionList.go
deleted file mode 100644
index f0bbc32786..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/suggestionList.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package validator
-
-import (
- "math"
- "sort"
- "strings"
-
- "github.com/agnivade/levenshtein"
-)
-
-// Given an invalid input string and a list of valid options, returns a filtered
-// list of valid options sorted based on their similarity with the input.
-func SuggestionList(input string, options []string) []string {
- var results []string
- optionsByDistance := map[string]int{}
-
- for _, option := range options {
- distance := lexicalDistance(input, option)
- threshold := calcThreshold(input)
- if distance <= threshold {
- results = append(results, option)
- optionsByDistance[option] = distance
- }
- }
-
- sort.Slice(results, func(i, j int) bool {
- return optionsByDistance[results[i]] < optionsByDistance[results[j]]
- })
- return results
-}
-
-func calcThreshold(a string) (threshold int) {
- // the logic is copied from here
- // https://github.com/graphql/graphql-js/blob/47bd8c8897c72d3efc17ecb1599a95cee6bac5e8/src/jsutils/suggestionList.ts#L14
- threshold = int(math.Floor(float64(len(a))*0.4) + 1)
-
- if threshold < 1 {
- threshold = 1
- }
- return
-}
-
-// Computes the lexical distance between strings A and B.
-//
-// The "distance" between two strings is given by counting the minimum number
-// of edits needed to transform string A into string B. An edit can be an
-// insertion, deletion, or substitution of a single character, or a swap of two
-// adjacent characters.
-//
-// Includes a custom alteration from Damerau-Levenshtein to treat case changes
-// as a single edit which helps identify mis-cased values with an edit distance
-// of 1.
-//
-// This distance can be useful for detecting typos in input or sorting
-func lexicalDistance(a, b string) int {
- if a == b {
- return 0
- }
-
- a = strings.ToLower(a)
- b = strings.ToLower(b)
-
- // Any case change counts as a single edit
- if a == b {
- return 1
- }
-
- return levenshtein.ComputeDistance(a, b)
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/validator.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/validator.go
deleted file mode 100644
index 05f5b91669..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/validator.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package validator
-
-import (
- //nolint:revive
- . "github.com/open-policy-agent/opa/internal/gqlparser/ast"
- "github.com/open-policy-agent/opa/internal/gqlparser/gqlerror"
-)
-
-type AddErrFunc func(options ...ErrorOption)
-
-type ruleFunc func(observers *Events, addError AddErrFunc)
-
-type rule struct {
- name string
- rule ruleFunc
-}
-
-var rules []rule
-
-// addRule to rule set.
-// f is called once each time `Validate` is executed.
-func AddRule(name string, f ruleFunc) {
- rules = append(rules, rule{name: name, rule: f})
-}
-
-func Validate(schema *Schema, doc *QueryDocument) gqlerror.List {
- var errs gqlerror.List
-
- observers := &Events{}
- for i := range rules {
- rule := rules[i]
- rule.rule(observers, func(options ...ErrorOption) {
- err := &gqlerror.Error{
- Rule: rule.name,
- }
- for _, o := range options {
- o(err)
- }
- errs = append(errs, err)
- })
- }
-
- Walk(schema, doc, observers)
- return errs
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/json/patch/patch.go b/vendor/github.com/open-policy-agent/opa/internal/json/patch/patch.go
index 31c89869da..9ddb93506e 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/json/patch/patch.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/json/patch/patch.go
@@ -7,7 +7,7 @@ package patch
import (
"strings"
- "github.com/open-policy-agent/opa/storage"
+ "github.com/open-policy-agent/opa/v1/storage"
)
// ParsePatchPathEscaped returns a new path for the given escaped str.
@@ -37,8 +37,8 @@ func ParsePatchPathEscaped(str string) (path storage.Path, ok bool) {
// the substitutions in this order, an implementation avoids the error of
// turning '~01' first into '~1' and then into '/', which would be
// incorrect (the string '~01' correctly becomes '~1' after transformation)."
- path[i] = strings.Replace(path[i], "~1", "/", -1)
- path[i] = strings.Replace(path[i], "~0", "~", -1)
+ path[i] = strings.ReplaceAll(path[i], "~1", "/")
+ path[i] = strings.ReplaceAll(path[i], "~0", "~")
}
return
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/buffer/buffer.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/buffer/buffer.go
deleted file mode 100644
index c383ff3b54..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/buffer/buffer.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Package buffer provides a very thin wrapper around []byte buffer called
-// `Buffer`, to provide functionalities that are often used within the jwx
-// related packages
-package buffer
-
-import (
- "encoding/base64"
- "encoding/binary"
- "encoding/json"
- "fmt"
-)
-
-// Buffer wraps `[]byte` and provides functions that are often used in
-// the jwx related packages. One notable difference is that while
-// encoding/json marshalls `[]byte` using base64.StdEncoding, this
-// module uses base64.RawURLEncoding as mandated by the spec
-type Buffer []byte
-
-// FromUint creates a `Buffer` from an unsigned int
-func FromUint(v uint64) Buffer {
- data := make([]byte, 8)
- binary.BigEndian.PutUint64(data, v)
-
- i := 0
- for ; i < len(data); i++ {
- if data[i] != 0x0 {
- break
- }
- }
- return Buffer(data[i:])
-}
-
-// FromBase64 constructs a new Buffer from a base64 encoded data
-func FromBase64(v []byte) (Buffer, error) {
- b := Buffer{}
- if err := b.Base64Decode(v); err != nil {
- return Buffer(nil), fmt.Errorf("failed to decode from base64: %w", err)
- }
-
- return b, nil
-}
-
-// FromNData constructs a new Buffer from a "n:data" format
-// (I made that name up)
-func FromNData(v []byte) (Buffer, error) {
- size := binary.BigEndian.Uint32(v)
- buf := make([]byte, int(size))
- copy(buf, v[4:4+size])
- return Buffer(buf), nil
-}
-
-// Bytes returns the raw bytes that comprises the Buffer
-func (b Buffer) Bytes() []byte {
- return []byte(b)
-}
-
-// NData returns Datalen || Data, where Datalen is a 32 bit counter for
-// the length of the following data, and Data is the octets that comprise
-// the buffer data
-func (b Buffer) NData() []byte {
- buf := make([]byte, 4+b.Len())
- binary.BigEndian.PutUint32(buf, uint32(b.Len()))
-
- copy(buf[4:], b.Bytes())
- return buf
-}
-
-// Len returns the number of bytes that the Buffer holds
-func (b Buffer) Len() int {
- return len(b)
-}
-
-// Base64Encode encodes the contents of the Buffer using base64.RawURLEncoding
-func (b Buffer) Base64Encode() ([]byte, error) {
- enc := base64.RawURLEncoding
- out := make([]byte, enc.EncodedLen(len(b)))
- enc.Encode(out, b)
- return out, nil
-}
-
-// Base64Decode decodes the contents of the Buffer using base64.RawURLEncoding
-func (b *Buffer) Base64Decode(v []byte) error {
- enc := base64.RawURLEncoding
- out := make([]byte, enc.DecodedLen(len(v)))
- n, err := enc.Decode(out, v)
- if err != nil {
- return fmt.Errorf("failed to decode from base64: %w", err)
- }
- out = out[:n]
- *b = Buffer(out)
- return nil
-}
-
-// MarshalJSON marshals the buffer into JSON format after encoding the buffer
-// with base64.RawURLEncoding
-func (b Buffer) MarshalJSON() ([]byte, error) {
- v, err := b.Base64Encode()
- if err != nil {
- return nil, fmt.Errorf("failed to encode to base64: %w", err)
- }
- return json.Marshal(string(v))
-}
-
-// UnmarshalJSON unmarshals from a JSON string into a Buffer, after decoding it
-// with base64.RawURLEncoding
-func (b *Buffer) UnmarshalJSON(data []byte) error {
- var x string
- if err := json.Unmarshal(data, &x); err != nil {
- return fmt.Errorf("failed to unmarshal JSON: %w", err)
- }
- return b.Base64Decode([]byte(x))
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/elliptic.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/elliptic.go
deleted file mode 100644
index b7e35dc707..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/elliptic.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package jwa
-
-// EllipticCurveAlgorithm represents the algorithms used for EC keys
-type EllipticCurveAlgorithm string
-
-// Supported values for EllipticCurveAlgorithm
-const (
- P256 EllipticCurveAlgorithm = "P-256"
- P384 EllipticCurveAlgorithm = "P-384"
- P521 EllipticCurveAlgorithm = "P-521"
-)
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/key_type.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/key_type.go
deleted file mode 100644
index 98f0cc42e2..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/key_type.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package jwa
-
-import (
- "errors"
- "fmt"
- "strconv"
-)
-
-// KeyType represents the key type ("kty") that are supported
-type KeyType string
-
-var keyTypeAlg = map[string]struct{}{"EC": {}, "oct": {}, "RSA": {}}
-
-// Supported values for KeyType
-const (
- EC KeyType = "EC" // Elliptic Curve
- InvalidKeyType KeyType = "" // Invalid KeyType
- OctetSeq KeyType = "oct" // Octet sequence (used to represent symmetric keys)
- RSA KeyType = "RSA" // RSA
-)
-
-// Accept is used when conversion from values given by
-// outside sources (such as JSON payloads) is required
-func (keyType *KeyType) Accept(value interface{}) error {
- var tmp KeyType
- switch x := value.(type) {
- case string:
- tmp = KeyType(x)
- case KeyType:
- tmp = x
- default:
- return fmt.Errorf("invalid type for jwa.KeyType: %T", value)
- }
- _, ok := keyTypeAlg[tmp.String()]
- if !ok {
- return errors.New("unknown Key Type algorithm")
- }
-
- *keyType = tmp
- return nil
-}
-
-// String returns the string representation of a KeyType
-func (keyType KeyType) String() string {
- return string(keyType)
-}
-
-// UnmarshalJSON unmarshals and checks data as KeyType Algorithm
-func (keyType *KeyType) UnmarshalJSON(data []byte) error {
- var quote byte = '"'
- var quoted string
- if data[0] == quote {
- var err error
- quoted, err = strconv.Unquote(string(data))
- if err != nil {
- return fmt.Errorf("failed to process signature algorithm: %w", err)
- }
- } else {
- quoted = string(data)
- }
- _, ok := keyTypeAlg[quoted]
- if !ok {
- return errors.New("unknown signature algorithm")
- }
- *keyType = KeyType(quoted)
- return nil
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/parameters.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/parameters.go
deleted file mode 100644
index 2fe72e1dbc..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/parameters.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package jwa
-
-import (
- "crypto/elliptic"
-
- "github.com/open-policy-agent/opa/internal/jwx/buffer"
-)
-
-// EllipticCurve provides a indirect type to standard elliptic curve such that we can
-// use it for unmarshal
-type EllipticCurve struct {
- elliptic.Curve
-}
-
-// AlgorithmParameters provides a single structure suitable to unmarshaling any JWK
-type AlgorithmParameters struct {
- N buffer.Buffer `json:"n,omitempty"`
- E buffer.Buffer `json:"e,omitempty"`
- D buffer.Buffer `json:"d,omitempty"`
- P buffer.Buffer `json:"p,omitempty"`
- Q buffer.Buffer `json:"q,omitempty"`
- Dp buffer.Buffer `json:"dp,omitempty"`
- Dq buffer.Buffer `json:"dq,omitempty"`
- Qi buffer.Buffer `json:"qi,omitempty"`
- Crv EllipticCurveAlgorithm `json:"crv,omitempty"`
- X buffer.Buffer `json:"x,omitempty"`
- Y buffer.Buffer `json:"y,omitempty"`
- K buffer.Buffer `json:"k,omitempty"`
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/signature.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/signature.go
deleted file mode 100644
index 45e400176d..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/signature.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package jwa
-
-import (
- "errors"
- "fmt"
- "strconv"
-)
-
-// SignatureAlgorithm represents the various signature algorithms as described in https://tools.ietf.org/html/rfc7518#section-3.1
-type SignatureAlgorithm string
-
-var signatureAlg = map[string]struct{}{"ES256": {}, "ES384": {}, "ES512": {}, "HS256": {}, "HS384": {}, "HS512": {}, "PS256": {}, "PS384": {}, "PS512": {}, "RS256": {}, "RS384": {}, "RS512": {}, "none": {}}
-
-// Supported values for SignatureAlgorithm
-const (
- ES256 SignatureAlgorithm = "ES256" // ECDSA using P-256 and SHA-256
- ES384 SignatureAlgorithm = "ES384" // ECDSA using P-384 and SHA-384
- ES512 SignatureAlgorithm = "ES512" // ECDSA using P-521 and SHA-512
- HS256 SignatureAlgorithm = "HS256" // HMAC using SHA-256
- HS384 SignatureAlgorithm = "HS384" // HMAC using SHA-384
- HS512 SignatureAlgorithm = "HS512" // HMAC using SHA-512
- NoSignature SignatureAlgorithm = "none"
- PS256 SignatureAlgorithm = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256
- PS384 SignatureAlgorithm = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384
- PS512 SignatureAlgorithm = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512
- RS256 SignatureAlgorithm = "RS256" // RSASSA-PKCS-v1.5 using SHA-256
- RS384 SignatureAlgorithm = "RS384" // RSASSA-PKCS-v1.5 using SHA-384
- RS512 SignatureAlgorithm = "RS512" // RSASSA-PKCS-v1.5 using SHA-512
- NoValue SignatureAlgorithm = "" // No value is different from none
- Unsupported SignatureAlgorithm = "unsupported"
-)
-
-// Accept is used when conversion from values given by
-// outside sources (such as JSON payloads) is required
-func (signature *SignatureAlgorithm) Accept(value interface{}) error {
- var tmp SignatureAlgorithm
- switch x := value.(type) {
- case string:
- tmp = SignatureAlgorithm(x)
- case SignatureAlgorithm:
- tmp = x
- default:
- return fmt.Errorf("invalid type for jwa.SignatureAlgorithm: %T", value)
- }
- _, ok := signatureAlg[tmp.String()]
- if !ok {
- return errors.New("unknown signature algorithm")
- }
- *signature = tmp
- return nil
-}
-
-// String returns the string representation of a SignatureAlgorithm
-func (signature SignatureAlgorithm) String() string {
- return string(signature)
-}
-
-// UnmarshalJSON unmarshals and checks data as Signature Algorithm
-func (signature *SignatureAlgorithm) UnmarshalJSON(data []byte) error {
- var quote byte = '"'
- var quoted string
- if data[0] == quote {
- var err error
- quoted, err = strconv.Unquote(string(data))
- if err != nil {
- return fmt.Errorf("failed to process signature algorithm: %w", err)
- }
- } else {
- quoted = string(data)
- }
- _, ok := signatureAlg[quoted]
- if !ok {
- *signature = Unsupported
- return nil
- }
- *signature = SignatureAlgorithm(quoted)
- return nil
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/ecdsa.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/ecdsa.go
deleted file mode 100644
index b46689f037..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/ecdsa.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package jwk
-
-import (
- "crypto/ecdsa"
- "crypto/elliptic"
- "errors"
- "fmt"
- "math/big"
-
- "github.com/open-policy-agent/opa/internal/jwx/jwa"
-)
-
-func newECDSAPublicKey(key *ecdsa.PublicKey) (*ECDSAPublicKey, error) {
-
- var hdr StandardHeaders
- err := hdr.Set(KeyTypeKey, jwa.EC)
- if err != nil {
- return nil, fmt.Errorf("failed to set Key Type: %w", err)
- }
-
- return &ECDSAPublicKey{
- StandardHeaders: &hdr,
- key: key,
- }, nil
-}
-
-func newECDSAPrivateKey(key *ecdsa.PrivateKey) (*ECDSAPrivateKey, error) {
-
- var hdr StandardHeaders
- err := hdr.Set(KeyTypeKey, jwa.EC)
- if err != nil {
- return nil, fmt.Errorf("failed to set Key Type: %w", err)
- }
-
- return &ECDSAPrivateKey{
- StandardHeaders: &hdr,
- key: key,
- }, nil
-}
-
-// Materialize returns the EC-DSA public key represented by this JWK
-func (k ECDSAPublicKey) Materialize() (interface{}, error) {
- return k.key, nil
-}
-
-// Materialize returns the EC-DSA private key represented by this JWK
-func (k ECDSAPrivateKey) Materialize() (interface{}, error) {
- return k.key, nil
-}
-
-// GenerateKey creates a ECDSAPublicKey from JWK format
-func (k *ECDSAPublicKey) GenerateKey(keyJSON *RawKeyJSON) error {
-
- var x, y big.Int
-
- if keyJSON.X == nil || keyJSON.Y == nil || keyJSON.Crv == "" {
- return errors.New("missing mandatory key parameters X, Y or Crv")
- }
-
- x.SetBytes(keyJSON.X.Bytes())
- y.SetBytes(keyJSON.Y.Bytes())
-
- var curve elliptic.Curve
- switch keyJSON.Crv {
- case jwa.P256:
- curve = elliptic.P256()
- case jwa.P384:
- curve = elliptic.P384()
- case jwa.P521:
- curve = elliptic.P521()
- default:
- return fmt.Errorf("invalid curve name %s", keyJSON.Crv)
- }
-
- *k = ECDSAPublicKey{
- StandardHeaders: &keyJSON.StandardHeaders,
- key: &ecdsa.PublicKey{
- Curve: curve,
- X: &x,
- Y: &y,
- },
- }
- return nil
-}
-
-// GenerateKey creates a ECDSAPrivateKey from JWK format
-func (k *ECDSAPrivateKey) GenerateKey(keyJSON *RawKeyJSON) error {
-
- if keyJSON.D == nil {
- return errors.New("missing mandatory key parameter D")
- }
- eCDSAPublicKey := &ECDSAPublicKey{}
- err := eCDSAPublicKey.GenerateKey(keyJSON)
- if err != nil {
- return fmt.Errorf("failed to generate public key: %w", err)
- }
- dBytes := keyJSON.D.Bytes()
- // The length of this octet string MUST be ceiling(log-base-2(n)/8)
- // octets (where n is the order of the curve). This is because the private
- // key d must be in the interval [1, n-1] so the bitlength of d should be
- // no larger than the bitlength of n-1. The easiest way to find the octet
- // length is to take bitlength(n-1), add 7 to force a carry, and shift this
- // bit sequence right by 3, which is essentially dividing by 8 and adding
- // 1 if there is any remainder. Thus, the private key value d should be
- // output to (bitlength(n-1)+7)>>3 octets.
- n := eCDSAPublicKey.key.Params().N
- octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
- if octetLength-len(dBytes) != 0 {
- return errors.New("failed to generate private key. Incorrect D value")
- }
- privateKey := &ecdsa.PrivateKey{
- PublicKey: *eCDSAPublicKey.key,
- D: (&big.Int{}).SetBytes(keyJSON.D.Bytes()),
- }
-
- k.key = privateKey
- k.StandardHeaders = &keyJSON.StandardHeaders
-
- return nil
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/headers.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/headers.go
deleted file mode 100644
index b0fd51e901..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/headers.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package jwk
-
-import (
- "fmt"
-
- "github.com/open-policy-agent/opa/internal/jwx/jwa"
-)
-
-// Convenience constants for common JWK parameters
-const (
- AlgorithmKey = "alg"
- KeyIDKey = "kid"
- KeyOpsKey = "key_ops"
- KeyTypeKey = "kty"
- KeyUsageKey = "use"
- PrivateParamsKey = "privateParams"
-)
-
-// Headers provides a common interface to all future possible headers
-type Headers interface {
- Get(string) (interface{}, bool)
- Set(string, interface{}) error
- Walk(func(string, interface{}) error) error
- GetAlgorithm() jwa.SignatureAlgorithm
- GetKeyID() string
- GetKeyOps() KeyOperationList
- GetKeyType() jwa.KeyType
- GetKeyUsage() string
- GetPrivateParams() map[string]interface{}
-}
-
-// StandardHeaders stores the common JWK parameters
-type StandardHeaders struct {
- Algorithm *jwa.SignatureAlgorithm `json:"alg,omitempty"` // https://tools.ietf.org/html/rfc7517#section-4.4
- KeyID string `json:"kid,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.4
- KeyOps KeyOperationList `json:"key_ops,omitempty"` // https://tools.ietf.org/html/rfc7517#section-4.3
- KeyType jwa.KeyType `json:"kty,omitempty"` // https://tools.ietf.org/html/rfc7517#section-4.1
- KeyUsage string `json:"use,omitempty"` // https://tools.ietf.org/html/rfc7517#section-4.2
- PrivateParams map[string]interface{} `json:"privateParams,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.4
-}
-
-// GetAlgorithm is a convenience function to retrieve the corresponding value stored in the StandardHeaders
-func (h *StandardHeaders) GetAlgorithm() jwa.SignatureAlgorithm {
- if v := h.Algorithm; v != nil {
- return *v
- }
- return jwa.NoValue
-}
-
-// GetKeyID is a convenience function to retrieve the corresponding value stored in the StandardHeaders
-func (h *StandardHeaders) GetKeyID() string {
- return h.KeyID
-}
-
-// GetKeyOps is a convenience function to retrieve the corresponding value stored in the StandardHeaders
-func (h *StandardHeaders) GetKeyOps() KeyOperationList {
- return h.KeyOps
-}
-
-// GetKeyType is a convenience function to retrieve the corresponding value stored in the StandardHeaders
-func (h *StandardHeaders) GetKeyType() jwa.KeyType {
- return h.KeyType
-}
-
-// GetKeyUsage is a convenience function to retrieve the corresponding value stored in the StandardHeaders
-func (h *StandardHeaders) GetKeyUsage() string {
- return h.KeyUsage
-}
-
-// GetPrivateParams is a convenience function to retrieve the corresponding value stored in the StandardHeaders
-func (h *StandardHeaders) GetPrivateParams() map[string]interface{} {
- return h.PrivateParams
-}
-
-// Get is a general getter function for JWK StandardHeaders structure
-func (h *StandardHeaders) Get(name string) (interface{}, bool) {
- switch name {
- case AlgorithmKey:
- alg := h.GetAlgorithm()
- if alg != jwa.NoValue {
- return alg, true
- }
- return nil, false
- case KeyIDKey:
- v := h.KeyID
- if v == "" {
- return nil, false
- }
- return v, true
- case KeyOpsKey:
- v := h.KeyOps
- if v == nil {
- return nil, false
- }
- return v, true
- case KeyTypeKey:
- v := h.KeyType
- if v == jwa.InvalidKeyType {
- return nil, false
- }
- return v, true
- case KeyUsageKey:
- v := h.KeyUsage
- if v == "" {
- return nil, false
- }
- return v, true
- case PrivateParamsKey:
- v := h.PrivateParams
- if len(v) == 0 {
- return nil, false
- }
- return v, true
- default:
- return nil, false
- }
-}
-
-// Set is a general getter function for JWK StandardHeaders structure
-func (h *StandardHeaders) Set(name string, value interface{}) error {
- switch name {
- case AlgorithmKey:
- var acceptor jwa.SignatureAlgorithm
- if err := acceptor.Accept(value); err != nil {
- return fmt.Errorf("invalid value for %s key: %w", AlgorithmKey, err)
- }
- h.Algorithm = &acceptor
- return nil
- case KeyIDKey:
- if v, ok := value.(string); ok {
- h.KeyID = v
- return nil
- }
- return fmt.Errorf("invalid value for %s key: %T", KeyIDKey, value)
- case KeyOpsKey:
- if err := h.KeyOps.Accept(value); err != nil {
- return fmt.Errorf("invalid value for %s key: %w", KeyOpsKey, err)
- }
- return nil
- case KeyTypeKey:
- if err := h.KeyType.Accept(value); err != nil {
- return fmt.Errorf("invalid value for %s key: %w", KeyTypeKey, err)
- }
- return nil
- case KeyUsageKey:
- if v, ok := value.(string); ok {
- h.KeyUsage = v
- return nil
- }
- return fmt.Errorf("invalid value for %s key: %T", KeyUsageKey, value)
- case PrivateParamsKey:
- if v, ok := value.(map[string]interface{}); ok {
- h.PrivateParams = v
- return nil
- }
- return fmt.Errorf("invalid value for %s key: %T", PrivateParamsKey, value)
- default:
- return fmt.Errorf("invalid key: %s", name)
- }
-}
-
-// Walk iterates over all JWK standard headers fields while applying a function to its value.
-func (h StandardHeaders) Walk(f func(string, interface{}) error) error {
- for _, key := range []string{AlgorithmKey, KeyIDKey, KeyOpsKey, KeyTypeKey, KeyUsageKey, PrivateParamsKey} {
- if v, ok := h.Get(key); ok {
- if err := f(key, v); err != nil {
- return fmt.Errorf("walk function returned error for %s: %w", key, err)
- }
- }
- }
-
- for k, v := range h.PrivateParams {
- if err := f(k, v); err != nil {
- return fmt.Errorf("walk function returned error for %s: %w", k, err)
- }
- }
- return nil
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/interface.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/interface.go
deleted file mode 100644
index 7a7d03ef1c..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/interface.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package jwk
-
-import (
- "crypto/ecdsa"
- "crypto/rsa"
-
- "github.com/open-policy-agent/opa/internal/jwx/jwa"
-)
-
-// Set is a convenience struct to allow generating and parsing
-// JWK sets as opposed to single JWKs
-type Set struct {
- Keys []Key `json:"keys"`
-}
-
-// Key defines the minimal interface for each of the
-// key types. Their use and implementation differ significantly
-// between each key types, so you should use type assertions
-// to perform more specific tasks with each key
-type Key interface {
- Headers
-
- // Materialize creates the corresponding key. For example,
- // RSA types would create *rsa.PublicKey or *rsa.PrivateKey,
- // EC types would create *ecdsa.PublicKey or *ecdsa.PrivateKey,
- // and OctetSeq types create a []byte key.
- Materialize() (interface{}, error)
- GenerateKey(*RawKeyJSON) error
-}
-
-// RawKeyJSON is generic type that represents any kind JWK
-type RawKeyJSON struct {
- StandardHeaders
- jwa.AlgorithmParameters
-}
-
-// RawKeySetJSON is generic type that represents a JWK Set
-type RawKeySetJSON struct {
- Keys []RawKeyJSON `json:"keys"`
-}
-
-// RSAPublicKey is a type of JWK generated from RSA public keys
-type RSAPublicKey struct {
- *StandardHeaders
- key *rsa.PublicKey
-}
-
-// RSAPrivateKey is a type of JWK generated from RSA private keys
-type RSAPrivateKey struct {
- *StandardHeaders
- *jwa.AlgorithmParameters
- key *rsa.PrivateKey
-}
-
-// SymmetricKey is a type of JWK generated from symmetric keys
-type SymmetricKey struct {
- *StandardHeaders
- key []byte
-}
-
-// ECDSAPublicKey is a type of JWK generated from ECDSA public keys
-type ECDSAPublicKey struct {
- *StandardHeaders
- key *ecdsa.PublicKey
-}
-
-// ECDSAPrivateKey is a type of JWK generated from ECDH-ES private keys
-type ECDSAPrivateKey struct {
- *StandardHeaders
- key *ecdsa.PrivateKey
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/jwk.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/jwk.go
deleted file mode 100644
index aa22a3830f..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/jwk.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Package jwk implements JWK as described in https://tools.ietf.org/html/rfc7517
-package jwk
-
-import (
- "crypto/ecdsa"
- "crypto/rsa"
- "encoding/json"
- "errors"
- "fmt"
-
- "github.com/open-policy-agent/opa/internal/jwx/jwa"
-)
-
-// GetPublicKey returns the public key based on the private key type.
-// For rsa key types *rsa.PublicKey is returned; for ecdsa key types *ecdsa.PublicKey;
-// for byte slice (raw) keys, the key itself is returned. If the corresponding
-// public key cannot be deduced, an error is returned
-func GetPublicKey(key interface{}) (interface{}, error) {
- if key == nil {
- return nil, errors.New("jwk.New requires a non-nil key")
- }
-
- switch v := key.(type) {
- // Mental note: although Public() is defined in both types,
- // you can not coalesce the clauses for rsa.PrivateKey and
- // ecdsa.PrivateKey, as then `v` becomes interface{}
- // b/c the compiler cannot deduce the exact type.
- case *rsa.PrivateKey:
- return v.Public(), nil
- case *ecdsa.PrivateKey:
- return v.Public(), nil
- case []byte:
- return v, nil
- default:
- return nil, fmt.Errorf("invalid key type %T", key)
- }
-}
-
-// GetKeyTypeFromKey creates a jwk.Key from the given key.
-func GetKeyTypeFromKey(key interface{}) jwa.KeyType {
-
- switch key.(type) {
- case *rsa.PrivateKey, *rsa.PublicKey:
- return jwa.RSA
- case *ecdsa.PrivateKey, *ecdsa.PublicKey:
- return jwa.EC
- case []byte:
- return jwa.OctetSeq
- default:
- return jwa.InvalidKeyType
- }
-}
-
-// New creates a jwk.Key from the given key.
-func New(key interface{}) (Key, error) {
- if key == nil {
- return nil, errors.New("jwk.New requires a non-nil key")
- }
-
- switch v := key.(type) {
- case *rsa.PrivateKey:
- return newRSAPrivateKey(v)
- case *rsa.PublicKey:
- return newRSAPublicKey(v)
- case *ecdsa.PrivateKey:
- return newECDSAPrivateKey(v)
- case *ecdsa.PublicKey:
- return newECDSAPublicKey(v)
- case []byte:
- return newSymmetricKey(v)
- default:
- return nil, fmt.Errorf("invalid key type %T", key)
- }
-}
-
-func parse(jwkSrc string) (*Set, error) {
-
- var jwkKeySet Set
- var jwkKey Key
- rawKeySetJSON := &RawKeySetJSON{}
- err := json.Unmarshal([]byte(jwkSrc), rawKeySetJSON)
- if err != nil {
- return nil, fmt.Errorf("failed to unmarshal JWK Set: %w", err)
- }
- if len(rawKeySetJSON.Keys) == 0 {
-
- // It might be a single key
- rawKeyJSON := &RawKeyJSON{}
- err := json.Unmarshal([]byte(jwkSrc), rawKeyJSON)
- if err != nil {
- return nil, fmt.Errorf("failed to unmarshal JWK: %w", err)
- }
- jwkKey, err = rawKeyJSON.GenerateKey()
- if err != nil {
- return nil, fmt.Errorf("failed to generate key: %w", err)
- }
- // Add to set
- jwkKeySet.Keys = append(jwkKeySet.Keys, jwkKey)
- } else {
- for i := range rawKeySetJSON.Keys {
- rawKeyJSON := rawKeySetJSON.Keys[i]
- if rawKeyJSON.Algorithm != nil && *rawKeyJSON.Algorithm == jwa.Unsupported {
- continue
- }
- jwkKey, err = rawKeyJSON.GenerateKey()
- if err != nil {
- return nil, fmt.Errorf("failed to generate key: %w", err)
- }
- jwkKeySet.Keys = append(jwkKeySet.Keys, jwkKey)
- }
- }
- return &jwkKeySet, nil
-}
-
-// ParseBytes parses JWK from the incoming byte buffer.
-func ParseBytes(buf []byte) (*Set, error) {
- return parse(string(buf[:]))
-}
-
-// ParseString parses JWK from the incoming string.
-func ParseString(s string) (*Set, error) {
- return parse(s)
-}
-
-// GenerateKey creates an internal representation of a key from a raw JWK JSON
-func (r *RawKeyJSON) GenerateKey() (Key, error) {
-
- var key Key
-
- switch r.KeyType {
- case jwa.RSA:
- if r.D != nil {
- key = &RSAPrivateKey{}
- } else {
- key = &RSAPublicKey{}
- }
- case jwa.EC:
- if r.D != nil {
- key = &ECDSAPrivateKey{}
- } else {
- key = &ECDSAPublicKey{}
- }
- case jwa.OctetSeq:
- key = &SymmetricKey{}
- default:
- return nil, errors.New("unrecognized key type")
- }
- err := key.GenerateKey(r)
- if err != nil {
- return nil, fmt.Errorf("failed to generate key from JWK: %w", err)
- }
- return key, nil
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/key_ops.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/key_ops.go
deleted file mode 100644
index e8fe4cd854..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/key_ops.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package jwk
-
-import (
- "encoding/json"
- "fmt"
-)
-
-// KeyUsageType is used to denote what this key should be used for
-type KeyUsageType string
-
-const (
- // ForSignature is the value used in the headers to indicate that
- // this key should be used for signatures
- ForSignature KeyUsageType = "sig"
- // ForEncryption is the value used in the headers to indicate that
- // this key should be used for encryptiong
- ForEncryption KeyUsageType = "enc"
-)
-
-// KeyOperation is used to denote the allowed operations for a Key
-type KeyOperation string
-
-// KeyOperationList represents an slice of KeyOperation
-type KeyOperationList []KeyOperation
-
-var keyOps = map[string]struct{}{"sign": {}, "verify": {}, "encrypt": {}, "decrypt": {}, "wrapKey": {}, "unwrapKey": {}, "deriveKey": {}, "deriveBits": {}}
-
-// KeyOperation constants
-const (
- KeyOpSign KeyOperation = "sign" // (compute digital signature or MAC)
- KeyOpVerify KeyOperation = "verify" // (verify digital signature or MAC)
- KeyOpEncrypt KeyOperation = "encrypt" // (encrypt content)
- KeyOpDecrypt KeyOperation = "decrypt" // (decrypt content and validate decryption, if applicable)
- KeyOpWrapKey KeyOperation = "wrapKey" // (encrypt key)
- KeyOpUnwrapKey KeyOperation = "unwrapKey" // (decrypt key and validate decryption, if applicable)
- KeyOpDeriveKey KeyOperation = "deriveKey" // (derive key)
- KeyOpDeriveBits KeyOperation = "deriveBits" // (derive bits not to be used as a key)
-)
-
-// Accept determines if Key Operation is valid
-func (keyOperationList *KeyOperationList) Accept(v interface{}) error {
- switch x := v.(type) {
- case KeyOperationList:
- *keyOperationList = x
- return nil
- default:
- return fmt.Errorf(`invalid value %T`, v)
- }
-}
-
-// UnmarshalJSON unmarshals and checks data as KeyType Algorithm
-func (keyOperationList *KeyOperationList) UnmarshalJSON(data []byte) error {
- var tempKeyOperationList []string
- err := json.Unmarshal(data, &tempKeyOperationList)
- if err != nil {
- return fmt.Errorf("invalid key operation")
- }
- for _, value := range tempKeyOperationList {
- _, ok := keyOps[value]
- if !ok {
- return fmt.Errorf("unknown key operation")
- }
- *keyOperationList = append(*keyOperationList, KeyOperation(value))
- }
- return nil
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/rsa.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/rsa.go
deleted file mode 100644
index 11b8e3b56b..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/rsa.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package jwk
-
-import (
- "crypto/rsa"
- "encoding/binary"
- "errors"
- "fmt"
- "math/big"
-
- "github.com/open-policy-agent/opa/internal/jwx/jwa"
-)
-
-func newRSAPublicKey(key *rsa.PublicKey) (*RSAPublicKey, error) {
-
- var hdr StandardHeaders
- err := hdr.Set(KeyTypeKey, jwa.RSA)
- if err != nil {
- return nil, fmt.Errorf("failed to set Key Type: %w", err)
- }
- return &RSAPublicKey{
- StandardHeaders: &hdr,
- key: key,
- }, nil
-}
-
-func newRSAPrivateKey(key *rsa.PrivateKey) (*RSAPrivateKey, error) {
-
- var hdr StandardHeaders
- err := hdr.Set(KeyTypeKey, jwa.RSA)
- if err != nil {
- return nil, fmt.Errorf("failed to set Key Type: %w", err)
- }
-
- var algoParams jwa.AlgorithmParameters
-
- // it is needed to use raw encoding to omit the "=" paddings at the end
- algoParams.D = key.D.Bytes()
- algoParams.P = key.Primes[0].Bytes()
- algoParams.Q = key.Primes[1].Bytes()
- algoParams.Dp = key.Precomputed.Dp.Bytes()
- algoParams.Dq = key.Precomputed.Dq.Bytes()
- algoParams.Qi = key.Precomputed.Qinv.Bytes()
-
- // "modulus" (N) from the public key in the private key
- algoParams.N = key.PublicKey.N.Bytes()
-
- // make the E a.k.a "coprime"
- // https://en.wikipedia.org/wiki/RSA_(cryptosystem)
- coprime := make([]byte, 8)
- binary.BigEndian.PutUint64(coprime, uint64(key.PublicKey.E))
- // find the 1st index of non 0x0 paddings from the beginning
- i := 0
- for ; i < len(coprime); i++ {
- if coprime[i] != 0x0 {
- break
- }
- }
- algoParams.E = coprime[i:]
-
- return &RSAPrivateKey{
- StandardHeaders: &hdr,
- AlgorithmParameters: &algoParams,
- key: key,
- }, nil
-}
-
-// Materialize returns the standard RSA Public Key representation stored in the internal representation
-func (k *RSAPublicKey) Materialize() (interface{}, error) {
- if k.key == nil {
- return nil, errors.New("key has no rsa.PublicKey associated with it")
- }
- return k.key, nil
-}
-
-// Materialize returns the standard RSA Private Key representation stored in the internal representation
-func (k *RSAPrivateKey) Materialize() (interface{}, error) {
- if k.key == nil {
- return nil, errors.New("key has no rsa.PrivateKey associated with it")
- }
- return k.key, nil
-}
-
-// GenerateKey creates a RSAPublicKey from a RawKeyJSON
-func (k *RSAPublicKey) GenerateKey(keyJSON *RawKeyJSON) error {
-
- if keyJSON.N == nil || keyJSON.E == nil {
- return errors.New("missing mandatory key parameters N or E")
- }
- rsaPublicKey := &rsa.PublicKey{
- N: (&big.Int{}).SetBytes(keyJSON.N.Bytes()),
- E: int((&big.Int{}).SetBytes(keyJSON.E.Bytes()).Int64()),
- }
- k.key = rsaPublicKey
- k.StandardHeaders = &keyJSON.StandardHeaders
- return nil
-}
-
-// GenerateKey creates a RSAPublicKey from a RawKeyJSON
-func (k *RSAPrivateKey) GenerateKey(keyJSON *RawKeyJSON) error {
-
- rsaPublicKey := &RSAPublicKey{}
- err := rsaPublicKey.GenerateKey(keyJSON)
- if err != nil {
- return fmt.Errorf("failed to generate public key: %w", err)
- }
-
- if keyJSON.D == nil || keyJSON.P == nil || keyJSON.Q == nil {
- return errors.New("missing mandatory key parameters D, P or Q")
- }
- privateKey := &rsa.PrivateKey{
- PublicKey: *rsaPublicKey.key,
- D: (&big.Int{}).SetBytes(keyJSON.D.Bytes()),
- Primes: []*big.Int{
- (&big.Int{}).SetBytes(keyJSON.P.Bytes()),
- (&big.Int{}).SetBytes(keyJSON.Q.Bytes()),
- },
- }
-
- if keyJSON.Dp.Len() > 0 {
- privateKey.Precomputed.Dp = (&big.Int{}).SetBytes(keyJSON.Dp.Bytes())
- }
- if keyJSON.Dq.Len() > 0 {
- privateKey.Precomputed.Dq = (&big.Int{}).SetBytes(keyJSON.Dq.Bytes())
- }
- if keyJSON.Qi.Len() > 0 {
- privateKey.Precomputed.Qinv = (&big.Int{}).SetBytes(keyJSON.Qi.Bytes())
- }
-
- k.key = privateKey
- k.StandardHeaders = &keyJSON.StandardHeaders
- k.AlgorithmParameters = &keyJSON.AlgorithmParameters
- return nil
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/symmetric.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/symmetric.go
deleted file mode 100644
index e0cc0751e6..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/symmetric.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package jwk
-
-import (
- "fmt"
-
- "github.com/open-policy-agent/opa/internal/jwx/jwa"
-)
-
-func newSymmetricKey(key []byte) (*SymmetricKey, error) {
- var hdr StandardHeaders
-
- err := hdr.Set(KeyTypeKey, jwa.OctetSeq)
- if err != nil {
- return nil, fmt.Errorf("failed to set Key Type: %w", err)
- }
- return &SymmetricKey{
- StandardHeaders: &hdr,
- key: key,
- }, nil
-}
-
-// Materialize returns the octets for this symmetric key.
-// Since this is a symmetric key, this just calls Octets
-func (s SymmetricKey) Materialize() (interface{}, error) {
- return s.Octets(), nil
-}
-
-// Octets returns the octets in the key
-func (s SymmetricKey) Octets() []byte {
- return s.key
-}
-
-// GenerateKey creates a Symmetric key from a RawKeyJSON
-func (s *SymmetricKey) GenerateKey(keyJSON *RawKeyJSON) error {
-
- *s = SymmetricKey{
- StandardHeaders: &keyJSON.StandardHeaders,
- key: keyJSON.K,
- }
- return nil
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/headers.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/headers.go
deleted file mode 100644
index 0c8b355087..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/headers.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package jws
-
-import (
- "fmt"
-
- "github.com/open-policy-agent/opa/internal/jwx/jwa"
-)
-
-// Constants for JWS Common parameters
-const (
- AlgorithmKey = "alg"
- ContentTypeKey = "cty"
- CriticalKey = "crit"
- JWKKey = "jwk"
- JWKSetURLKey = "jku"
- KeyIDKey = "kid"
- PrivateParamsKey = "privateParams"
- TypeKey = "typ"
-)
-
-// Headers provides a common interface for common header parameters
-type Headers interface {
- Get(string) (interface{}, bool)
- Set(string, interface{}) error
- GetAlgorithm() jwa.SignatureAlgorithm
-}
-
-// StandardHeaders contains JWS common parameters.
-type StandardHeaders struct {
- Algorithm jwa.SignatureAlgorithm `json:"alg,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.1
- ContentType string `json:"cty,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.10
- Critical []string `json:"crit,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.11
- JWK string `json:"jwk,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.3
- JWKSetURL string `json:"jku,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.2
- KeyID string `json:"kid,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.4
- PrivateParams map[string]interface{} `json:"privateParams,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.9
- Type string `json:"typ,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.9
-}
-
-// GetAlgorithm returns algorithm
-func (h *StandardHeaders) GetAlgorithm() jwa.SignatureAlgorithm {
- return h.Algorithm
-}
-
-// Get is a general getter function for StandardHeaders structure
-func (h *StandardHeaders) Get(name string) (interface{}, bool) {
- switch name {
- case AlgorithmKey:
- v := h.Algorithm
- if v == "" {
- return nil, false
- }
- return v, true
- case ContentTypeKey:
- v := h.ContentType
- if v == "" {
- return nil, false
- }
- return v, true
- case CriticalKey:
- v := h.Critical
- if len(v) == 0 {
- return nil, false
- }
- return v, true
- case JWKKey:
- v := h.JWK
- if v == "" {
- return nil, false
- }
- return v, true
- case JWKSetURLKey:
- v := h.JWKSetURL
- if v == "" {
- return nil, false
- }
- return v, true
- case KeyIDKey:
- v := h.KeyID
- if v == "" {
- return nil, false
- }
- return v, true
- case PrivateParamsKey:
- v := h.PrivateParams
- if len(v) == 0 {
- return nil, false
- }
- return v, true
- case TypeKey:
- v := h.Type
- if v == "" {
- return nil, false
- }
- return v, true
- default:
- return nil, false
- }
-}
-
-// Set is a general setter function for StandardHeaders structure
-func (h *StandardHeaders) Set(name string, value interface{}) error {
- switch name {
- case AlgorithmKey:
- if err := h.Algorithm.Accept(value); err != nil {
- return fmt.Errorf("invalid value for %s key: %w", AlgorithmKey, err)
- }
- return nil
- case ContentTypeKey:
- if v, ok := value.(string); ok {
- h.ContentType = v
- return nil
- }
- return fmt.Errorf("invalid value for %s key: %T", ContentTypeKey, value)
- case CriticalKey:
- if v, ok := value.([]string); ok {
- h.Critical = v
- return nil
- }
- return fmt.Errorf("invalid value for %s key: %T", CriticalKey, value)
- case JWKKey:
- if v, ok := value.(string); ok {
- h.JWK = v
- return nil
- }
- return fmt.Errorf("invalid value for %s key: %T", JWKKey, value)
- case JWKSetURLKey:
- if v, ok := value.(string); ok {
- h.JWKSetURL = v
- return nil
- }
- return fmt.Errorf("invalid value for %s key: %T", JWKSetURLKey, value)
- case KeyIDKey:
- if v, ok := value.(string); ok {
- h.KeyID = v
- return nil
- }
- return fmt.Errorf("invalid value for %s key: %T", KeyIDKey, value)
- case PrivateParamsKey:
- if v, ok := value.(map[string]interface{}); ok {
- h.PrivateParams = v
- return nil
- }
- return fmt.Errorf("invalid value for %s key: %T", PrivateParamsKey, value)
- case TypeKey:
- if v, ok := value.(string); ok {
- h.Type = v
- return nil
- }
- return fmt.Errorf("invalid value for %s key: %T", TypeKey, value)
- default:
- return fmt.Errorf("invalid key: %s", name)
- }
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/interface.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/interface.go
deleted file mode 100644
index e647c8ac93..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/interface.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package jws
-
-// Message represents a full JWS encoded message. Flattened serialization
-// is not supported as a struct, but rather it's represented as a
-// Message struct with only one `Signature` element.
-//
-// Do not expect to use the Message object to verify or construct a
-// signed payloads with. You should only use this when you want to actually
-// want to programmatically view the contents for the full JWS Payload.
-//
-// To sign and verify, use the appropriate `SignWithOption()` nad `Verify()` functions
-type Message struct {
- Payload []byte `json:"payload"`
- Signatures []*Signature `json:"signatures,omitempty"`
-}
-
-// Signature represents the headers and signature of a JWS message
-type Signature struct {
- Headers Headers `json:"header,omitempty"` // Unprotected Headers
- Protected Headers `json:"Protected,omitempty"` // Protected Headers
- Signature []byte `json:"signature,omitempty"` // GetSignature
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/jws.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/jws.go
deleted file mode 100644
index 2a5fe3c173..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/jws.go
+++ /dev/null
@@ -1,220 +0,0 @@
-// Package jws implements the digital Signature on JSON based data
-// structures as described in https://tools.ietf.org/html/rfc7515
-//
-// If you do not care about the details, the only things that you
-// would need to use are the following functions:
-//
-// jws.SignWithOption(Payload, algorithm, key)
-// jws.Verify(encodedjws, algorithm, key)
-//
-// To sign, simply use `jws.SignWithOption`. `Payload` is a []byte buffer that
-// contains whatever data you want to sign. `alg` is one of the
-// jwa.SignatureAlgorithm constants from package jwa. For RSA and
-// ECDSA family of algorithms, you will need to prepare a private key.
-// For HMAC family, you just need a []byte value. The `jws.SignWithOption`
-// function will return the encoded JWS message on success.
-//
-// To verify, use `jws.Verify`. It will parse the `encodedjws` buffer
-// and verify the result using `algorithm` and `key`. Upon successful
-// verification, the original Payload is returned, so you can work on it.
-package jws
-
-import (
- "bytes"
- "crypto/rand"
- "encoding/base64"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "strings"
-
- "github.com/open-policy-agent/opa/internal/jwx/jwa"
- "github.com/open-policy-agent/opa/internal/jwx/jwk"
- "github.com/open-policy-agent/opa/internal/jwx/jws/sign"
- "github.com/open-policy-agent/opa/internal/jwx/jws/verify"
-)
-
-// SignLiteral generates a Signature for the given Payload and Headers, and serializes
-// it in compact serialization format. In this format you may NOT use
-// multiple signers.
-func SignLiteral(payload []byte, alg jwa.SignatureAlgorithm, key interface{}, hdrBuf []byte, rnd io.Reader) ([]byte, error) {
- encodedHdr := base64.RawURLEncoding.EncodeToString(hdrBuf)
- encodedPayload := base64.RawURLEncoding.EncodeToString(payload)
- signingInput := strings.Join(
- []string{
- encodedHdr,
- encodedPayload,
- }, ".",
- )
- signer, err := sign.New(alg)
- if err != nil {
- return nil, fmt.Errorf("failed to create signer: %w", err)
- }
-
- var signature []byte
- switch s := signer.(type) {
- case *sign.ECDSASigner:
- signature, err = s.SignWithRand([]byte(signingInput), key, rnd)
- default:
- signature, err = signer.Sign([]byte(signingInput), key)
- }
- if err != nil {
- return nil, fmt.Errorf("failed to sign Payload: %w", err)
- }
- encodedSignature := base64.RawURLEncoding.EncodeToString(signature)
- compactSerialization := strings.Join(
- []string{
- signingInput,
- encodedSignature,
- }, ".",
- )
- return []byte(compactSerialization), nil
-}
-
-// SignWithOption generates a Signature for the given Payload, and serializes
-// it in compact serialization format. In this format you may NOT use
-// multiple signers.
-//
-// If you would like to pass custom Headers, use the WithHeaders option.
-func SignWithOption(payload []byte, alg jwa.SignatureAlgorithm, key interface{}) ([]byte, error) {
- var headers Headers = &StandardHeaders{}
-
- err := headers.Set(AlgorithmKey, alg)
- if err != nil {
- return nil, fmt.Errorf("failed to set alg value: %w", err)
- }
-
- hdrBuf, err := json.Marshal(headers)
- if err != nil {
- return nil, fmt.Errorf("failed to marshal Headers: %w", err)
- }
- // NOTE(sr): we don't use SignWithOption -- if we did, this rand.Reader
- // should come from the BuiltinContext's Seed, too.
- return SignLiteral(payload, alg, key, hdrBuf, rand.Reader)
-}
-
-// Verify checks if the given JWS message is verifiable using `alg` and `key`.
-// If the verification is successful, `err` is nil, and the content of the
-// Payload that was signed is returned. If you need more fine-grained
-// control of the verification process, manually call `Parse`, generate a
-// verifier, and call `Verify` on the parsed JWS message object.
-func Verify(buf []byte, alg jwa.SignatureAlgorithm, key interface{}) (ret []byte, err error) {
-
- verifier, err := verify.New(alg)
- if err != nil {
- return nil, fmt.Errorf("failed to create verifier: %w", err)
- }
-
- buf = bytes.TrimSpace(buf)
- if len(buf) == 0 {
- return nil, errors.New(`attempt to verify empty buffer`)
- }
-
- parts, err := SplitCompact(string(buf[:]))
- if err != nil {
- return nil, fmt.Errorf("failed extract from compact serialization format: %w", err)
- }
-
- signingInput := strings.Join(
- []string{
- parts[0],
- parts[1],
- }, ".",
- )
-
- decodedSignature, err := base64.RawURLEncoding.DecodeString(parts[2])
- if err != nil {
- return nil, fmt.Errorf("failed to decode signature: %w", err)
- }
- if err := verifier.Verify([]byte(signingInput), decodedSignature, key); err != nil {
- return nil, fmt.Errorf("failed to verify message: %w", err)
- }
-
- if decodedPayload, err := base64.RawURLEncoding.DecodeString(parts[1]); err == nil {
- return decodedPayload, nil
- }
- return nil, fmt.Errorf("failed to decode Payload: %w", err)
-}
-
-// VerifyWithJWK verifies the JWS message using the specified JWK
-func VerifyWithJWK(buf []byte, key jwk.Key) (payload []byte, err error) {
-
- keyVal, err := key.Materialize()
- if err != nil {
- return nil, fmt.Errorf("failed to materialize key: %w", err)
- }
- return Verify(buf, key.GetAlgorithm(), keyVal)
-}
-
-// VerifyWithJWKSet verifies the JWS message using JWK key set.
-// By default it will only pick up keys that have the "use" key
-// set to either "sig" or "enc", but you can override it by
-// providing a keyaccept function.
-func VerifyWithJWKSet(buf []byte, keyset *jwk.Set) (payload []byte, err error) {
-
- for _, key := range keyset.Keys {
- payload, err := VerifyWithJWK(buf, key)
- if err == nil {
- return payload, nil
- }
- }
- return nil, errors.New("failed to verify with any of the keys")
-}
-
-// ParseByte parses a JWS value serialized via compact serialization and provided as []byte.
-func ParseByte(jwsCompact []byte) (m *Message, err error) {
- return parseCompact(string(jwsCompact[:]))
-}
-
-// ParseString parses a JWS value serialized via compact serialization and provided as string.
-func ParseString(s string) (*Message, error) {
- return parseCompact(s)
-}
-
-// SplitCompact splits a JWT and returns its three parts
-// separately: Protected Headers, Payload and Signature.
-func SplitCompact(jwsCompact string) ([]string, error) {
-
- parts := strings.Split(jwsCompact, ".")
- if len(parts) < 3 {
- return nil, errors.New("failed to split compact serialization")
- }
- return parts, nil
-}
-
-// parseCompact parses a JWS value serialized via compact serialization.
-func parseCompact(str string) (m *Message, err error) {
-
- var decodedHeader, decodedPayload, decodedSignature []byte
- parts, err := SplitCompact(str)
- if err != nil {
- return nil, fmt.Errorf("invalid compact serialization format: %w", err)
- }
-
- if decodedHeader, err = base64.RawURLEncoding.DecodeString(parts[0]); err != nil {
- return nil, fmt.Errorf("failed to decode Headers: %w", err)
- }
- var hdr StandardHeaders
- if err := json.Unmarshal(decodedHeader, &hdr); err != nil {
- return nil, fmt.Errorf("failed to parse JOSE Headers: %w", err)
- }
-
- if decodedPayload, err = base64.RawURLEncoding.DecodeString(parts[1]); err != nil {
- return nil, fmt.Errorf("failed to decode Payload: %w", err)
- }
-
- if len(parts) > 2 {
- if decodedSignature, err = base64.RawURLEncoding.DecodeString(parts[2]); err != nil {
- return nil, fmt.Errorf("failed to decode Signature: %w", err)
- }
- }
-
- var msg Message
- msg.Payload = decodedPayload
- msg.Signatures = append(msg.Signatures, &Signature{
- Protected: &hdr,
- Signature: decodedSignature,
- })
- return &msg, nil
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/message.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/message.go
deleted file mode 100644
index 1366a3d7be..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/message.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package jws
-
-// PublicHeaders returns the public headers in a JWS
-func (s Signature) PublicHeaders() Headers {
- return s.Headers
-}
-
-// ProtectedHeaders returns the protected headers in a JWS
-func (s Signature) ProtectedHeaders() Headers {
- return s.Protected
-}
-
-// GetSignature returns the signature in a JWS
-func (s Signature) GetSignature() []byte {
- return s.Signature
-}
-
-// GetPayload returns the payload in a JWS
-func (m Message) GetPayload() []byte {
- return m.Payload
-}
-
-// GetSignatures returns the all signatures in a JWS
-func (m Message) GetSignatures() []*Signature {
- return m.Signatures
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/ecdsa.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/ecdsa.go
deleted file mode 100644
index db1aadec67..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/ecdsa.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package sign
-
-import (
- "crypto"
- "crypto/ecdsa"
- "crypto/rand"
- "errors"
- "fmt"
- "io"
-
- "github.com/open-policy-agent/opa/internal/jwx/jwa"
-)
-
-var ecdsaSignFuncs = map[jwa.SignatureAlgorithm]ecdsaSignFunc{}
-
-func init() {
- algs := map[jwa.SignatureAlgorithm]crypto.Hash{
- jwa.ES256: crypto.SHA256,
- jwa.ES384: crypto.SHA384,
- jwa.ES512: crypto.SHA512,
- }
-
- for alg, h := range algs {
- ecdsaSignFuncs[alg] = makeECDSASignFunc(h)
- }
-}
-
-func makeECDSASignFunc(hash crypto.Hash) ecdsaSignFunc {
- return ecdsaSignFunc(func(payload []byte, key *ecdsa.PrivateKey, rnd io.Reader) ([]byte, error) {
- curveBits := key.Curve.Params().BitSize
- keyBytes := curveBits / 8
- // Curve bits do not need to be a multiple of 8.
- if curveBits%8 > 0 {
- keyBytes++
- }
- h := hash.New()
- h.Write(payload)
- r, s, err := ecdsa.Sign(rnd, key, h.Sum(nil))
- if err != nil {
- return nil, fmt.Errorf("failed to sign payload using ecdsa: %w", err)
- }
-
- rBytes := r.Bytes()
- rBytesPadded := make([]byte, keyBytes)
- copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
-
- sBytes := s.Bytes()
- sBytesPadded := make([]byte, keyBytes)
- copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
-
- out := append(rBytesPadded, sBytesPadded...)
- return out, nil
- })
-}
-
-func newECDSA(alg jwa.SignatureAlgorithm) (*ECDSASigner, error) {
- signfn, ok := ecdsaSignFuncs[alg]
- if !ok {
- return nil, fmt.Errorf("unsupported algorithm while trying to create ECDSA signer: %s", alg)
- }
-
- return &ECDSASigner{
- alg: alg,
- sign: signfn,
- }, nil
-}
-
-// Algorithm returns the signer algorithm
-func (s ECDSASigner) Algorithm() jwa.SignatureAlgorithm {
- return s.alg
-}
-
-// SignWithRand signs payload with a ECDSA private key and a provided randomness
-// source (such as `rand.Reader`).
-func (s ECDSASigner) SignWithRand(payload []byte, key interface{}, r io.Reader) ([]byte, error) {
- if key == nil {
- return nil, errors.New("missing private key while signing payload")
- }
-
- privateKey, ok := key.(*ecdsa.PrivateKey)
- if !ok {
- return nil, fmt.Errorf("invalid key type %T. *ecdsa.PrivateKey is required", key)
- }
- return s.sign(payload, privateKey, r)
-}
-
-// Sign signs payload with a ECDSA private key
-func (s ECDSASigner) Sign(payload []byte, key interface{}) ([]byte, error) {
- return s.SignWithRand(payload, key, rand.Reader)
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/hmac.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/hmac.go
deleted file mode 100644
index a4fad4208b..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/hmac.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package sign
-
-import (
- "crypto/hmac"
- "crypto/sha256"
- "crypto/sha512"
- "errors"
- "fmt"
- "hash"
-
- "github.com/open-policy-agent/opa/internal/jwx/jwa"
-)
-
-var hmacSignFuncs = map[jwa.SignatureAlgorithm]hmacSignFunc{}
-
-func init() {
- algs := map[jwa.SignatureAlgorithm]func() hash.Hash{
- jwa.HS256: sha256.New,
- jwa.HS384: sha512.New384,
- jwa.HS512: sha512.New,
- }
-
- for alg, h := range algs {
- hmacSignFuncs[alg] = makeHMACSignFunc(h)
-
- }
-}
-
-func newHMAC(alg jwa.SignatureAlgorithm) (*HMACSigner, error) {
- signer, ok := hmacSignFuncs[alg]
- if !ok {
- return nil, fmt.Errorf(`unsupported algorithm while trying to create HMAC signer: %s`, alg)
- }
-
- return &HMACSigner{
- alg: alg,
- sign: signer,
- }, nil
-}
-
-func makeHMACSignFunc(hfunc func() hash.Hash) hmacSignFunc {
- return hmacSignFunc(func(payload []byte, key []byte) ([]byte, error) {
- h := hmac.New(hfunc, key)
- h.Write(payload)
- return h.Sum(nil), nil
- })
-}
-
-// Algorithm returns the signer algorithm
-func (s HMACSigner) Algorithm() jwa.SignatureAlgorithm {
- return s.alg
-}
-
-// Sign signs payload with a Symmetric key
-func (s HMACSigner) Sign(payload []byte, key interface{}) ([]byte, error) {
- hmackey, ok := key.([]byte)
- if !ok {
- return nil, fmt.Errorf(`invalid key type %T. []byte is required`, key)
- }
-
- if len(hmackey) == 0 {
- return nil, errors.New(`missing key while signing payload`)
- }
-
- return s.sign(payload, hmackey)
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/interface.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/interface.go
deleted file mode 100644
index 2ef2bee486..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/interface.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package sign
-
-import (
- "crypto/ecdsa"
- "crypto/rsa"
- "io"
-
- "github.com/open-policy-agent/opa/internal/jwx/jwa"
-)
-
-// Signer provides a common interface for supported alg signing methods
-type Signer interface {
- // Sign creates a signature for the given `payload`.
- // `key` is the key used for signing the payload, and is usually
- // the private key type associated with the signature method. For example,
- // for `jwa.RSXXX` and `jwa.PSXXX` types, you need to pass the
- // `*"crypto/rsa".PrivateKey` type.
- // Check the documentation for each signer for details
- Sign(payload []byte, key interface{}) ([]byte, error)
-
- Algorithm() jwa.SignatureAlgorithm
-}
-
-type rsaSignFunc func([]byte, *rsa.PrivateKey) ([]byte, error)
-
-// RSASigner uses crypto/rsa to sign the payloads.
-type RSASigner struct {
- alg jwa.SignatureAlgorithm
- sign rsaSignFunc
-}
-
-type ecdsaSignFunc func([]byte, *ecdsa.PrivateKey, io.Reader) ([]byte, error)
-
-// ECDSASigner uses crypto/ecdsa to sign the payloads.
-type ECDSASigner struct {
- alg jwa.SignatureAlgorithm
- sign ecdsaSignFunc
-}
-
-type hmacSignFunc func([]byte, []byte) ([]byte, error)
-
-// HMACSigner uses crypto/hmac to sign the payloads.
-type HMACSigner struct {
- alg jwa.SignatureAlgorithm
- sign hmacSignFunc
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/rsa.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/rsa.go
deleted file mode 100644
index 1e02993eb0..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/rsa.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package sign
-
-import (
- "crypto"
- "crypto/rand"
- "crypto/rsa"
- "errors"
- "fmt"
-
- "github.com/open-policy-agent/opa/internal/jwx/jwa"
-)
-
-var rsaSignFuncs = map[jwa.SignatureAlgorithm]rsaSignFunc{}
-
-func init() {
- algs := map[jwa.SignatureAlgorithm]struct {
- Hash crypto.Hash
- SignFunc func(crypto.Hash) rsaSignFunc
- }{
- jwa.RS256: {
- Hash: crypto.SHA256,
- SignFunc: makeSignPKCS1v15,
- },
- jwa.RS384: {
- Hash: crypto.SHA384,
- SignFunc: makeSignPKCS1v15,
- },
- jwa.RS512: {
- Hash: crypto.SHA512,
- SignFunc: makeSignPKCS1v15,
- },
- jwa.PS256: {
- Hash: crypto.SHA256,
- SignFunc: makeSignPSS,
- },
- jwa.PS384: {
- Hash: crypto.SHA384,
- SignFunc: makeSignPSS,
- },
- jwa.PS512: {
- Hash: crypto.SHA512,
- SignFunc: makeSignPSS,
- },
- }
-
- for alg, item := range algs {
- rsaSignFuncs[alg] = item.SignFunc(item.Hash)
- }
-}
-
-func makeSignPKCS1v15(hash crypto.Hash) rsaSignFunc {
- return rsaSignFunc(func(payload []byte, key *rsa.PrivateKey) ([]byte, error) {
- h := hash.New()
- h.Write(payload)
- return rsa.SignPKCS1v15(rand.Reader, key, hash, h.Sum(nil))
- })
-}
-
-func makeSignPSS(hash crypto.Hash) rsaSignFunc {
- return rsaSignFunc(func(payload []byte, key *rsa.PrivateKey) ([]byte, error) {
- h := hash.New()
- h.Write(payload)
- return rsa.SignPSS(rand.Reader, key, hash, h.Sum(nil), &rsa.PSSOptions{
- SaltLength: rsa.PSSSaltLengthAuto,
- })
- })
-}
-
-func newRSA(alg jwa.SignatureAlgorithm) (*RSASigner, error) {
- signfn, ok := rsaSignFuncs[alg]
- if !ok {
- return nil, fmt.Errorf(`unsupported algorithm while trying to create RSA signer: %s`, alg)
- }
- return &RSASigner{
- alg: alg,
- sign: signfn,
- }, nil
-}
-
-// Algorithm returns the signer algorithm
-func (s RSASigner) Algorithm() jwa.SignatureAlgorithm {
- return s.alg
-}
-
-// Sign creates a signature using crypto/rsa. key must be a non-nil instance of
-// `*"crypto/rsa".PrivateKey`.
-func (s RSASigner) Sign(payload []byte, key interface{}) ([]byte, error) {
- if key == nil {
- return nil, errors.New(`missing private key while signing payload`)
- }
- rsakey, ok := key.(*rsa.PrivateKey)
- if !ok {
- return nil, fmt.Errorf(`invalid key type %T. *rsa.PrivateKey is required`, key)
- }
-
- return s.sign(payload, rsakey)
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/sign.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/sign.go
deleted file mode 100644
index 7db7bbd69c..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/sign.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package sign
-
-import (
- "crypto/x509"
- "encoding/pem"
- "fmt"
-
- "github.com/open-policy-agent/opa/internal/jwx/jwa"
-)
-
-// New creates a signer that signs payloads using the given signature algorithm.
-func New(alg jwa.SignatureAlgorithm) (Signer, error) {
- switch alg {
- case jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512:
- return newRSA(alg)
- case jwa.ES256, jwa.ES384, jwa.ES512:
- return newECDSA(alg)
- case jwa.HS256, jwa.HS384, jwa.HS512:
- return newHMAC(alg)
- default:
- return nil, fmt.Errorf(`unsupported signature algorithm %s`, alg)
- }
-}
-
-// GetSigningKey returns a *rsa.PrivateKey or *ecdsa.PrivateKey typically encoded in PEM blocks of type "RSA PRIVATE KEY"
-// or "EC PRIVATE KEY" for RSA and ECDSA family of algorithms.
-// For HMAC family, it return a []byte value
-func GetSigningKey(key string, alg jwa.SignatureAlgorithm) (interface{}, error) {
- switch alg {
- case jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512:
- block, _ := pem.Decode([]byte(key))
- if block == nil {
- return nil, fmt.Errorf("failed to parse PEM block containing the key")
- }
-
- priv, err := x509.ParsePKCS1PrivateKey(block.Bytes)
- if err != nil {
- pkcs8priv, err2 := x509.ParsePKCS8PrivateKey(block.Bytes)
- if err2 != nil {
- return nil, fmt.Errorf("error parsing private key (%v), (%v)", err, err2)
- }
- return pkcs8priv, nil
- }
- return priv, nil
- case jwa.ES256, jwa.ES384, jwa.ES512:
- block, _ := pem.Decode([]byte(key))
- if block == nil {
- return nil, fmt.Errorf("failed to parse PEM block containing the key")
- }
-
- priv, err := x509.ParseECPrivateKey(block.Bytes)
- if err != nil {
- pkcs8priv, err2 := x509.ParsePKCS8PrivateKey(block.Bytes)
- if err2 != nil {
- return nil, fmt.Errorf("error parsing private key (%v), (%v)", err, err2)
- }
- return pkcs8priv, nil
- }
- return priv, nil
- case jwa.HS256, jwa.HS384, jwa.HS512:
- return []byte(key), nil
- default:
- return nil, fmt.Errorf("unsupported signature algorithm: %s", alg)
- }
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/ecdsa.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/ecdsa.go
deleted file mode 100644
index 0d4971dc19..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/ecdsa.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package verify
-
-import (
- "crypto"
- "crypto/ecdsa"
- "errors"
- "fmt"
- "math/big"
-
- "github.com/open-policy-agent/opa/internal/jwx/jwa"
-)
-
-var ecdsaVerifyFuncs = map[jwa.SignatureAlgorithm]ecdsaVerifyFunc{}
-
-func init() {
- algs := map[jwa.SignatureAlgorithm]crypto.Hash{
- jwa.ES256: crypto.SHA256,
- jwa.ES384: crypto.SHA384,
- jwa.ES512: crypto.SHA512,
- }
-
- for alg, h := range algs {
- ecdsaVerifyFuncs[alg] = makeECDSAVerifyFunc(h)
- }
-}
-
-func makeECDSAVerifyFunc(hash crypto.Hash) ecdsaVerifyFunc {
- return ecdsaVerifyFunc(func(payload []byte, signature []byte, key *ecdsa.PublicKey) error {
-
- r, s := &big.Int{}, &big.Int{}
- n := len(signature) / 2
- r.SetBytes(signature[:n])
- s.SetBytes(signature[n:])
-
- h := hash.New()
- h.Write(payload)
-
- if !ecdsa.Verify(key, h.Sum(nil), r, s) {
- return errors.New(`failed to verify signature using ecdsa`)
- }
- return nil
- })
-}
-
-func newECDSA(alg jwa.SignatureAlgorithm) (*ECDSAVerifier, error) {
- verifyfn, ok := ecdsaVerifyFuncs[alg]
- if !ok {
- return nil, fmt.Errorf(`unsupported algorithm while trying to create ECDSA verifier: %s`, alg)
- }
-
- return &ECDSAVerifier{
- verify: verifyfn,
- }, nil
-}
-
-// Verify checks whether the signature for a given input and key is correct
-func (v ECDSAVerifier) Verify(payload []byte, signature []byte, key interface{}) error {
- if key == nil {
- return errors.New(`missing public key while verifying payload`)
- }
- ecdsakey, ok := key.(*ecdsa.PublicKey)
- if !ok {
- return fmt.Errorf(`invalid key type %T. *ecdsa.PublicKey is required`, key)
- }
-
- return v.verify(payload, signature, ecdsakey)
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/hmac.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/hmac.go
deleted file mode 100644
index d8498f50f2..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/hmac.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package verify
-
-import (
- "crypto/hmac"
- "errors"
- "fmt"
-
- "github.com/open-policy-agent/opa/internal/jwx/jwa"
- "github.com/open-policy-agent/opa/internal/jwx/jws/sign"
-)
-
-func newHMAC(alg jwa.SignatureAlgorithm) (*HMACVerifier, error) {
-
- s, err := sign.New(alg)
- if err != nil {
- return nil, fmt.Errorf("failed to generate HMAC signer: %w", err)
- }
- return &HMACVerifier{signer: s}, nil
-}
-
-// Verify checks whether the signature for a given input and key is correct
-func (v HMACVerifier) Verify(signingInput, signature []byte, key interface{}) (err error) {
-
- expected, err := v.signer.Sign(signingInput, key)
- if err != nil {
- return fmt.Errorf("failed to generated signature: %w", err)
- }
-
- if !hmac.Equal(signature, expected) {
- return errors.New("failed to match hmac signature")
- }
- return nil
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/interface.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/interface.go
deleted file mode 100644
index f5beb69741..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/interface.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package verify
-
-import (
- "crypto/ecdsa"
- "crypto/rsa"
-
- "github.com/open-policy-agent/opa/internal/jwx/jws/sign"
-)
-
-// Verifier provides a common interface for supported alg verification methods
-type Verifier interface {
- // Verify checks whether the payload and signature are valid for
- // the given key.
- // `key` is the key used for verifying the payload, and is usually
- // the public key associated with the signature method. For example,
- // for `jwa.RSXXX` and `jwa.PSXXX` types, you need to pass the
- // `*"crypto/rsa".PublicKey` type.
- // Check the documentation for each verifier for details
- Verify(payload []byte, signature []byte, key interface{}) error
-}
-
-type rsaVerifyFunc func([]byte, []byte, *rsa.PublicKey) error
-
-// RSAVerifier implements the Verifier interface
-type RSAVerifier struct {
- verify rsaVerifyFunc
-}
-
-type ecdsaVerifyFunc func([]byte, []byte, *ecdsa.PublicKey) error
-
-// ECDSAVerifier implements the Verifier interface
-type ECDSAVerifier struct {
- verify ecdsaVerifyFunc
-}
-
-// HMACVerifier implements the Verifier interface
-type HMACVerifier struct {
- signer sign.Signer
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/rsa.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/rsa.go
deleted file mode 100644
index edc560dfa6..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/rsa.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package verify
-
-import (
- "crypto"
- "crypto/rsa"
- "errors"
- "fmt"
-
- "github.com/open-policy-agent/opa/internal/jwx/jwa"
-)
-
-var rsaVerifyFuncs = map[jwa.SignatureAlgorithm]rsaVerifyFunc{}
-
-func init() {
- algs := map[jwa.SignatureAlgorithm]struct {
- Hash crypto.Hash
- VerifyFunc func(crypto.Hash) rsaVerifyFunc
- }{
- jwa.RS256: {
- Hash: crypto.SHA256,
- VerifyFunc: makeVerifyPKCS1v15,
- },
- jwa.RS384: {
- Hash: crypto.SHA384,
- VerifyFunc: makeVerifyPKCS1v15,
- },
- jwa.RS512: {
- Hash: crypto.SHA512,
- VerifyFunc: makeVerifyPKCS1v15,
- },
- jwa.PS256: {
- Hash: crypto.SHA256,
- VerifyFunc: makeVerifyPSS,
- },
- jwa.PS384: {
- Hash: crypto.SHA384,
- VerifyFunc: makeVerifyPSS,
- },
- jwa.PS512: {
- Hash: crypto.SHA512,
- VerifyFunc: makeVerifyPSS,
- },
- }
-
- for alg, item := range algs {
- rsaVerifyFuncs[alg] = item.VerifyFunc(item.Hash)
- }
-}
-
-func makeVerifyPKCS1v15(hash crypto.Hash) rsaVerifyFunc {
- return rsaVerifyFunc(func(payload, signature []byte, key *rsa.PublicKey) error {
- h := hash.New()
- h.Write(payload)
- return rsa.VerifyPKCS1v15(key, hash, h.Sum(nil), signature)
- })
-}
-
-func makeVerifyPSS(hash crypto.Hash) rsaVerifyFunc {
- return rsaVerifyFunc(func(payload, signature []byte, key *rsa.PublicKey) error {
- h := hash.New()
- h.Write(payload)
- return rsa.VerifyPSS(key, hash, h.Sum(nil), signature, nil)
- })
-}
-
-func newRSA(alg jwa.SignatureAlgorithm) (*RSAVerifier, error) {
- verifyfn, ok := rsaVerifyFuncs[alg]
- if !ok {
- return nil, fmt.Errorf(`unsupported algorithm while trying to create RSA verifier: %s`, alg)
- }
-
- return &RSAVerifier{
- verify: verifyfn,
- }, nil
-}
-
-// Verify checks if a JWS is valid.
-func (v RSAVerifier) Verify(payload, signature []byte, key interface{}) error {
- if key == nil {
- return errors.New(`missing public key while verifying payload`)
- }
- rsaKey, ok := key.(*rsa.PublicKey)
- if !ok {
- return fmt.Errorf(`invalid key type %T. *rsa.PublicKey is required`, key)
- }
-
- return v.verify(payload, signature, rsaKey)
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/verify.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/verify.go
deleted file mode 100644
index 05720a64e0..0000000000
--- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/verify.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package verify
-
-import (
- "crypto/ecdsa"
- "crypto/rsa"
- "crypto/x509"
- "encoding/pem"
- "fmt"
-
- "github.com/open-policy-agent/opa/internal/jwx/jwa"
-)
-
-// New creates a new JWS verifier using the specified algorithm
-// and the public key
-func New(alg jwa.SignatureAlgorithm) (Verifier, error) {
- switch alg {
- case jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512:
- return newRSA(alg)
- case jwa.ES256, jwa.ES384, jwa.ES512:
- return newECDSA(alg)
- case jwa.HS256, jwa.HS384, jwa.HS512:
- return newHMAC(alg)
- default:
- return nil, fmt.Errorf(`unsupported signature algorithm: %s`, alg)
- }
-}
-
-// GetSigningKey returns a *rsa.PublicKey or *ecdsa.PublicKey typically encoded in PEM blocks of type "PUBLIC KEY",
-// for RSA and ECDSA family of algorithms.
-// For HMAC family, it return a []byte value
-func GetSigningKey(key string, alg jwa.SignatureAlgorithm) (interface{}, error) {
- switch alg {
- case jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512, jwa.ES256, jwa.ES384, jwa.ES512:
- block, _ := pem.Decode([]byte(key))
- if block == nil {
- return nil, fmt.Errorf("failed to parse PEM block containing the key")
- }
-
- pub, err := x509.ParsePKIXPublicKey(block.Bytes)
- if err != nil {
- return nil, err
- }
-
- switch pub := pub.(type) {
- case *rsa.PublicKey, *ecdsa.PublicKey:
- return pub, nil
- default:
- return nil, fmt.Errorf("invalid key type %T", pub)
- }
- case jwa.HS256, jwa.HS384, jwa.HS512:
- return []byte(key), nil
- default:
- return nil, fmt.Errorf("unsupported signature algorithm: %s", alg)
- }
-}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/merge/merge.go b/vendor/github.com/open-policy-agent/opa/internal/merge/merge.go
index 16f39350be..ba1a09c329 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/merge/merge.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/merge/merge.go
@@ -8,7 +8,7 @@ package merge
// InterfaceMaps returns the result of merging a and b. If a and b cannot be
// merged because of conflicting key-value pairs, ok is false.
-func InterfaceMaps(a map[string]interface{}, b map[string]interface{}) (map[string]interface{}, bool) {
+func InterfaceMaps(a map[string]any, b map[string]any) (map[string]any, bool) {
if a == nil {
return b, true
@@ -21,7 +21,7 @@ func InterfaceMaps(a map[string]interface{}, b map[string]interface{}) (map[stri
return merge(a, b), true
}
-func merge(a, b map[string]interface{}) map[string]interface{} {
+func merge(a, b map[string]any) map[string]any {
for k := range b {
@@ -32,8 +32,8 @@ func merge(a, b map[string]interface{}) map[string]interface{} {
continue
}
- existObj := exist.(map[string]interface{})
- addObj := add.(map[string]interface{})
+ existObj := exist.(map[string]any)
+ addObj := add.(map[string]any)
a[k] = merge(existObj, addObj)
}
@@ -41,7 +41,7 @@ func merge(a, b map[string]interface{}) map[string]interface{} {
return a
}
-func hasConflicts(a, b map[string]interface{}) bool {
+func hasConflicts(a, b map[string]any) bool {
for k := range b {
add := b[k]
@@ -50,8 +50,8 @@ func hasConflicts(a, b map[string]interface{}) bool {
continue
}
- existObj, existOk := exist.(map[string]interface{})
- addObj, addOk := add.(map[string]interface{})
+ existObj, existOk := exist.(map[string]any)
+ addObj, addOk := add.(map[string]any)
if !existOk || !addOk {
return true
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go b/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go
index b75d26ddab..8d59158717 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go
@@ -11,10 +11,10 @@ import (
"io"
"sort"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/ast/location"
"github.com/open-policy-agent/opa/internal/debug"
- "github.com/open-policy-agent/opa/ir"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/ast/location"
+ "github.com/open-policy-agent/opa/v1/ir"
)
// QuerySet represents the input to the planner.
@@ -51,10 +51,10 @@ type Planner struct {
// debugf prepends the planner location. We're passing callstack depth 2 because
// it should still log the file location of p.debugf.
-func (p *Planner) debugf(format string, args ...interface{}) {
+func (p *Planner) debugf(format string, args ...any) {
var msg string
if p.loc != nil {
- msg = fmt.Sprintf("%s: "+format, append([]interface{}{p.loc}, args...)...)
+ msg = fmt.Sprintf("%s: "+format, append([]any{p.loc}, args...)...)
} else {
msg = fmt.Sprintf(format, args...)
}
@@ -211,23 +211,28 @@ func (p *Planner) planRules(rules []*ast.Rule) (string, error) {
// Set the location to the rule head.
p.loc = rules[0].Head.Loc()
+ pcount := p.funcs.argVars()
+ params := make([]ir.Local, 0, pcount+len(rules[0].Head.Args))
+ for range pcount {
+ params = append(params, p.newLocal())
+ }
// Create function definition for rules.
fn := &ir.Func{
- Name: fmt.Sprintf("g%d.%s", p.funcs.gen(), path),
- Params: []ir.Local{
- p.newLocal(), // input document
- p.newLocal(), // data document
- },
+ Name: fmt.Sprintf("g%d.%s", p.funcs.gen(), path),
+ Params: params,
Return: p.newLocal(),
Path: append([]string{fmt.Sprintf("g%d", p.funcs.gen())}, pathPieces...),
}
// Initialize parameters for functions.
- for i := 0; i < len(rules[0].Head.Args); i++ {
+ for range len(rules[0].Head.Args) {
fn.Params = append(fn.Params, p.newLocal())
}
- params := fn.Params[2:]
+ // only those added as formal parameters:
+ // f(x, y) is planned as f(data, input, x, y)
+ // pcount > 2 means there are vars passed along through with replacements by variables
+ params = fn.Params[pcount:]
// Initialize return value for partial set/object rules. Complete document
// rules assign directly to `fn.Return`.
@@ -301,10 +306,11 @@ func (p *Planner) planRules(rules []*ast.Rule) (string, error) {
// Setup planner for block.
p.lnext = lnext
- p.vars = newVarstack(map[ast.Var]ir.Local{
- ast.InputRootDocument.Value.(ast.Var): fn.Params[0],
- ast.DefaultRootDocument.Value.(ast.Var): fn.Params[1],
- })
+ vs := make(map[ast.Var]ir.Local, p.funcs.argVars())
+ for i, v := range p.funcs.vars() {
+ vs[v] = fn.Params[i]
+ }
+ p.vars = newVarstack(vs)
curr := &ir.Block{}
*blocks = append(*blocks, curr)
@@ -385,7 +391,7 @@ func (p *Planner) planRules(rules []*ast.Rule) (string, error) {
return nil
})
default:
- return fmt.Errorf("illegal rule kind")
+ return errors.New("illegal rule kind")
}
})
})
@@ -497,7 +503,6 @@ func (p *Planner) planDotOr(obj ir.Local, key ir.Operand, or stmtFactory, iter p
func (p *Planner) planNestedObjects(obj ir.Local, ref ast.Ref, iter planLocalIter) error {
if len(ref) == 0 {
- //return fmt.Errorf("nested object construction didn't create object")
return iter(obj)
}
@@ -673,13 +678,17 @@ func (p *Planner) planWith(e *ast.Expr, iter planiter) error {
values := make([]*ast.Term, 0, len(e.With)) // NOTE(sr): we could be overallocating if there are builtin replacements
targets := make([]ast.Ref, 0, len(e.With))
+ vars := []ast.Var{}
mocks := frame{}
for _, w := range e.With {
v := w.Target.Value.(ast.Ref)
switch {
- case p.isFunction(v): // nothing to do
+ case p.isFunctionOrBuiltin(v): // track var values
+ if wvar, ok := w.Value.Value.(ast.Var); ok {
+ vars = append(vars, wvar)
+ }
case ast.DefaultRootDocument.Equal(v[0]) ||
ast.InputRootDocument.Equal(v[0]):
@@ -736,7 +745,7 @@ func (p *Planner) planWith(e *ast.Expr, iter planiter) error {
// planning of this expression (transitively).
shadowing := p.dataRefsShadowRuletrie(dataRefs) || len(mocks) > 0
if shadowing {
- p.funcs.Push(map[string]string{})
+ p.funcs.Push(map[string]string{}, vars)
for _, ref := range dataRefs {
p.rules.Push(ref)
}
@@ -757,7 +766,7 @@ func (p *Planner) planWith(e *ast.Expr, iter planiter) error {
p.mocks.PushFrame(mocks)
if shadowing {
- p.funcs.Push(map[string]string{})
+ p.funcs.Push(map[string]string{}, vars)
for _, ref := range dataRefs {
p.rules.Push(ref)
}
@@ -991,8 +1000,16 @@ func (p *Planner) planExprCall(e *ast.Expr, iter planiter) error {
op := e.Operator()
if replacement := p.mocks.Lookup(operator); replacement != nil {
- switch r := replacement.Value.(type) {
- case ast.Ref:
+ if _, ok := replacement.Value.(ast.Var); ok {
+ var arity int
+ if node := p.rules.Lookup(op); node != nil {
+ arity = node.Arity() // NB(sr): We don't need to plan what isn't called, only lookup arity
+ } else if bi, ok := p.decls[operator]; ok {
+ arity = bi.Decl.Arity()
+ }
+ return p.planExprCallValue(replacement, arity, operands, iter)
+ }
+ if r, ok := replacement.Value.(ast.Ref); ok {
if !r.HasPrefix(ast.DefaultRootRef) && !r.HasPrefix(ast.InputRootRef) {
// replacement is builtin
operator = r.String()
@@ -1020,7 +1037,7 @@ func (p *Planner) planExprCall(e *ast.Expr, iter planiter) error {
// replacement is a value, or ref
if bi, ok := p.decls[operator]; ok {
- return p.planExprCallValue(replacement, len(bi.Decl.FuncArgs().Args), operands, iter)
+ return p.planExprCallValue(replacement, bi.Decl.Arity(), operands, iter)
}
if node := p.rules.Lookup(op); node != nil {
return p.planExprCallValue(replacement, node.Arity(), operands, iter)
@@ -1037,7 +1054,7 @@ func (p *Planner) planExprCall(e *ast.Expr, iter planiter) error {
args = p.defaultOperands()
} else if decl, ok := p.decls[operator]; ok {
relation = decl.Relation
- arity = len(decl.Decl.Args())
+ arity = decl.Decl.Arity()
void = decl.Decl.Result() == nil
name = operator
p.externs[operator] = decl
@@ -1147,7 +1164,7 @@ func (p *Planner) planExprCallFunc(name string, arity int, void bool, operands [
})
default:
- return fmt.Errorf("impossible replacement, arity mismatch")
+ return errors.New("impossible replacement, arity mismatch")
}
}
@@ -1173,7 +1190,7 @@ func (p *Planner) planExprCallValue(value *ast.Term, arity int, operands []*ast.
})
})
default:
- return fmt.Errorf("impossible replacement, arity mismatch")
+ return errors.New("impossible replacement, arity mismatch")
}
}
@@ -1519,7 +1536,7 @@ func (p *Planner) planValue(t ast.Value, loc *ast.Location, iter planiter) error
p.loc = loc
return p.planObjectComprehension(v, iter)
default:
- return fmt.Errorf("%v term not implemented", ast.TypeName(v))
+ return fmt.Errorf("%v term not implemented", ast.ValueName(v))
}
}
@@ -1564,9 +1581,7 @@ func (p *Planner) planString(str ast.String, iter planiter) error {
}
func (p *Planner) planVar(v ast.Var, iter planiter) error {
- p.ltarget = op(p.vars.GetOrElse(v, func() ir.Local {
- return p.newLocal()
- }))
+ p.ltarget = op(p.vars.GetOrElse(v, p.newLocal))
return iter()
}
@@ -1750,7 +1765,7 @@ func (p *Planner) planRef(ref ast.Ref, iter planiter) error {
head, ok := ref[0].Value.(ast.Var)
if !ok {
- return fmt.Errorf("illegal ref: non-var head")
+ return errors.New("illegal ref: non-var head")
}
if head.Compare(ast.DefaultRootDocument.Value) == 0 {
@@ -1767,7 +1782,7 @@ func (p *Planner) planRef(ref ast.Ref, iter planiter) error {
p.ltarget, ok = p.vars.GetOp(head)
if !ok {
- return fmt.Errorf("illegal ref: unsafe head")
+ return errors.New("illegal ref: unsafe head")
}
return p.planRefRec(ref, 1, iter)
@@ -1924,12 +1939,15 @@ func (p *Planner) planRefData(virtual *ruletrie, base *baseptr, ref ast.Ref, ind
if err != nil {
return err
}
-
- p.appendStmt(&ir.CallStmt{
+ call := ir.CallStmt{
Func: funcName,
- Args: p.defaultOperands(),
+ Args: make([]ir.Operand, 0, p.funcs.argVars()),
Result: p.ltarget.Value.(ir.Local),
- })
+ }
+ for _, v := range p.funcs.vars() {
+ call.Args = append(call.Args, p.vars.GetOpOrEmpty(v))
+ }
+ p.appendStmt(&call)
return p.planRefRec(ref, index+1, iter)
}
@@ -2365,6 +2383,10 @@ func rewrittenVar(vars map[ast.Var]ast.Var, k ast.Var) ast.Var {
return rw
}
+func dont() ([][]*ast.Rule, []ir.Operand, int, bool) {
+ return nil, nil, 0, false
+}
+
// optimizeLookup returns a set of rulesets and required statements planning
// the locals (strings) needed with the used local variables, and the index
// into ref's parth that is still to be planned; if the passed ref's vars
@@ -2381,9 +2403,6 @@ func rewrittenVar(vars map[ast.Var]ast.Var, k ast.Var) ast.Var {
// var actually matched_ -- so we don't know which subtree to evaluate
// with the results.
func (p *Planner) optimizeLookup(t *ruletrie, ref ast.Ref) ([][]*ast.Rule, []ir.Operand, int, bool) {
- dont := func() ([][]*ast.Rule, []ir.Operand, int, bool) {
- return nil, nil, 0, false
- }
if t == nil {
p.debugf("no optimization of %s: trie is nil", ref)
return dont()
@@ -2411,6 +2430,10 @@ outer:
opt = true
// take all children, they might match
for _, node := range nodes {
+ if nr := node.Rules(); len(nr) > 0 {
+ p.debugf("no optimization of %s: node with rules (%v)", ref, refsOfRules(nr))
+ return dont()
+ }
for _, child := range node.Children() {
if node := node.Get(child); node != nil {
nextNodes = append(nextNodes, node)
@@ -2418,8 +2441,12 @@ outer:
}
}
case ast.String:
- // take all children that either match or have a var key
+ // take all children that either match or have a var key // TODO(sr): Where's the code for the second part, having a var key?
for _, node := range nodes {
+ if nr := node.Rules(); len(nr) > 0 {
+ p.debugf("no optimization of %s: node with rules (%v)", ref, refsOfRules(nr))
+ return dont()
+ }
if node := node.Get(r); node != nil {
nextNodes = append(nextNodes, node)
}
@@ -2438,10 +2465,20 @@ outer:
// let us break, too.
all := 0
for _, node := range nodes {
- all += node.ChildrenCount()
+ if i < len(ref)-1 {
+ // Look ahead one term to only count those children relevant to your planned ref.
+ switch ref[i+1].Value.(type) {
+ case ast.Var:
+ all += node.ChildrenCount()
+ default:
+ if relChildren := node.Get(ref[i+1].Value); relChildren != nil {
+ all++
+ }
+ }
+ }
}
if all == 0 {
- p.debugf("ref %s: all nodes have 0 children, break", ref[0:index+1])
+ p.debugf("ref %s: all nodes have 0 relevant children, break", ref[0:index+1])
break
}
@@ -2534,19 +2571,30 @@ func (p *Planner) unseenVars(t *ast.Term) bool {
}
func (p *Planner) defaultOperands() []ir.Operand {
- return []ir.Operand{
- p.vars.GetOpOrEmpty(ast.InputRootDocument.Value.(ast.Var)),
- p.vars.GetOpOrEmpty(ast.DefaultRootDocument.Value.(ast.Var)),
+ pcount := p.funcs.argVars()
+ operands := make([]ir.Operand, pcount)
+ for i, v := range p.funcs.vars() {
+ operands[i] = p.vars.GetOpOrEmpty(v)
}
+ return operands
}
-func (p *Planner) isFunction(r ast.Ref) bool {
+func (p *Planner) isFunctionOrBuiltin(r ast.Ref) bool {
if node := p.rules.Lookup(r); node != nil {
return node.Arity() > 0
}
- return false
+ _, ok := p.decls[r.String()]
+ return ok
}
func op(v ir.Val) ir.Operand {
return ir.Operand{Value: v}
}
+
+func refsOfRules(rs []*ast.Rule) []string {
+ refs := make([]string, len(rs))
+ for i := range rs {
+ refs[i] = rs[i].Head.Ref().String()
+ }
+ return refs
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go b/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go
index f5d6f3fc6c..9f3d115293 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go
@@ -4,7 +4,7 @@ import (
"fmt"
"sort"
- "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/v1/ast"
)
// funcstack implements a simple map structure used to keep track of virtual
@@ -20,20 +20,44 @@ type funcstack struct {
}
type taggedPairs struct {
- pairs map[string]string
- gen int
+ pairs map[string]string
+ vars []ast.Var
+ vcount int
+ gen int
}
func newFuncstack() *funcstack {
return &funcstack{
- stack: []taggedPairs{{pairs: map[string]string{}, gen: 0}},
- next: 1}
+ stack: []taggedPairs{
+ {
+ pairs: map[string]string{},
+ gen: 0,
+ vars: []ast.Var{
+ ast.InputRootDocument.Value.(ast.Var),
+ ast.DefaultRootDocument.Value.(ast.Var),
+ },
+ vcount: 2,
+ },
+ },
+ next: 1}
}
func (p funcstack) last() taggedPairs {
return p.stack[len(p.stack)-1]
}
+func (p funcstack) argVars() int {
+ return p.last().vcount
+}
+
+func (p funcstack) vars() []ast.Var {
+ ret := make([]ast.Var, 0, p.last().vcount)
+ for i := range p.stack {
+ ret = append(ret, p.stack[i].vars...)
+ }
+ return ret
+}
+
func (p funcstack) Add(key, value string) {
p.last().pairs[key] = value
}
@@ -43,8 +67,13 @@ func (p funcstack) Get(key string) (string, bool) {
return value, ok
}
-func (p *funcstack) Push(funcs map[string]string) {
- p.stack = append(p.stack, taggedPairs{pairs: funcs, gen: p.next})
+func (p *funcstack) Push(funcs map[string]string, vars []ast.Var) {
+ p.stack = append(p.stack, taggedPairs{
+ pairs: funcs,
+ gen: p.next,
+ vars: vars,
+ vcount: p.last().vcount + len(vars),
+ })
p.next++
}
@@ -111,7 +140,7 @@ func (t *ruletrie) Rules() []*ast.Rule {
func (t *ruletrie) Push(key ast.Ref) {
node := t
- for i := 0; i < len(key)-1; i++ {
+ for i := range len(key) - 1 {
node = node.Get(key[i].Value)
if node == nil {
return
@@ -123,7 +152,7 @@ func (t *ruletrie) Push(key ast.Ref) {
func (t *ruletrie) Pop(key ast.Ref) {
node := t
- for i := 0; i < len(key)-1; i++ {
+ for i := range len(key) - 1 {
node = node.Get(key[i].Value)
if node == nil {
return
diff --git a/vendor/github.com/open-policy-agent/opa/internal/planner/varstack.go b/vendor/github.com/open-policy-agent/opa/internal/planner/varstack.go
index dccff1b5c1..0df6bcd8b2 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/planner/varstack.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/planner/varstack.go
@@ -5,8 +5,8 @@
package planner
import (
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/ir"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/ir"
)
type varstack []map[ast.Var]ir.Local
diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/compare.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/compare.go
index 1d0f25f8c2..e2514423b7 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/compare.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/compare.go
@@ -1,6 +1,6 @@
package crypto
-import "fmt"
+import "errors"
// ConstantTimeByteCompare is a constant-time byte comparison of x and y. This function performs an absolute comparison
// if the two byte slices assuming they represent a big-endian number.
@@ -11,12 +11,12 @@ import "fmt"
// +1 if x > y
func ConstantTimeByteCompare(x, y []byte) (int, error) {
if len(x) != len(y) {
- return 0, fmt.Errorf("slice lengths do not match")
+ return 0, errors.New("slice lengths do not match")
}
xLarger, yLarger := 0, 0
- for i := 0; i < len(x); i++ {
+ for i := range x {
xByte, yByte := int(x[i]), int(y[i])
x := ((yByte - xByte) >> 8) & 1
diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/ecc.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/ecc.go
index 758c73fcb3..12679a15be 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/ecc.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/ecc.go
@@ -7,6 +7,7 @@ import (
"crypto/hmac"
"encoding/asn1"
"encoding/binary"
+ "errors"
"fmt"
"hash"
"math"
@@ -82,7 +83,7 @@ func HMACKeyDerivation(hash func() hash.Hash, bitLen int, key []byte, label, con
// verify the requested bit length is not larger then the length encoding size
if int64(bitLen) > 0x7FFFFFFF {
- return nil, fmt.Errorf("bitLen is greater than 32-bits")
+ return nil, errors.New("bitLen is greater than 32-bits")
}
fixedInput := bytes.NewBuffer(nil)
diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go
index 179b5b5d5e..55e587e9f5 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go
@@ -11,7 +11,7 @@ import (
"time"
"github.com/open-policy-agent/opa/internal/version"
- "github.com/open-policy-agent/opa/logging"
+ "github.com/open-policy-agent/opa/v1/logging"
)
// Values taken from
diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/kms.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/kms.go
index 77c0bc9349..6dfb06a496 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/kms.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/kms.go
@@ -10,7 +10,7 @@ import (
"time"
"github.com/open-policy-agent/opa/internal/version"
- "github.com/open-policy-agent/opa/logging"
+ "github.com/open-policy-agent/opa/v1/logging"
)
// Values taken from
diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go
index bfb780754b..07aa568fa2 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go
@@ -8,18 +8,19 @@ import (
"bytes"
"crypto/hmac"
"crypto/sha256"
+ "encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"net/url"
- "sort"
"strings"
"time"
v4 "github.com/open-policy-agent/opa/internal/providers/aws/v4"
- "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/util"
)
func stringFromTerm(t *ast.Term) string {
@@ -67,19 +68,6 @@ func sha256MAC(message string, key []byte) []byte {
return mac.Sum(nil)
}
-func sortKeys(strMap map[string][]string) []string {
- keys := make([]string, len(strMap))
-
- i := 0
- for k := range strMap {
- keys[i] = k
- i++
- }
- sort.Strings(keys)
-
- return keys
-}
-
// SignRequest modifies an http.Request to include an AWS V4 signature based on the provided credentials.
func SignRequest(req *http.Request, service string, creds Credentials, theTime time.Time, sigVersion string) error {
// General ref. https://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html
@@ -168,7 +156,7 @@ func SignV4(headers map[string][]string, method string, theURL *url.URL, body []
canonicalReq += theURL.RawQuery + "\n" // RAW Query String
// include the values for the signed headers
- orderedKeys := sortKeys(headersToSign)
+ orderedKeys := util.KeysSorted(headersToSign)
for _, k := range orderedKeys {
canonicalReq += k + ":" + strings.Join(headersToSign[k], ",") + "\n"
}
@@ -202,7 +190,7 @@ func SignV4(headers map[string][]string, method string, theURL *url.URL, body []
authHeader := "AWS4-HMAC-SHA256 Credential=" + awsCreds.AccessKey + "/" + dateNow
authHeader += "/" + awsCreds.RegionName + "/" + service + "/aws4_request,"
authHeader += "SignedHeaders=" + headerList + ","
- authHeader += "Signature=" + fmt.Sprintf("%x", signature)
+ authHeader += "Signature=" + hex.EncodeToString(signature)
return authHeader, awsHeaders
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4a.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4a.go
index 929f2006e7..8f6d760e82 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4a.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4a.go
@@ -9,7 +9,7 @@ import (
"crypto/rand"
"crypto/sha256"
"encoding/hex"
- "fmt"
+ "errors"
"hash"
"io"
"math/big"
@@ -107,7 +107,7 @@ func deriveKeyFromAccessKeyPair(accessKey, secretKey string) (*ecdsa.PrivateKey,
counter++
if counter > 0xFF {
- return nil, fmt.Errorf("exhausted single byte external counter")
+ return nil, errors.New("exhausted single byte external counter")
}
}
d = d.Add(d, one)
@@ -146,7 +146,7 @@ func retrievePrivateKey(symmetric Credentials) (v4aCredentials, error) {
privateKey, err := deriveKeyFromAccessKeyPair(symmetric.AccessKey, symmetric.SecretKey)
if err != nil {
- return v4aCredentials{}, fmt.Errorf("failed to derive asymmetric key from credentials")
+ return v4aCredentials{}, errors.New("failed to derive asymmetric key from credentials")
}
creds := v4aCredentials{
@@ -216,7 +216,7 @@ func (s *httpSigner) Build() (signedRequest, error) {
signedHeaders, signedHeadersStr, canonicalHeaderStr := s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength)
- rawQuery := strings.Replace(query.Encode(), "+", "%20", -1)
+ rawQuery := strings.ReplaceAll(query.Encode(), "+", "%20")
canonicalURI := v4Internal.GetURIPath(req.URL)
@@ -280,7 +280,7 @@ func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature
return parts.String()
}
-func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) {
+func (*httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) {
signed = make(http.Header)
const hostHeader = "host"
@@ -314,7 +314,7 @@ func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, he
var canonicalHeaders strings.Builder
n := len(headers)
const colon = ':'
- for i := 0; i < n; i++ {
+ for i := range n {
if headers[i] == hostHeader {
canonicalHeaders.WriteString(hostHeader)
canonicalHeaders.WriteRune(colon)
diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go
index e033da7460..d43339c961 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go
@@ -5,7 +5,7 @@ import (
"io"
"net/http"
- "github.com/open-policy-agent/opa/logging"
+ "github.com/open-policy-agent/opa/v1/logging"
)
// DoRequestWithClient is a convenience function to get the body of an HTTP response with
@@ -18,7 +18,7 @@ func DoRequestWithClient(req *http.Request, client *http.Client, desc string, lo
}
defer resp.Body.Close()
- logger.WithFields(map[string]interface{}{
+ logger.WithFields(map[string]any{
"url": req.URL.String(),
"status": resp.Status,
"headers": resp.Header,
diff --git a/vendor/github.com/open-policy-agent/opa/internal/ref/ref.go b/vendor/github.com/open-policy-agent/opa/internal/ref/ref.go
index 6e84df4b08..653794b0a9 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/ref/ref.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/ref/ref.go
@@ -9,17 +9,14 @@ import (
"errors"
"strings"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/storage"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/storage"
)
// ParseDataPath returns a ref from the slash separated path s rooted at data.
// All path segments are treated as identifier strings.
func ParseDataPath(s string) (ast.Ref, error) {
-
- s = "/" + strings.TrimPrefix(s, "/")
-
- path, ok := storage.ParsePath(s)
+ path, ok := storage.ParsePath("/" + strings.TrimPrefix(s, "/"))
if !ok {
return nil, errors.New("invalid path")
}
@@ -29,7 +26,7 @@ func ParseDataPath(s string) (ast.Ref, error) {
// ArrayPath will take an ast.Array and build an ast.Ref using the ast.Terms in the Array
func ArrayPath(a *ast.Array) ast.Ref {
- var ref ast.Ref
+ ref := make(ast.Ref, 0, a.Len())
a.Foreach(func(term *ast.Term) {
ref = append(ref, term)
diff --git a/vendor/github.com/open-policy-agent/opa/internal/rego/opa/engine.go b/vendor/github.com/open-policy-agent/opa/internal/rego/opa/engine.go
index 36ee844504..7defdf788c 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/rego/opa/engine.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/rego/opa/engine.go
@@ -36,10 +36,10 @@ type EvalEngine interface {
Init() (EvalEngine, error)
Entrypoints(context.Context) (map[string]int32, error)
WithPolicyBytes([]byte) EvalEngine
- WithDataJSON(interface{}) EvalEngine
+ WithDataJSON(any) EvalEngine
Eval(context.Context, EvalOpts) (*Result, error)
- SetData(context.Context, interface{}) error
- SetDataPath(context.Context, []string, interface{}) error
+ SetData(context.Context, any) error
+ SetDataPath(context.Context, []string, any) error
RemoveDataPath(context.Context, []string) error
Close()
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/rego/opa/options.go b/vendor/github.com/open-policy-agent/opa/internal/rego/opa/options.go
index b58a05ee8e..97aa41bf0e 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/rego/opa/options.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/rego/opa/options.go
@@ -4,11 +4,11 @@ import (
"io"
"time"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/metrics"
- "github.com/open-policy-agent/opa/topdown/builtins"
- "github.com/open-policy-agent/opa/topdown/cache"
- "github.com/open-policy-agent/opa/topdown/print"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/metrics"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/topdown/cache"
+ "github.com/open-policy-agent/opa/v1/topdown/print"
)
// Result holds the evaluation result.
@@ -18,7 +18,7 @@ type Result struct {
// EvalOpts define options for performing an evaluation.
type EvalOpts struct {
- Input *interface{}
+ Input *any
Metrics metrics.Metrics
Entrypoint int32
Time time.Time
diff --git a/vendor/github.com/open-policy-agent/opa/internal/report/report.go b/vendor/github.com/open-policy-agent/opa/internal/report/report.go
index 145d0a9465..bc71d66a3c 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/report/report.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/report/report.go
@@ -6,43 +6,46 @@
package report
import (
+ "cmp"
"context"
"encoding/json"
+ "errors"
"fmt"
"net/http"
"os"
"runtime"
"strconv"
"strings"
- "sync"
"time"
- "github.com/open-policy-agent/opa/keys"
- "github.com/open-policy-agent/opa/logging"
+ "github.com/open-policy-agent/opa/internal/semver"
+ "github.com/open-policy-agent/opa/v1/keys"
+ "github.com/open-policy-agent/opa/v1/logging"
+ "github.com/open-policy-agent/opa/v1/version"
- "github.com/open-policy-agent/opa/plugins/rest"
- "github.com/open-policy-agent/opa/util"
- "github.com/open-policy-agent/opa/version"
+ "github.com/open-policy-agent/opa/v1/plugins/rest"
+ "github.com/open-policy-agent/opa/v1/util"
)
-// ExternalServiceURL is the base HTTP URL for a telemetry service.
-// If not otherwise specified it will use the hard coded default.
+// ExternalServiceURL is the base HTTP URL for a github instance used
+// to query for more recent version.
+// If not otherwise specified, it will use the hard-coded default, api.github.com.
+// GHRepo is the repository to use, and defaults to "open-policy-agent/opa"
//
// Override at build time via:
//
// -ldflags "-X github.com/open-policy-agent/opa/internal/report.ExternalServiceURL="
+// -ldflags "-X github.com/open-policy-agent/opa/internal/report.GHRepo="
//
-// This will be overridden if the OPA_TELEMETRY_SERVICE_URL environment variable
+// ExternalServiceURL will be overridden if the OPA_TELEMETRY_SERVICE_URL environment variable
// is provided.
-var ExternalServiceURL = "https://telemetry.openpolicyagent.org"
+var ExternalServiceURL = "https://api.github.com"
+var GHRepo = "open-policy-agent/opa"
// Reporter reports information such as the version, heap usage about the running OPA instance to an external service
-type Reporter struct {
- body map[string]any
- client rest.Client
-
- gatherers map[string]Gatherer
- gatherersMtx sync.Mutex
+type Reporter interface {
+ SendReport(ctx context.Context) (*DataResponse, error)
+ RegisterGatherer(key string, f Gatherer)
}
// Gatherer represents a mechanism to inject additional data in the telemetry report
@@ -50,7 +53,7 @@ type Gatherer func(ctx context.Context) (any, error)
// DataResponse represents the data returned by the external service
type DataResponse struct {
- Latest ReleaseDetails `json:"latest,omitempty"`
+ Latest ReleaseDetails `json:"latest"`
}
// ReleaseDetails holds information about the latest OPA release
@@ -66,24 +69,25 @@ type Options struct {
Logger logging.Logger
}
+type GHVersionCollector struct {
+ client rest.Client
+}
+
+type GHResponse struct {
+ TagName string `json:"tag_name,omitempty"` // latest OPA release tag
+ ReleaseNotes string `json:"html_url,omitempty"` // link to the OPA release notes
+ Download string `json:"assets_url,omitempty"` // link to download the OPA release
+}
+
// New returns an instance of the Reporter
-func New(id string, opts Options) (*Reporter, error) {
- r := Reporter{
- gatherers: map[string]Gatherer{},
- }
- r.body = map[string]any{
- "id": id,
- "version": version.Version,
- }
+func New(opts Options) (Reporter, error) {
+ r := GHVersionCollector{}
- url := os.Getenv("OPA_TELEMETRY_SERVICE_URL")
- if url == "" {
- url = ExternalServiceURL
- }
+ url := cmp.Or(os.Getenv("OPA_TELEMETRY_SERVICE_URL"), ExternalServiceURL)
- restConfig := []byte(fmt.Sprintf(`{
+ restConfig := fmt.Appendf(nil, `{
"url": %q,
- }`, url))
+ }`, url)
client, err := rest.New(restConfig, map[string]*keys.Config{}, rest.Logger(opts.Logger))
if err != nil {
@@ -99,21 +103,11 @@ func New(id string, opts Options) (*Reporter, error) {
// SendReport sends the telemetry report which includes information such as the OPA version, current memory usage to
// the external service
-func (r *Reporter) SendReport(ctx context.Context) (*DataResponse, error) {
+func (r *GHVersionCollector) SendReport(ctx context.Context) (*DataResponse, error) {
rCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
- r.gatherersMtx.Lock()
- defer r.gatherersMtx.Unlock()
- for key, g := range r.gatherers {
- var err error
- r.body[key], err = g(rCtx)
- if err != nil {
- return nil, fmt.Errorf("gather telemetry error for key %s: %w", key, err)
- }
- }
-
- resp, err := r.client.WithJSON(r.body).Do(rCtx, "POST", "/v1/version")
+ resp, err := r.client.Do(rCtx, "GET", fmt.Sprintf("/repos/%s/releases/latest", GHRepo))
if err != nil {
return nil, err
}
@@ -123,12 +117,12 @@ func (r *Reporter) SendReport(ctx context.Context) (*DataResponse, error) {
switch resp.StatusCode {
case http.StatusOK:
if resp.Body != nil {
- var result DataResponse
+ var result GHResponse
err := json.NewDecoder(resp.Body).Decode(&result)
if err != nil {
return nil, err
}
- return &result, nil
+ return createDataResponse(result)
}
return nil, nil
default:
@@ -136,10 +130,50 @@ func (r *Reporter) SendReport(ctx context.Context) (*DataResponse, error) {
}
}
-func (r *Reporter) RegisterGatherer(key string, f Gatherer) {
- r.gatherersMtx.Lock()
- r.gatherers[key] = f
- r.gatherersMtx.Unlock()
+func createDataResponse(ghResp GHResponse) (*DataResponse, error) {
+ if ghResp.TagName == "" {
+ return nil, errors.New("server response does not contain tag_name")
+ }
+
+ v := strings.TrimPrefix(version.Version, "v")
+ sv, err := semver.NewVersion(v)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse current version %q: %w", v, err)
+ }
+
+ latestV := strings.TrimPrefix(ghResp.TagName, "v")
+ latestSV, err := semver.NewVersion(latestV)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse latest version %q: %w", latestV, err)
+ }
+
+ isLatest := sv.Compare(*latestSV) >= 0
+
+ // Note: alternatively, we could look through the assets in the GH API response to find a matching asset,
+ // and use its URL. However, this is not guaranteed to be more robust, and wouldn't use the 'openpolicyagent.org' domain.
+ downloadLink := fmt.Sprintf("https://openpolicyagent.org/downloads/%v/opa_%v_%v",
+ ghResp.TagName, runtime.GOOS, runtime.GOARCH)
+
+ if runtime.GOARCH == "arm64" {
+ downloadLink = fmt.Sprintf("%v_static", downloadLink)
+ }
+
+ if strings.HasPrefix(runtime.GOOS, "win") {
+ downloadLink = fmt.Sprintf("%v.exe", downloadLink)
+ }
+
+ return &DataResponse{
+ Latest: ReleaseDetails{
+ Download: downloadLink,
+ ReleaseNotes: ghResp.ReleaseNotes,
+ LatestRelease: ghResp.TagName,
+ OPAUpToDate: isLatest,
+ },
+ }, nil
+}
+
+func (*GHVersionCollector) RegisterGatherer(_ string, _ Gatherer) {
+ // no-op for this implementation
}
// IsSet returns true if dr is populated.
diff --git a/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go b/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go
index b1a5b71577..de8ef87401 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go
@@ -12,12 +12,12 @@ import (
"path/filepath"
"strings"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/bundle"
storedversion "github.com/open-policy-agent/opa/internal/version"
- "github.com/open-policy-agent/opa/loader"
- "github.com/open-policy-agent/opa/metrics"
- "github.com/open-policy-agent/opa/storage"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/bundle"
+ "github.com/open-policy-agent/opa/v1/loader"
+ "github.com/open-policy-agent/opa/v1/metrics"
+ "github.com/open-policy-agent/opa/v1/storage"
)
// InsertAndCompileOptions contains the input for the operation.
@@ -29,6 +29,7 @@ type InsertAndCompileOptions struct {
MaxErrors int
EnablePrintStatements bool
ParserOptions ast.ParserOptions
+ BundleActivatorPlugin string
}
// InsertAndCompileResult contains the output of the operation.
@@ -41,7 +42,7 @@ type InsertAndCompileResult struct {
// store contents.
func InsertAndCompile(ctx context.Context, opts InsertAndCompileOptions) (*InsertAndCompileResult, error) {
if len(opts.Files.Documents) > 0 {
- if err := opts.Store.Write(ctx, opts.Txn, storage.AddOp, storage.Path{}, opts.Files.Documents); err != nil {
+ if err := opts.Store.Write(ctx, opts.Txn, storage.AddOp, storage.RootPath, opts.Files.Documents); err != nil {
return nil, fmt.Errorf("storage error: %w", err)
}
}
@@ -53,6 +54,7 @@ func InsertAndCompile(ctx context.Context, opts InsertAndCompileOptions) (*Inser
}
compiler := ast.NewCompiler().
+ WithDefaultRegoVersion(opts.ParserOptions.RegoVersion).
SetErrorLimit(opts.MaxErrors).
WithPathConflictsCheck(storage.NonEmpty(ctx, opts.Store, opts.Txn)).
WithEnablePrintStatements(opts.EnablePrintStatements)
@@ -67,6 +69,7 @@ func InsertAndCompile(ctx context.Context, opts InsertAndCompileOptions) (*Inser
Bundles: opts.Bundles,
ExtraModules: policies,
ParserOptions: opts.ParserOptions,
+ Plugin: opts.BundleActivatorPlugin,
}
err := bundle.Activate(activation)
@@ -121,10 +124,11 @@ func LoadPaths(paths []string,
asBundle bool,
bvc *bundle.VerificationConfig,
skipVerify bool,
+ bundleLazyLoading bool,
processAnnotations bool,
caps *ast.Capabilities,
fsys fs.FS) (*LoadPathsResult, error) {
- return LoadPathsForRegoVersion(ast.RegoV0, paths, filter, asBundle, bvc, skipVerify, processAnnotations, false, caps, fsys)
+ return LoadPathsForRegoVersion(ast.RegoV0, paths, filter, asBundle, bvc, skipVerify, bundleLazyLoading, processAnnotations, false, caps, fsys)
}
func LoadPathsForRegoVersion(regoVersion ast.RegoVersion,
@@ -133,6 +137,7 @@ func LoadPathsForRegoVersion(regoVersion ast.RegoVersion,
asBundle bool,
bvc *bundle.VerificationConfig,
skipVerify bool,
+ bundleLazyLoading bool,
processAnnotations bool,
followSymlinks bool,
caps *ast.Capabilities,
@@ -158,6 +163,7 @@ func LoadPathsForRegoVersion(regoVersion ast.RegoVersion,
WithFS(fsys).
WithBundleVerificationConfig(bvc).
WithSkipBundleVerification(skipVerify).
+ WithBundleLazyLoadingMode(bundleLazyLoading).
WithFilter(filter).
WithProcessAnnotation(processAnnotations).
WithCapabilities(caps).
@@ -170,12 +176,13 @@ func LoadPathsForRegoVersion(regoVersion ast.RegoVersion,
}
}
- if len(nonBundlePaths) == 0 {
+ if asBundle {
return &result, nil
}
files, err := loader.NewFileLoader().
WithFS(fsys).
+ WithBundleLazyLoadingMode(bundleLazyLoading).
WithProcessAnnotation(processAnnotations).
WithCapabilities(caps).
WithRegoVersion(regoVersion).
diff --git a/vendor/github.com/open-policy-agent/opa/internal/semver/semver.go b/vendor/github.com/open-policy-agent/opa/internal/semver/semver.go
index 389eeccc18..23c6c186d9 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/semver/semver.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/semver/semver.go
@@ -233,3 +233,18 @@ func validateIdentifier(id string) error {
// reIdentifier is a regular expression used to check that pre-release and metadata
// identifiers satisfy the spec requirements
var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`)
+
+// Compare compares two semver strings.
+func Compare(a, b string) int {
+ aV, err := NewVersion(strings.TrimPrefix(a, "v"))
+ if err != nil {
+ return -1
+ }
+
+ bV, err := NewVersion(strings.TrimPrefix(b, "v"))
+ if err != nil {
+ return 1
+ }
+
+ return aV.Compare(*bV)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/strings/strings.go b/vendor/github.com/open-policy-agent/opa/internal/strings/strings.go
index 08f3bf9182..f2838ac36a 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/strings/strings.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/strings/strings.go
@@ -57,7 +57,7 @@ func TruncateFilePaths(maxIdealWidth, maxWidth int, path ...string) (map[string]
}
// Drop the overall length down to match our substitution
- longestLocation = longestLocation - (len(lcs) - 3)
+ longestLocation -= (len(lcs) - 3)
}
return result, longestLocation
diff --git a/vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go b/vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go
index 1fc07f68c3..6d867262f5 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go
@@ -31,7 +31,7 @@ var ErrNotList = errors.New("not a list")
// MaxIndex is the maximum index that will be allowed by setIndex.
// The default value 65536 = 1024 * 64
-var MaxIndex = 65536
+const MaxIndex = 65536
// ToYAML takes a string of arguments and converts to a YAML document.
func ToYAML(s string) (string, error) {
@@ -46,8 +46,8 @@ func ToYAML(s string) (string, error) {
// Parse parses a set line.
//
// A set line is of the form name1=value1,name2=value2
-func Parse(s string) (map[string]interface{}, error) {
- vals := map[string]interface{}{}
+func Parse(s string) (map[string]any, error) {
+ vals := map[string]any{}
scanner := bytes.NewBufferString(s)
t := newParser(scanner, vals, false)
err := t.parse()
@@ -57,8 +57,8 @@ func Parse(s string) (map[string]interface{}, error) {
// ParseString parses a set line and forces a string value.
//
// A set line is of the form name1=value1,name2=value2
-func ParseString(s string) (map[string]interface{}, error) {
- vals := map[string]interface{}{}
+func ParseString(s string) (map[string]any, error) {
+ vals := map[string]any{}
scanner := bytes.NewBufferString(s)
t := newParser(scanner, vals, true)
err := t.parse()
@@ -69,7 +69,7 @@ func ParseString(s string) (map[string]interface{}, error) {
//
// If the strval string has a key that exists in dest, it overwrites the
// dest version.
-func ParseInto(s string, dest map[string]interface{}) error {
+func ParseInto(s string, dest map[string]any) error {
scanner := bytes.NewBufferString(s)
t := newParser(scanner, dest, false)
return t.parse()
@@ -78,7 +78,7 @@ func ParseInto(s string, dest map[string]interface{}) error {
// ParseIntoFile parses a filevals line and merges the result into dest.
//
// This method always returns a string as the value.
-func ParseIntoFile(s string, dest map[string]interface{}, runesToVal runesToVal) error {
+func ParseIntoFile(s string, dest map[string]any, runesToVal runesToVal) error {
scanner := bytes.NewBufferString(s)
t := newFileParser(scanner, dest, runesToVal)
return t.parse()
@@ -87,7 +87,7 @@ func ParseIntoFile(s string, dest map[string]interface{}, runesToVal runesToVal)
// ParseIntoString parses a strvals line and merges the result into dest.
//
// This method always returns a string as the value.
-func ParseIntoString(s string, dest map[string]interface{}) error {
+func ParseIntoString(s string, dest map[string]any) error {
scanner := bytes.NewBufferString(s)
t := newParser(scanner, dest, true)
return t.parse()
@@ -101,20 +101,20 @@ func ParseIntoString(s string, dest map[string]interface{}) error {
// where st is a boolean to figure out if we're forcing it to parse values as string
type parser struct {
sc *bytes.Buffer
- data map[string]interface{}
+ data map[string]any
runesToVal runesToVal
}
-type runesToVal func([]rune) (interface{}, error)
+type runesToVal func([]rune) (any, error)
-func newParser(sc *bytes.Buffer, data map[string]interface{}, stringBool bool) *parser {
- rs2v := func(rs []rune) (interface{}, error) {
+func newParser(sc *bytes.Buffer, data map[string]any, stringBool bool) *parser {
+ rs2v := func(rs []rune) (any, error) {
return typedVal(rs, stringBool), nil
}
return &parser{sc: sc, data: data, runesToVal: rs2v}
}
-func newFileParser(sc *bytes.Buffer, data map[string]interface{}, runesToVal runesToVal) *parser {
+func newFileParser(sc *bytes.Buffer, data map[string]any, runesToVal runesToVal) *parser {
return &parser{sc: sc, data: data, runesToVal: runesToVal}
}
@@ -139,7 +139,7 @@ func runeSet(r []rune) map[rune]bool {
return s
}
-func (t *parser) key(data map[string]interface{}) error {
+func (t *parser) key(data map[string]any) error {
stop := runeSet([]rune{'=', '[', ',', '.'})
for {
switch k, last, err := runesUntil(t.sc, stop); {
@@ -148,8 +148,6 @@ func (t *parser) key(data map[string]interface{}) error {
return err
}
return fmt.Errorf("key %q has no value", string(k))
- //set(data, string(k), "")
- //return err
case last == '[':
// We are in a list index context, so we need to set an index.
i, err := t.keyIndex()
@@ -158,9 +156,9 @@ func (t *parser) key(data map[string]interface{}) error {
}
kk := string(k)
// Find or create target list
- list := []interface{}{}
+ list := []any{}
if _, ok := data[kk]; ok {
- list = data[kk].([]interface{})
+ list = data[kk].([]any)
}
// Now we need to get the value after the ].
@@ -168,7 +166,7 @@ func (t *parser) key(data map[string]interface{}) error {
set(data, kk, list)
return err
case last == '=':
- //End of key. Consume =, Get value.
+ // End of key. Consume =, Get value.
// FIXME: Get value list first
vl, e := t.valList()
switch e {
@@ -196,9 +194,9 @@ func (t *parser) key(data map[string]interface{}) error {
return fmt.Errorf("key %q has no value (cannot end with ,)", string(k))
case last == '.':
// First, create or find the target map.
- inner := map[string]interface{}{}
+ inner := map[string]any{}
if _, ok := data[string(k)]; ok {
- inner = data[string(k)].(map[string]interface{})
+ inner = data[string(k)].(map[string]any)
}
// Recurse
@@ -212,7 +210,7 @@ func (t *parser) key(data map[string]interface{}) error {
}
}
-func set(data map[string]interface{}, key string, val interface{}) {
+func set(data map[string]any, key string, val any) {
// If key is empty, don't set it.
if len(key) == 0 {
return
@@ -220,7 +218,7 @@ func set(data map[string]interface{}, key string, val interface{}) {
data[key] = val
}
-func setIndex(list []interface{}, index int, val interface{}) (l2 []interface{}, err error) {
+func setIndex(list []any, index int, val any) (l2 []any, err error) {
// There are possible index values that are out of range on a target system
// causing a panic. This will catch the panic and return an error instead.
// The value of the index that causes a panic varies from system to system.
@@ -237,7 +235,7 @@ func setIndex(list []interface{}, index int, val interface{}) (l2 []interface{},
return list, fmt.Errorf("index of %d is greater than maximum supported index of %d", index, MaxIndex)
}
if len(list) <= index {
- newlist := make([]interface{}, index+1)
+ newlist := make([]any, index+1)
copy(newlist, list)
list = newlist
}
@@ -256,7 +254,7 @@ func (t *parser) keyIndex() (int, error) {
return strconv.Atoi(string(v))
}
-func (t *parser) listItem(list []interface{}, i int) ([]interface{}, error) {
+func (t *parser) listItem(list []any, i int) ([]any, error) {
if i < 0 {
return list, fmt.Errorf("negative %d index not allowed", i)
}
@@ -300,14 +298,14 @@ func (t *parser) listItem(list []interface{}, i int) ([]interface{}, error) {
return setIndex(list, i, list2)
case last == '.':
// We have a nested object. Send to t.key
- inner := map[string]interface{}{}
+ inner := map[string]any{}
if len(list) > i {
var ok bool
- inner, ok = list[i].(map[string]interface{})
+ inner, ok = list[i].(map[string]any)
if !ok {
// We have indices out of order. Initialize empty value.
- list[i] = map[string]interface{}{}
- inner = list[i].(map[string]interface{})
+ list[i] = map[string]any{}
+ inner = list[i].(map[string]any)
}
}
@@ -328,21 +326,21 @@ func (t *parser) val() ([]rune, error) {
return v, err
}
-func (t *parser) valList() ([]interface{}, error) {
+func (t *parser) valList() ([]any, error) {
r, _, e := t.sc.ReadRune()
if e != nil {
- return []interface{}{}, e
+ return []any{}, e
}
if r != '{' {
e = t.sc.UnreadRune()
if e != nil {
- return []interface{}{}, e
+ return []any{}, e
}
- return []interface{}{}, ErrNotList
+ return []any{}, ErrNotList
}
- list := []interface{}{}
+ list := []any{}
stop := runeSet([]rune{',', '}'})
for {
switch rs, last, err := runesUntil(t.sc, stop); {
@@ -356,7 +354,7 @@ func (t *parser) valList() ([]interface{}, error) {
if r, _, e := t.sc.ReadRune(); e == nil && r != ',' {
e = t.sc.UnreadRune()
if e != nil {
- return []interface{}{}, e
+ return []any{}, e
}
}
v, e := t.runesToVal(rs)
@@ -397,7 +395,7 @@ func inMap(k rune, m map[rune]bool) bool {
return ok
}
-func typedVal(v []rune, st bool) interface{} {
+func typedVal(v []rune, st bool) any {
val := string(v)
if st {
diff --git a/vendor/github.com/open-policy-agent/opa/internal/uuid/uuid.go b/vendor/github.com/open-policy-agent/opa/internal/uuid/uuid.go
index 5d925e68df..a18f024a25 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/uuid/uuid.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/uuid/uuid.go
@@ -32,12 +32,12 @@ func New(r io.Reader) (string, error) {
// if parsing fails, it will return an empty map. It will fill the map
// with some decoded values with fillMap
// ref: https://datatracker.ietf.org/doc/html/rfc4122
-func Parse(s string) (map[string]interface{}, error) {
+func Parse(s string) (map[string]any, error) {
uuid, err := uuid.Parse(s)
if err != nil {
return nil, err
}
- out := make(map[string]interface{}, getVersionLen(int(uuid.Version())))
+ out := make(map[string]any, getVersionLen(int(uuid.Version())))
fillMap(out, uuid)
return out, nil
}
@@ -46,7 +46,7 @@ func Parse(s string) (map[string]interface{}, error) {
// Version 1-2 has decodable values that could be of use, version 4 is random,
// and version 3,5 is not feasible to extract data. Generated with either MD5 or SHA1 hash
// ref: https://datatracker.ietf.org/doc/html/rfc4122 about creation of UUIDs
-func fillMap(m map[string]interface{}, u uuid.UUID) {
+func fillMap(m map[string]any, u uuid.UUID) {
m["version"] = int(u.Version())
m["variant"] = u.Variant().String()
switch version := m["version"]; version {
diff --git a/vendor/github.com/open-policy-agent/opa/internal/version/version.go b/vendor/github.com/open-policy-agent/opa/internal/version/version.go
index 1c2e9ecd01..1264278e44 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/version/version.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/version/version.go
@@ -10,8 +10,8 @@ import (
"fmt"
"runtime"
- "github.com/open-policy-agent/opa/storage"
- "github.com/open-policy-agent/opa/version"
+ "github.com/open-policy-agent/opa/v1/storage"
+ "github.com/open-policy-agent/opa/v1/version"
)
var versionPath = storage.MustParsePath("/system/version")
@@ -24,7 +24,7 @@ func Write(ctx context.Context, store storage.Store, txn storage.Transaction) er
return err
}
- return store.Write(ctx, txn, storage.AddOp, versionPath, map[string]interface{}{
+ return store.Write(ctx, txn, storage.AddOp, versionPath, map[string]any{
"version": version.Version,
"build_commit": version.Vcs,
"build_timestamp": version.Timestamp,
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go
index 35e6059c72..0695ce94fe 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go
@@ -7,6 +7,7 @@ package encoding
import (
"bytes"
"encoding/binary"
+ "errors"
"fmt"
"io"
@@ -105,7 +106,7 @@ func readMagic(r io.Reader) error {
if err := binary.Read(r, binary.LittleEndian, &v); err != nil {
return err
} else if v != constant.Magic {
- return fmt.Errorf("illegal magic value")
+ return errors.New("illegal magic value")
}
return nil
}
@@ -115,7 +116,7 @@ func readVersion(r io.Reader) error {
if err := binary.Read(r, binary.LittleEndian, &v); err != nil {
return err
} else if v != constant.Version {
- return fmt.Errorf("illegal wasm version")
+ return errors.New("illegal wasm version")
}
return nil
}
@@ -199,7 +200,7 @@ func readSections(r io.Reader, m *module.Module) error {
return fmt.Errorf("code section: %w", err)
}
default:
- return fmt.Errorf("illegal section id")
+ return errors.New("illegal section id")
}
}
}
@@ -269,7 +270,7 @@ func readNameMap(r io.Reader) ([]module.NameMap, error) {
return nil, err
}
nm := make([]module.NameMap, n)
- for i := uint32(0); i < n; i++ {
+ for i := range n {
var name string
id, err := leb128.ReadVarUint32(r)
if err != nil {
@@ -289,7 +290,7 @@ func readNameSectionLocals(r io.Reader, s *module.NameSection) error {
if err != nil {
return err
}
- for i := uint32(0); i < n; i++ {
+ for range n {
id, err := leb128.ReadVarUint32(r) // func index
if err != nil {
return err
@@ -326,7 +327,7 @@ func readTypeSection(r io.Reader, s *module.TypeSection) error {
return err
}
- for i := uint32(0); i < n; i++ {
+ for range n {
var ftype module.FunctionType
if err := readFunctionType(r, &ftype); err != nil {
@@ -346,7 +347,7 @@ func readImportSection(r io.Reader, s *module.ImportSection) error {
return err
}
- for i := uint32(0); i < n; i++ {
+ for range n {
var imp module.Import
@@ -367,14 +368,14 @@ func readTableSection(r io.Reader, s *module.TableSection) error {
return err
}
- for i := uint32(0); i < n; i++ {
+ for range n {
var table module.Table
if elem, err := readByte(r); err != nil {
return err
} else if elem != constant.ElementTypeAnyFunc {
- return fmt.Errorf("illegal element type")
+ return errors.New("illegal element type")
}
table.Type = types.Anyfunc
@@ -396,7 +397,7 @@ func readMemorySection(r io.Reader, s *module.MemorySection) error {
return err
}
- for i := uint32(0); i < n; i++ {
+ for range n {
var mem module.Memory
@@ -417,7 +418,7 @@ func readGlobalSection(r io.Reader, s *module.GlobalSection) error {
return err
}
- for i := uint32(0); i < n; i++ {
+ for range n {
var global module.Global
@@ -442,7 +443,7 @@ func readExportSection(r io.Reader, s *module.ExportSection) error {
return err
}
- for i := uint32(0); i < n; i++ {
+ for range n {
var exp module.Export
@@ -463,7 +464,7 @@ func readElementSection(r io.Reader, s *module.ElementSection) error {
return err
}
- for i := uint32(0); i < n; i++ {
+ for range n {
var seg module.ElementSegment
@@ -484,7 +485,7 @@ func readDataSection(r io.Reader, s *module.DataSection) error {
return err
}
- for i := uint32(0); i < n; i++ {
+ for range n {
var seg module.DataSegment
@@ -505,7 +506,7 @@ func readRawCodeSection(r io.Reader, s *module.RawCodeSection) error {
return err
}
- for i := uint32(0); i < n; i++ {
+ for range n {
var seg module.RawCodeSegment
if err := readRawCodeSegment(r, &seg); err != nil {
@@ -547,7 +548,7 @@ func readGlobal(r io.Reader, global *module.Global) error {
if b == 1 {
global.Mutable = true
} else if b != 0 {
- return fmt.Errorf("illegal mutability flag")
+ return errors.New("illegal mutability flag")
}
return readConstantExpr(r, &global.Init)
@@ -584,7 +585,7 @@ func readImport(r io.Reader, imp *module.Import) error {
if elem, err := readByte(r); err != nil {
return err
} else if elem != constant.ElementTypeAnyFunc {
- return fmt.Errorf("illegal element type")
+ return errors.New("illegal element type")
}
desc := module.TableImport{
Type: types.Anyfunc,
@@ -617,12 +618,12 @@ func readImport(r io.Reader, imp *module.Import) error {
if b == 1 {
desc.Mutable = true
} else if b != 0 {
- return fmt.Errorf("illegal mutability flag")
+ return errors.New("illegal mutability flag")
}
return nil
}
- return fmt.Errorf("illegal import descriptor type")
+ return errors.New("illegal import descriptor type")
}
func readExport(r io.Reader, exp *module.Export) error {
@@ -646,7 +647,7 @@ func readExport(r io.Reader, exp *module.Export) error {
case constant.ExportDescGlobal:
exp.Descriptor.Type = module.GlobalExportType
default:
- return fmt.Errorf("illegal export descriptor type")
+ return errors.New("illegal export descriptor type")
}
exp.Descriptor.Index, err = leb128.ReadVarUint32(r)
@@ -727,7 +728,7 @@ func readExpr(r io.Reader, expr *module.Expr) (err error) {
case error:
err = r
default:
- err = fmt.Errorf("unknown panic")
+ err = errors.New("unknown panic")
}
}
}()
@@ -809,21 +810,21 @@ func readLimits(r io.Reader, l *module.Limit) error {
return err
}
- min, err := leb128.ReadVarUint32(r)
+ minLim, err := leb128.ReadVarUint32(r)
if err != nil {
return err
}
- l.Min = min
+ l.Min = minLim
if b == 1 {
- max, err := leb128.ReadVarUint32(r)
+ maxLim, err := leb128.ReadVarUint32(r)
if err != nil {
return err
}
- l.Max = &max
+ l.Max = &maxLim
} else if b != 0 {
- return fmt.Errorf("illegal limit flag")
+ return errors.New("illegal limit flag")
}
return nil
@@ -838,7 +839,7 @@ func readLocals(r io.Reader, locals *[]module.LocalDeclaration) error {
ret := make([]module.LocalDeclaration, n)
- for i := uint32(0); i < n; i++ {
+ for i := range n {
if err := readVarUint32(r, &ret[i].Count); err != nil {
return err
}
@@ -888,7 +889,7 @@ func readVarUint32Vector(r io.Reader, v *[]uint32) error {
ret := make([]uint32, n)
- for i := uint32(0); i < n; i++ {
+ for i := range n {
if err := readVarUint32(r, &ret[i]); err != nil {
return err
}
@@ -907,7 +908,7 @@ func readValueTypeVector(r io.Reader, v *[]types.ValueType) error {
ret := make([]types.ValueType, n)
- for i := uint32(0); i < n; i++ {
+ for i := range n {
if err := readValueType(r, &ret[i]); err != nil {
return err
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/writer.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/writer.go
index 6917b8d1d1..19df3bd6e6 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/writer.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/writer.go
@@ -7,6 +7,7 @@ package encoding
import (
"bytes"
"encoding/binary"
+ "errors"
"fmt"
"io"
"math"
@@ -260,7 +261,7 @@ func writeTableSection(w io.Writer, s module.TableSection) error {
return err
}
default:
- return fmt.Errorf("illegal table element type")
+ return errors.New("illegal table element type")
}
if err := writeLimits(&buf, table.Lim); err != nil {
return err
@@ -588,7 +589,7 @@ func writeImport(w io.Writer, imp module.Import) error {
}
return writeByte(w, constant.Const)
default:
- return fmt.Errorf("illegal import descriptor type")
+ return errors.New("illegal import descriptor type")
}
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/control.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/control.go
index 38f030982d..0b2805247f 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/control.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/control.go
@@ -112,8 +112,8 @@ func (Br) Op() opcode.Opcode {
}
// ImmediateArgs returns the block index to break to.
-func (i Br) ImmediateArgs() []interface{} {
- return []interface{}{i.Index}
+func (i Br) ImmediateArgs() []any {
+ return []any{i.Index}
}
// BrIf represents a WASM br_if instruction.
@@ -127,8 +127,8 @@ func (BrIf) Op() opcode.Opcode {
}
// ImmediateArgs returns the block index to break to.
-func (i BrIf) ImmediateArgs() []interface{} {
- return []interface{}{i.Index}
+func (i BrIf) ImmediateArgs() []any {
+ return []any{i.Index}
}
// Call represents a WASM call instruction.
@@ -142,8 +142,8 @@ func (Call) Op() opcode.Opcode {
}
// ImmediateArgs returns the function index.
-func (i Call) ImmediateArgs() []interface{} {
- return []interface{}{i.Index}
+func (i Call) ImmediateArgs() []any {
+ return []any{i.Index}
}
// CallIndirect represents a WASM call_indirect instruction.
@@ -158,8 +158,8 @@ func (CallIndirect) Op() opcode.Opcode {
}
// ImmediateArgs returns the function index.
-func (i CallIndirect) ImmediateArgs() []interface{} {
- return []interface{}{i.Index, i.Reserved}
+func (i CallIndirect) ImmediateArgs() []any {
+ return []any{i.Index, i.Reserved}
}
// Return represents a WASM return instruction.
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/instruction.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/instruction.go
index 066be77c44..a0ab5953b8 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/instruction.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/instruction.go
@@ -15,14 +15,14 @@ type NoImmediateArgs struct {
}
// ImmediateArgs returns the immedate arguments of an instruction.
-func (NoImmediateArgs) ImmediateArgs() []interface{} {
+func (NoImmediateArgs) ImmediateArgs() []any {
return nil
}
// Instruction represents a single WASM instruction.
type Instruction interface {
Op() opcode.Opcode
- ImmediateArgs() []interface{}
+ ImmediateArgs() []any
}
// StructuredInstruction represents a structured control instruction like br_if.
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/memory.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/memory.go
index c449cb1b6a..5a052bb764 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/memory.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/memory.go
@@ -18,8 +18,8 @@ func (I32Load) Op() opcode.Opcode {
}
// ImmediateArgs returns the static offset and alignment operands.
-func (i I32Load) ImmediateArgs() []interface{} {
- return []interface{}{i.Align, i.Offset}
+func (i I32Load) ImmediateArgs() []any {
+ return []any{i.Align, i.Offset}
}
// I32Store represents the WASM i32.store instruction.
@@ -34,6 +34,6 @@ func (I32Store) Op() opcode.Opcode {
}
// ImmediateArgs returns the static offset and alignment operands.
-func (i I32Store) ImmediateArgs() []interface{} {
- return []interface{}{i.Align, i.Offset}
+func (i I32Store) ImmediateArgs() []any {
+ return []any{i.Align, i.Offset}
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/numeric.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/numeric.go
index 03f33752a2..bbba1f0bcb 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/numeric.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/numeric.go
@@ -19,8 +19,8 @@ func (I32Const) Op() opcode.Opcode {
}
// ImmediateArgs returns the i32 value to push onto the stack.
-func (i I32Const) ImmediateArgs() []interface{} {
- return []interface{}{i.Value}
+func (i I32Const) ImmediateArgs() []any {
+ return []any{i.Value}
}
// I64Const represents the WASM i64.const instruction.
@@ -34,8 +34,8 @@ func (I64Const) Op() opcode.Opcode {
}
// ImmediateArgs returns the i64 value to push onto the stack.
-func (i I64Const) ImmediateArgs() []interface{} {
- return []interface{}{i.Value}
+func (i I64Const) ImmediateArgs() []any {
+ return []any{i.Value}
}
// F32Const represents the WASM f32.const instruction.
@@ -49,8 +49,8 @@ func (F32Const) Op() opcode.Opcode {
}
// ImmediateArgs returns the f32 value to push onto the stack.
-func (i F32Const) ImmediateArgs() []interface{} {
- return []interface{}{i.Value}
+func (i F32Const) ImmediateArgs() []any {
+ return []any{i.Value}
}
// F64Const represents the WASM f64.const instruction.
@@ -64,8 +64,8 @@ func (F64Const) Op() opcode.Opcode {
}
// ImmediateArgs returns the f64 value to push onto the stack.
-func (i F64Const) ImmediateArgs() []interface{} {
- return []interface{}{i.Value}
+func (i F64Const) ImmediateArgs() []any {
+ return []any{i.Value}
}
// I32Eqz represents the WASM i32.eqz instruction.
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/variable.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/variable.go
index 063ffdb96d..68be486af1 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/variable.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/variable.go
@@ -17,8 +17,8 @@ func (GetLocal) Op() opcode.Opcode {
}
// ImmediateArgs returns the index of the local variable to push onto the stack.
-func (i GetLocal) ImmediateArgs() []interface{} {
- return []interface{}{i.Index}
+func (i GetLocal) ImmediateArgs() []any {
+ return []any{i.Index}
}
// SetLocal represents the WASM set_local instruction.
@@ -33,8 +33,8 @@ func (SetLocal) Op() opcode.Opcode {
// ImmediateArgs returns the index of the local variable to set with the top of
// the stack.
-func (i SetLocal) ImmediateArgs() []interface{} {
- return []interface{}{i.Index}
+func (i SetLocal) ImmediateArgs() []any {
+ return []any{i.Index}
}
// TeeLocal represents the WASM tee_local instruction.
@@ -49,6 +49,6 @@ func (TeeLocal) Op() opcode.Opcode {
// ImmediateArgs returns the index of the local variable to "tee" with the top of
// the stack (like set, but retaining the top of the stack).
-func (i TeeLocal) ImmediateArgs() []interface{} {
- return []interface{}{i.Index}
+func (i TeeLocal) ImmediateArgs() []any {
+ return []any{i.Index}
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/module/module.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/module/module.go
index 913863c10c..033d429c89 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/wasm/module/module.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/module/module.go
@@ -288,7 +288,7 @@ func (x ExportDescriptorType) String() string {
}
// Kind returns the function import type kind.
-func (i FunctionImport) Kind() ImportDescriptorType {
+func (FunctionImport) Kind() ImportDescriptorType {
return FunctionImportType
}
@@ -297,7 +297,7 @@ func (i FunctionImport) String() string {
}
// Kind returns the memory import type kind.
-func (i MemoryImport) Kind() ImportDescriptorType {
+func (MemoryImport) Kind() ImportDescriptorType {
return MemoryImportType
}
@@ -306,7 +306,7 @@ func (i MemoryImport) String() string {
}
// Kind returns the table import type kind.
-func (i TableImport) Kind() ImportDescriptorType {
+func (TableImport) Kind() ImportDescriptorType {
return TableImportType
}
@@ -315,7 +315,7 @@ func (i TableImport) String() string {
}
// Kind returns the global import type kind.
-func (i GlobalImport) Kind() ImportDescriptorType {
+func (GlobalImport) Kind() ImportDescriptorType {
return GlobalImportType
}
diff --git a/vendor/github.com/open-policy-agent/opa/loader/doc.go b/vendor/github.com/open-policy-agent/opa/loader/doc.go
new file mode 100644
index 0000000000..9f60920d95
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/loader/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2024 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended.
+// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead.
+// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information.
+package loader
diff --git a/vendor/github.com/open-policy-agent/opa/loader/errors.go b/vendor/github.com/open-policy-agent/opa/loader/errors.go
index b8aafb1421..8dc70b8673 100644
--- a/vendor/github.com/open-policy-agent/opa/loader/errors.go
+++ b/vendor/github.com/open-policy-agent/opa/loader/errors.go
@@ -5,58 +5,8 @@
package loader
import (
- "fmt"
- "strings"
-
- "github.com/open-policy-agent/opa/ast"
+ v1 "github.com/open-policy-agent/opa/v1/loader"
)
// Errors is a wrapper for multiple loader errors.
-type Errors []error
-
-func (e Errors) Error() string {
- if len(e) == 0 {
- return "no error(s)"
- }
- if len(e) == 1 {
- return "1 error occurred during loading: " + e[0].Error()
- }
- buf := make([]string, len(e))
- for i := range buf {
- buf[i] = e[i].Error()
- }
- return fmt.Sprintf("%v errors occurred during loading:\n", len(e)) + strings.Join(buf, "\n")
-}
-
-func (e *Errors) add(err error) {
- if errs, ok := err.(ast.Errors); ok {
- for i := range errs {
- *e = append(*e, errs[i])
- }
- } else {
- *e = append(*e, err)
- }
-}
-
-type unsupportedDocumentType string
-
-func (path unsupportedDocumentType) Error() string {
- return string(path) + ": document must be of type object"
-}
-
-type unrecognizedFile string
-
-func (path unrecognizedFile) Error() string {
- return string(path) + ": can't recognize file type"
-}
-
-func isUnrecognizedFile(err error) bool {
- _, ok := err.(unrecognizedFile)
- return ok
-}
-
-type mergeError string
-
-func (e mergeError) Error() string {
- return string(e) + ": merge error"
-}
+type Errors = v1.Errors
diff --git a/vendor/github.com/open-policy-agent/opa/loader/loader.go b/vendor/github.com/open-policy-agent/opa/loader/loader.go
index 461639ed19..9b2f91d4e9 100644
--- a/vendor/github.com/open-policy-agent/opa/loader/loader.go
+++ b/vendor/github.com/open-policy-agent/opa/loader/loader.go
@@ -6,478 +6,74 @@
package loader
import (
- "bytes"
- "fmt"
- "io"
"io/fs"
"os"
- "path/filepath"
- "sort"
"strings"
- "sigs.k8s.io/yaml"
-
"github.com/open-policy-agent/opa/ast"
- astJSON "github.com/open-policy-agent/opa/ast/json"
"github.com/open-policy-agent/opa/bundle"
- fileurl "github.com/open-policy-agent/opa/internal/file/url"
- "github.com/open-policy-agent/opa/internal/merge"
- "github.com/open-policy-agent/opa/loader/filter"
- "github.com/open-policy-agent/opa/metrics"
- "github.com/open-policy-agent/opa/storage"
- "github.com/open-policy-agent/opa/storage/inmem"
- "github.com/open-policy-agent/opa/util"
+ v1 "github.com/open-policy-agent/opa/v1/loader"
)
// Result represents the result of successfully loading zero or more files.
-type Result struct {
- Documents map[string]interface{}
- Modules map[string]*RegoFile
- path []string
-}
-
-// ParsedModules returns the parsed modules stored on the result.
-func (l *Result) ParsedModules() map[string]*ast.Module {
- modules := make(map[string]*ast.Module)
- for _, module := range l.Modules {
- modules[module.Name] = module.Parsed
- }
- return modules
-}
-
-// Compiler returns a Compiler object with the compiled modules from this loader
-// result.
-func (l *Result) Compiler() (*ast.Compiler, error) {
- compiler := ast.NewCompiler()
- compiler.Compile(l.ParsedModules())
- if compiler.Failed() {
- return nil, compiler.Errors
- }
- return compiler, nil
-}
-
-// Store returns a Store object with the documents from this loader result.
-func (l *Result) Store() (storage.Store, error) {
- return l.StoreWithOpts()
-}
-
-// StoreWithOpts returns a Store object with the documents from this loader result,
-// instantiated with the passed options.
-func (l *Result) StoreWithOpts(opts ...inmem.Opt) (storage.Store, error) {
- return inmem.NewFromObjectWithOpts(l.Documents, opts...), nil
-}
+type Result = v1.Result
// RegoFile represents the result of loading a single Rego source file.
-type RegoFile struct {
- Name string
- Parsed *ast.Module
- Raw []byte
-}
+type RegoFile = v1.RegoFile
// Filter defines the interface for filtering files during loading. If the
// filter returns true, the file should be excluded from the result.
-type Filter = filter.LoaderFilter
+type Filter = v1.Filter
// GlobExcludeName excludes files and directories whose names do not match the
// shell style pattern at minDepth or greater.
func GlobExcludeName(pattern string, minDepth int) Filter {
- return func(_ string, info fs.FileInfo, depth int) bool {
- match, _ := filepath.Match(pattern, info.Name())
- return match && depth >= minDepth
- }
+ return v1.GlobExcludeName(pattern, minDepth)
}
// FileLoader defines an interface for loading OPA data files
// and Rego policies.
-type FileLoader interface {
- All(paths []string) (*Result, error)
- Filtered(paths []string, filter Filter) (*Result, error)
- AsBundle(path string) (*bundle.Bundle, error)
- WithReader(io.Reader) FileLoader
- WithFS(fs.FS) FileLoader
- WithMetrics(metrics.Metrics) FileLoader
- WithFilter(Filter) FileLoader
- WithBundleVerificationConfig(*bundle.VerificationConfig) FileLoader
- WithSkipBundleVerification(bool) FileLoader
- WithProcessAnnotation(bool) FileLoader
- WithCapabilities(*ast.Capabilities) FileLoader
- WithJSONOptions(*astJSON.Options) FileLoader
- WithRegoVersion(ast.RegoVersion) FileLoader
- WithFollowSymlinks(bool) FileLoader
-}
+type FileLoader = v1.FileLoader
// NewFileLoader returns a new FileLoader instance.
func NewFileLoader() FileLoader {
- return &fileLoader{
- metrics: metrics.New(),
- files: make(map[string]bundle.FileInfo),
- }
-}
-
-type fileLoader struct {
- metrics metrics.Metrics
- filter Filter
- bvc *bundle.VerificationConfig
- skipVerify bool
- files map[string]bundle.FileInfo
- opts ast.ParserOptions
- fsys fs.FS
- reader io.Reader
- followSymlinks bool
-}
-
-// WithFS provides an fs.FS to use for loading files. You can pass nil to
-// use plain IO calls (e.g. os.Open, os.Stat, etc.), this is the default
-// behaviour.
-func (fl *fileLoader) WithFS(fsys fs.FS) FileLoader {
- fl.fsys = fsys
- return fl
-}
-
-// WithReader provides an io.Reader to use for loading the bundle tarball.
-// An io.Reader passed via WithReader takes precedence over an fs.FS passed
-// via WithFS.
-func (fl *fileLoader) WithReader(rdr io.Reader) FileLoader {
- fl.reader = rdr
- return fl
-}
-
-// WithMetrics provides the metrics instance to use while loading
-func (fl *fileLoader) WithMetrics(m metrics.Metrics) FileLoader {
- fl.metrics = m
- return fl
-}
-
-// WithFilter specifies the filter object to use to filter files while loading
-func (fl *fileLoader) WithFilter(filter Filter) FileLoader {
- fl.filter = filter
- return fl
-}
-
-// WithBundleVerificationConfig sets the key configuration used to verify a signed bundle
-func (fl *fileLoader) WithBundleVerificationConfig(config *bundle.VerificationConfig) FileLoader {
- fl.bvc = config
- return fl
-}
-
-// WithSkipBundleVerification skips verification of a signed bundle
-func (fl *fileLoader) WithSkipBundleVerification(skipVerify bool) FileLoader {
- fl.skipVerify = skipVerify
- return fl
-}
-
-// WithProcessAnnotation enables or disables processing of schema annotations on rules
-func (fl *fileLoader) WithProcessAnnotation(processAnnotation bool) FileLoader {
- fl.opts.ProcessAnnotation = processAnnotation
- return fl
-}
-
-// WithCapabilities sets the supported capabilities when loading the files
-func (fl *fileLoader) WithCapabilities(caps *ast.Capabilities) FileLoader {
- fl.opts.Capabilities = caps
- return fl
-}
-
-// WithJSONOptions sets the JSONOptions for use when parsing files
-func (fl *fileLoader) WithJSONOptions(opts *astJSON.Options) FileLoader {
- fl.opts.JSONOptions = opts
- return fl
-}
-
-// WithRegoVersion sets the ast.RegoVersion to use when parsing and compiling modules.
-func (fl *fileLoader) WithRegoVersion(version ast.RegoVersion) FileLoader {
- fl.opts.RegoVersion = version
- return fl
-}
-
-// WithFollowSymlinks enables or disables following symlinks when loading files
-func (fl *fileLoader) WithFollowSymlinks(followSymlinks bool) FileLoader {
- fl.followSymlinks = followSymlinks
- return fl
-}
-
-// All returns a Result object loaded (recursively) from the specified paths.
-func (fl fileLoader) All(paths []string) (*Result, error) {
- return fl.Filtered(paths, nil)
-}
-
-// Filtered returns a Result object loaded (recursively) from the specified
-// paths while applying the given filters. If any filter returns true, the
-// file/directory is excluded.
-func (fl fileLoader) Filtered(paths []string, filter Filter) (*Result, error) {
- return all(fl.fsys, paths, filter, func(curr *Result, path string, depth int) error {
-
- var (
- bs []byte
- err error
- )
- if fl.fsys != nil {
- bs, err = fs.ReadFile(fl.fsys, path)
- } else {
- bs, err = os.ReadFile(path)
- }
- if err != nil {
- return err
- }
-
- result, err := loadKnownTypes(path, bs, fl.metrics, fl.opts)
- if err != nil {
- if !isUnrecognizedFile(err) {
- return err
- }
- if depth > 0 {
- return nil
- }
- result, err = loadFileForAnyType(path, bs, fl.metrics, fl.opts)
- if err != nil {
- return err
- }
- }
-
- return curr.merge(path, result)
- })
-}
-
-// AsBundle loads a path as a bundle. If it is a single file
-// it will be treated as a normal tarball bundle. If a directory
-// is supplied it will be loaded as an unzipped bundle tree.
-func (fl fileLoader) AsBundle(path string) (*bundle.Bundle, error) {
- path, err := fileurl.Clean(path)
- if err != nil {
- return nil, err
- }
-
- if err := checkForUNCPath(path); err != nil {
- return nil, err
- }
-
- var bundleLoader bundle.DirectoryLoader
- var isDir bool
- if fl.reader != nil {
- bundleLoader = bundle.NewTarballLoaderWithBaseURL(fl.reader, path).WithFilter(fl.filter)
- } else {
- bundleLoader, isDir, err = GetBundleDirectoryLoaderFS(fl.fsys, path, fl.filter)
- }
-
- if err != nil {
- return nil, err
- }
- bundleLoader = bundleLoader.WithFollowSymlinks(fl.followSymlinks)
-
- br := bundle.NewCustomReader(bundleLoader).
- WithMetrics(fl.metrics).
- WithBundleVerificationConfig(fl.bvc).
- WithSkipBundleVerification(fl.skipVerify).
- WithProcessAnnotations(fl.opts.ProcessAnnotation).
- WithCapabilities(fl.opts.Capabilities).
- WithJSONOptions(fl.opts.JSONOptions).
- WithFollowSymlinks(fl.followSymlinks).
- WithRegoVersion(fl.opts.RegoVersion)
-
- // For bundle directories add the full path in front of module file names
- // to simplify debugging.
- if isDir {
- br.WithBaseDir(path)
- }
-
- b, err := br.Read()
- if err != nil {
- err = fmt.Errorf("bundle %s: %w", path, err)
- }
-
- return &b, err
+ return v1.NewFileLoader().WithRegoVersion(ast.DefaultRegoVersion)
}
// GetBundleDirectoryLoader returns a bundle directory loader which can be used to load
// files in the directory
func GetBundleDirectoryLoader(path string) (bundle.DirectoryLoader, bool, error) {
- return GetBundleDirectoryLoaderFS(nil, path, nil)
+ return v1.GetBundleDirectoryLoader(path)
}
// GetBundleDirectoryLoaderWithFilter returns a bundle directory loader which can be used to load
// files in the directory after applying the given filter.
func GetBundleDirectoryLoaderWithFilter(path string, filter Filter) (bundle.DirectoryLoader, bool, error) {
- return GetBundleDirectoryLoaderFS(nil, path, filter)
+ return v1.GetBundleDirectoryLoaderWithFilter(path, filter)
}
// GetBundleDirectoryLoaderFS returns a bundle directory loader which can be used to load
// files in the directory.
func GetBundleDirectoryLoaderFS(fsys fs.FS, path string, filter Filter) (bundle.DirectoryLoader, bool, error) {
- path, err := fileurl.Clean(path)
- if err != nil {
- return nil, false, err
- }
-
- if err := checkForUNCPath(path); err != nil {
- return nil, false, err
- }
-
- var fi fs.FileInfo
- if fsys != nil {
- fi, err = fs.Stat(fsys, path)
- } else {
- fi, err = os.Stat(path)
- }
- if err != nil {
- return nil, false, fmt.Errorf("error reading %q: %s", path, err)
- }
-
- var bundleLoader bundle.DirectoryLoader
- if fi.IsDir() {
- if fsys != nil {
- bundleLoader = bundle.NewFSLoaderWithRoot(fsys, path)
- } else {
- bundleLoader = bundle.NewDirectoryLoader(path)
- }
- } else {
- var fh fs.File
- if fsys != nil {
- fh, err = fsys.Open(path)
- } else {
- fh, err = os.Open(path)
- }
- if err != nil {
- return nil, false, err
- }
- bundleLoader = bundle.NewTarballLoaderWithBaseURL(fh, path)
- }
-
- if filter != nil {
- bundleLoader = bundleLoader.WithFilter(filter)
- }
- return bundleLoader, fi.IsDir(), nil
+ return v1.GetBundleDirectoryLoaderFS(fsys, path, filter)
}
// FilteredPaths is the same as FilterPathsFS using the current diretory file
// system
func FilteredPaths(paths []string, filter Filter) ([]string, error) {
- return FilteredPathsFS(nil, paths, filter)
+ return v1.FilteredPaths(paths, filter)
}
// FilteredPathsFS return a list of files from the specified
// paths while applying the given filters. If any filter returns true, the
// file/directory is excluded.
func FilteredPathsFS(fsys fs.FS, paths []string, filter Filter) ([]string, error) {
- result := []string{}
-
- _, err := all(fsys, paths, filter, func(_ *Result, path string, _ int) error {
- result = append(result, path)
- return nil
- })
- if err != nil {
- return nil, err
- }
- return result, nil
+ return v1.FilteredPathsFS(fsys, paths, filter)
}
// Schemas loads a schema set from the specified file path.
func Schemas(schemaPath string) (*ast.SchemaSet, error) {
-
- var errs Errors
- ss, err := loadSchemas(schemaPath)
- if err != nil {
- errs.add(err)
- return nil, errs
- }
-
- return ss, nil
-}
-
-func loadSchemas(schemaPath string) (*ast.SchemaSet, error) {
-
- if schemaPath == "" {
- return nil, nil
- }
-
- ss := ast.NewSchemaSet()
- path, err := fileurl.Clean(schemaPath)
- if err != nil {
- return nil, err
- }
-
- info, err := os.Stat(path)
- if err != nil {
- return nil, err
- }
-
- // Handle single file case.
- if !info.IsDir() {
- schema, err := loadOneSchema(path)
- if err != nil {
- return nil, err
- }
- ss.Put(ast.SchemaRootRef, schema)
- return ss, nil
-
- }
-
- // Handle directory case.
- rootDir := path
-
- err = filepath.Walk(path,
- func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- } else if info.IsDir() {
- return nil
- }
-
- schema, err := loadOneSchema(path)
- if err != nil {
- return err
- }
-
- relPath, err := filepath.Rel(rootDir, path)
- if err != nil {
- return err
- }
-
- key := getSchemaSetByPathKey(relPath)
- ss.Put(key, schema)
- return nil
- })
-
- if err != nil {
- return nil, err
- }
-
- return ss, nil
-}
-
-func getSchemaSetByPathKey(path string) ast.Ref {
-
- front := filepath.Dir(path)
- last := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path))
-
- var parts []string
-
- if front != "." {
- parts = append(strings.Split(filepath.ToSlash(front), "/"), last)
- } else {
- parts = []string{last}
- }
-
- key := make(ast.Ref, 1+len(parts))
- key[0] = ast.SchemaRootDocument
- for i := range parts {
- key[i+1] = ast.StringTerm(parts[i])
- }
-
- return key
-}
-
-func loadOneSchema(path string) (interface{}, error) {
- bs, err := os.ReadFile(path)
- if err != nil {
- return nil, err
- }
-
- var schema interface{}
- if err := util.Unmarshal(bs, &schema); err != nil {
- return nil, fmt.Errorf("%s: %w", path, err)
- }
-
- return schema, nil
+ return v1.Schemas(schemaPath)
}
// All returns a Result object loaded (recursively) from the specified paths.
@@ -517,321 +113,33 @@ func Rego(path string) (*RegoFile, error) {
// RegoWithOpts returns a RegoFile object loaded from the given path.
func RegoWithOpts(path string, opts ast.ParserOptions) (*RegoFile, error) {
- path, err := fileurl.Clean(path)
- if err != nil {
- return nil, err
+ if opts.RegoVersion == ast.RegoUndefined {
+ opts.RegoVersion = ast.DefaultRegoVersion
}
- bs, err := os.ReadFile(path)
- if err != nil {
- return nil, err
- }
- return loadRego(path, bs, metrics.New(), opts)
+
+ return v1.RegoWithOpts(path, opts)
}
// CleanPath returns the normalized version of a path that can be used as an identifier.
func CleanPath(path string) string {
- return strings.Trim(path, "/")
+ return v1.CleanPath(path)
}
// Paths returns a sorted list of files contained at path. If recurse is true
// and path is a directory, then Paths will walk the directory structure
// recursively and list files at each level.
func Paths(path string, recurse bool) (paths []string, err error) {
- path, err = fileurl.Clean(path)
- if err != nil {
- return nil, err
- }
- err = filepath.Walk(path, func(f string, _ os.FileInfo, _ error) error {
- if !recurse {
- if path != f && path != filepath.Dir(f) {
- return filepath.SkipDir
- }
- }
- paths = append(paths, f)
- return nil
- })
- return paths, err
+ return v1.Paths(path, recurse)
}
// Dirs resolves filepaths to directories. It will return a list of unique
// directories.
func Dirs(paths []string) []string {
- unique := map[string]struct{}{}
-
- for _, path := range paths {
- // TODO: /dir/dir will register top level directory /dir
- dir := filepath.Dir(path)
- unique[dir] = struct{}{}
- }
-
- u := make([]string, 0, len(unique))
- for k := range unique {
- u = append(u, k)
- }
- sort.Strings(u)
- return u
+ return v1.Dirs(paths)
}
// SplitPrefix returns a tuple specifying the document prefix and the file
// path.
func SplitPrefix(path string) ([]string, string) {
- // Non-prefixed URLs can be returned without modification and their contents
- // can be rooted directly under data.
- if strings.Index(path, "://") == strings.Index(path, ":") {
- return nil, path
- }
- parts := strings.SplitN(path, ":", 2)
- if len(parts) == 2 && len(parts[0]) > 0 {
- return strings.Split(parts[0], "."), parts[1]
- }
- return nil, path
-}
-
-func (l *Result) merge(path string, result interface{}) error {
- switch result := result.(type) {
- case bundle.Bundle:
- for _, module := range result.Modules {
- l.Modules[module.Path] = &RegoFile{
- Name: module.Path,
- Parsed: module.Parsed,
- Raw: module.Raw,
- }
- }
- return l.mergeDocument(path, result.Data)
- case *RegoFile:
- l.Modules[CleanPath(path)] = result
- return nil
- default:
- return l.mergeDocument(path, result)
- }
-}
-
-func (l *Result) mergeDocument(path string, doc interface{}) error {
- obj, ok := makeDir(l.path, doc)
- if !ok {
- return unsupportedDocumentType(path)
- }
- merged, ok := merge.InterfaceMaps(l.Documents, obj)
- if !ok {
- return mergeError(path)
- }
- for k := range merged {
- l.Documents[k] = merged[k]
- }
- return nil
-}
-
-func (l *Result) withParent(p string) *Result {
- path := append(l.path, p)
- return &Result{
- Documents: l.Documents,
- Modules: l.Modules,
- path: path,
- }
-}
-
-func newResult() *Result {
- return &Result{
- Documents: map[string]interface{}{},
- Modules: map[string]*RegoFile{},
- }
-}
-
-func all(fsys fs.FS, paths []string, filter Filter, f func(*Result, string, int) error) (*Result, error) {
- errs := Errors{}
- root := newResult()
-
- for _, path := range paths {
-
- // Paths can be prefixed with a string that specifies where content should be
- // loaded under data. E.g., foo.bar:/path/to/some.json will load the content
- // of some.json under {"foo": {"bar": ...}}.
- loaded := root
- prefix, path := SplitPrefix(path)
- if len(prefix) > 0 {
- for _, part := range prefix {
- loaded = loaded.withParent(part)
- }
- }
-
- allRec(fsys, path, filter, &errs, loaded, 0, f)
- }
-
- if len(errs) > 0 {
- return nil, errs
- }
-
- return root, nil
-}
-
-func allRec(fsys fs.FS, path string, filter Filter, errors *Errors, loaded *Result, depth int, f func(*Result, string, int) error) {
-
- path, err := fileurl.Clean(path)
- if err != nil {
- errors.add(err)
- return
- }
-
- if err := checkForUNCPath(path); err != nil {
- errors.add(err)
- return
- }
-
- var info fs.FileInfo
- if fsys != nil {
- info, err = fs.Stat(fsys, path)
- } else {
- info, err = os.Stat(path)
- }
-
- if err != nil {
- errors.add(err)
- return
- }
-
- if filter != nil && filter(path, info, depth) {
- return
- }
-
- if !info.IsDir() {
- if err := f(loaded, path, depth); err != nil {
- errors.add(err)
- }
- return
- }
-
- // If we are recursing on directories then content must be loaded under path
- // specified by directory hierarchy.
- if depth > 0 {
- loaded = loaded.withParent(info.Name())
- }
-
- var files []fs.DirEntry
- if fsys != nil {
- files, err = fs.ReadDir(fsys, path)
- } else {
- files, err = os.ReadDir(path)
- }
- if err != nil {
- errors.add(err)
- return
- }
-
- for _, file := range files {
- allRec(fsys, filepath.Join(path, file.Name()), filter, errors, loaded, depth+1, f)
- }
-}
-
-func loadKnownTypes(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (interface{}, error) {
- switch filepath.Ext(path) {
- case ".json":
- return loadJSON(path, bs, m)
- case ".rego":
- return loadRego(path, bs, m, opts)
- case ".yaml", ".yml":
- return loadYAML(path, bs, m)
- default:
- if strings.HasSuffix(path, ".tar.gz") {
- r, err := loadBundleFile(path, bs, m, opts)
- if err != nil {
- err = fmt.Errorf("bundle %s: %w", path, err)
- }
- return r, err
- }
- }
- return nil, unrecognizedFile(path)
-}
-
-func loadFileForAnyType(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (interface{}, error) {
- module, err := loadRego(path, bs, m, opts)
- if err == nil {
- return module, nil
- }
- doc, err := loadJSON(path, bs, m)
- if err == nil {
- return doc, nil
- }
- doc, err = loadYAML(path, bs, m)
- if err == nil {
- return doc, nil
- }
- return nil, unrecognizedFile(path)
-}
-
-func loadBundleFile(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (bundle.Bundle, error) {
- tl := bundle.NewTarballLoaderWithBaseURL(bytes.NewBuffer(bs), path)
- br := bundle.NewCustomReader(tl).
- WithRegoVersion(opts.RegoVersion).
- WithJSONOptions(opts.JSONOptions).
- WithProcessAnnotations(opts.ProcessAnnotation).
- WithMetrics(m).
- WithSkipBundleVerification(true).
- IncludeManifestInData(true)
- return br.Read()
-}
-
-func loadRego(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (*RegoFile, error) {
- m.Timer(metrics.RegoModuleParse).Start()
- var module *ast.Module
- var err error
- module, err = ast.ParseModuleWithOpts(path, string(bs), opts)
- m.Timer(metrics.RegoModuleParse).Stop()
- if err != nil {
- return nil, err
- }
- result := &RegoFile{
- Name: path,
- Parsed: module,
- Raw: bs,
- }
- return result, nil
-}
-
-func loadJSON(path string, bs []byte, m metrics.Metrics) (interface{}, error) {
- m.Timer(metrics.RegoDataParse).Start()
- var x interface{}
- err := util.UnmarshalJSON(bs, &x)
- m.Timer(metrics.RegoDataParse).Stop()
-
- if err != nil {
- return nil, fmt.Errorf("%s: %w", path, err)
- }
- return x, nil
-}
-
-func loadYAML(path string, bs []byte, m metrics.Metrics) (interface{}, error) {
- m.Timer(metrics.RegoDataParse).Start()
- bs, err := yaml.YAMLToJSON(bs)
- m.Timer(metrics.RegoDataParse).Stop()
- if err != nil {
- return nil, fmt.Errorf("%v: error converting YAML to JSON: %v", path, err)
- }
- return loadJSON(path, bs, m)
-}
-
-func makeDir(path []string, x interface{}) (map[string]interface{}, bool) {
- if len(path) == 0 {
- obj, ok := x.(map[string]interface{})
- if !ok {
- return nil, false
- }
- return obj, true
- }
- return makeDir(path[:len(path)-1], map[string]interface{}{path[len(path)-1]: x})
-}
-
-// isUNC reports whether path is a UNC path.
-func isUNC(path string) bool {
- return len(path) > 1 && isSlash(path[0]) && isSlash(path[1])
-}
-
-func isSlash(c uint8) bool {
- return c == '\\' || c == '/'
-}
-
-func checkForUNCPath(path string) error {
- if isUNC(path) {
- return fmt.Errorf("UNC path read is not allowed: %s", path)
- }
- return nil
+ return v1.SplitPrefix(path)
}
diff --git a/vendor/github.com/open-policy-agent/opa/rego/doc.go b/vendor/github.com/open-policy-agent/opa/rego/doc.go
new file mode 100644
index 0000000000..febe75696c
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/rego/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2024 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended.
+// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead.
+// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information.
+package rego
diff --git a/vendor/github.com/open-policy-agent/opa/rego/errors.go b/vendor/github.com/open-policy-agent/opa/rego/errors.go
index dcc5e2679d..bcbd2efedd 100644
--- a/vendor/github.com/open-policy-agent/opa/rego/errors.go
+++ b/vendor/github.com/open-policy-agent/opa/rego/errors.go
@@ -1,24 +1,17 @@
package rego
+import v1 "github.com/open-policy-agent/opa/v1/rego"
+
// HaltError is an error type to return from a custom function implementation
// that will abort the evaluation process (analogous to topdown.Halt).
-type HaltError struct {
- err error
-}
-
-// Error delegates to the wrapped error
-func (h *HaltError) Error() string {
- return h.err.Error()
-}
+type HaltError = v1.HaltError
// NewHaltError wraps an error such that the evaluation process will stop
// when it occurs.
func NewHaltError(err error) error {
- return &HaltError{err: err}
+ return v1.NewHaltError(err)
}
// ErrorDetails interface is satisfied by an error that provides further
// details.
-type ErrorDetails interface {
- Lines() []string
-}
+type ErrorDetails = v1.ErrorDetails
diff --git a/vendor/github.com/open-policy-agent/opa/rego/plugins.go b/vendor/github.com/open-policy-agent/opa/rego/plugins.go
index abaa910341..38ef84416f 100644
--- a/vendor/github.com/open-policy-agent/opa/rego/plugins.go
+++ b/vendor/github.com/open-policy-agent/opa/rego/plugins.go
@@ -5,39 +5,13 @@
package rego
import (
- "context"
- "sync"
-
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/ir"
+ v1 "github.com/open-policy-agent/opa/v1/rego"
)
-var targetPlugins = map[string]TargetPlugin{}
-var pluginMtx sync.Mutex
-
-type TargetPlugin interface {
- IsTarget(string) bool
- PrepareForEval(context.Context, *ir.Policy, ...PrepareOption) (TargetPluginEval, error)
-}
-
-type TargetPluginEval interface {
- Eval(context.Context, *EvalContext, ast.Value) (ast.Value, error)
-}
+type TargetPlugin = v1.TargetPlugin
-func (r *Rego) targetPlugin(tgt string) TargetPlugin {
- for _, p := range targetPlugins {
- if p.IsTarget(tgt) {
- return p
- }
- }
- return nil
-}
+type TargetPluginEval = v1.TargetPluginEval
func RegisterPlugin(name string, p TargetPlugin) {
- pluginMtx.Lock()
- defer pluginMtx.Unlock()
- if _, ok := targetPlugins[name]; ok {
- panic("plugin already registered " + name)
- }
- targetPlugins[name] = p
+ v1.RegisterPlugin(name, p)
}
diff --git a/vendor/github.com/open-policy-agent/opa/rego/rego.go b/vendor/github.com/open-policy-agent/opa/rego/rego.go
index 64b4b9b93e..bdcf6c291a 100644
--- a/vendor/github.com/open-policy-agent/opa/rego/rego.go
+++ b/vendor/github.com/open-policy-agent/opa/rego/rego.go
@@ -6,958 +6,367 @@
package rego
import (
- "bytes"
- "context"
- "errors"
- "fmt"
"io"
- "strings"
"time"
"github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/bundle"
- bundleUtils "github.com/open-policy-agent/opa/internal/bundle"
- "github.com/open-policy-agent/opa/internal/compiler/wasm"
- "github.com/open-policy-agent/opa/internal/future"
- "github.com/open-policy-agent/opa/internal/planner"
- "github.com/open-policy-agent/opa/internal/rego/opa"
- "github.com/open-policy-agent/opa/internal/wasm/encoding"
- "github.com/open-policy-agent/opa/ir"
"github.com/open-policy-agent/opa/loader"
- "github.com/open-policy-agent/opa/metrics"
- "github.com/open-policy-agent/opa/plugins"
- "github.com/open-policy-agent/opa/resolver"
"github.com/open-policy-agent/opa/storage"
- "github.com/open-policy-agent/opa/storage/inmem"
- "github.com/open-policy-agent/opa/topdown"
- "github.com/open-policy-agent/opa/topdown/builtins"
- "github.com/open-policy-agent/opa/topdown/cache"
- "github.com/open-policy-agent/opa/topdown/print"
- "github.com/open-policy-agent/opa/tracing"
- "github.com/open-policy-agent/opa/types"
- "github.com/open-policy-agent/opa/util"
-)
-
-const (
- defaultPartialNamespace = "partial"
- wasmVarPrefix = "^"
-)
-
-// nolint: deadcode,varcheck
-const (
- targetWasm = "wasm"
- targetRego = "rego"
+ "github.com/open-policy-agent/opa/v1/metrics"
+ v1 "github.com/open-policy-agent/opa/v1/rego"
+ "github.com/open-policy-agent/opa/v1/resolver"
+ "github.com/open-policy-agent/opa/v1/topdown"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/topdown/cache"
+ "github.com/open-policy-agent/opa/v1/topdown/print"
+ "github.com/open-policy-agent/opa/v1/tracing"
)
// CompileResult represents the result of compiling a Rego query, zero or more
// Rego modules, and arbitrary contextual data into an executable.
-type CompileResult struct {
- Bytes []byte `json:"bytes"`
-}
+type CompileResult = v1.CompileResult
// PartialQueries contains the queries and support modules produced by partial
// evaluation.
-type PartialQueries struct {
- Queries []ast.Body `json:"queries,omitempty"`
- Support []*ast.Module `json:"modules,omitempty"`
-}
+type PartialQueries = v1.PartialQueries
// PartialResult represents the result of partial evaluation. The result can be
// used to generate a new query that can be run when inputs are known.
-type PartialResult struct {
- compiler *ast.Compiler
- store storage.Store
- body ast.Body
- builtinDecls map[string]*ast.Builtin
- builtinFuncs map[string]*topdown.Builtin
-}
-
-// Rego returns an object that can be evaluated to produce a query result.
-func (pr PartialResult) Rego(options ...func(*Rego)) *Rego {
- options = append(options, Compiler(pr.compiler), Store(pr.store), ParsedQuery(pr.body))
- r := New(options...)
-
- // Propagate any custom builtins.
- for k, v := range pr.builtinDecls {
- r.builtinDecls[k] = v
- }
- for k, v := range pr.builtinFuncs {
- r.builtinFuncs[k] = v
- }
- return r
-}
-
-// preparedQuery is a wrapper around a Rego object which has pre-processed
-// state stored on it. Once prepared there are a more limited number of actions
-// that can be taken with it. It will, however, be able to evaluate faster since
-// it will not have to re-parse or compile as much.
-type preparedQuery struct {
- r *Rego
- cfg *PrepareConfig
-}
+type PartialResult = v1.PartialResult
// EvalContext defines the set of options allowed to be set at evaluation
// time. Any other options will need to be set on a new Rego object.
-type EvalContext struct {
- hasInput bool
- time time.Time
- seed io.Reader
- rawInput *interface{}
- parsedInput ast.Value
- metrics metrics.Metrics
- txn storage.Transaction
- instrument bool
- instrumentation *topdown.Instrumentation
- partialNamespace string
- queryTracers []topdown.QueryTracer
- compiledQuery compiledQuery
- unknowns []string
- disableInlining []ast.Ref
- parsedUnknowns []*ast.Term
- indexing bool
- earlyExit bool
- interQueryBuiltinCache cache.InterQueryCache
- interQueryBuiltinValueCache cache.InterQueryValueCache
- ndBuiltinCache builtins.NDBCache
- resolvers []refResolver
- sortSets bool
- copyMaps bool
- printHook print.Hook
- capabilities *ast.Capabilities
- strictBuiltinErrors bool
- virtualCache topdown.VirtualCache
-}
-
-func (e *EvalContext) RawInput() *interface{} {
- return e.rawInput
-}
-
-func (e *EvalContext) ParsedInput() ast.Value {
- return e.parsedInput
-}
-
-func (e *EvalContext) Time() time.Time {
- return e.time
-}
-
-func (e *EvalContext) Seed() io.Reader {
- return e.seed
-}
-
-func (e *EvalContext) InterQueryBuiltinCache() cache.InterQueryCache {
- return e.interQueryBuiltinCache
-}
-
-func (e *EvalContext) InterQueryBuiltinValueCache() cache.InterQueryValueCache {
- return e.interQueryBuiltinValueCache
-}
-
-func (e *EvalContext) PrintHook() print.Hook {
- return e.printHook
-}
-
-func (e *EvalContext) Metrics() metrics.Metrics {
- return e.metrics
-}
-
-func (e *EvalContext) StrictBuiltinErrors() bool {
- return e.strictBuiltinErrors
-}
-
-func (e *EvalContext) NDBCache() builtins.NDBCache {
- return e.ndBuiltinCache
-}
-
-func (e *EvalContext) CompiledQuery() ast.Body {
- return e.compiledQuery.query
-}
-
-func (e *EvalContext) Capabilities() *ast.Capabilities {
- return e.capabilities
-}
-
-func (e *EvalContext) Transaction() storage.Transaction {
- return e.txn
-}
+type EvalContext = v1.EvalContext
// EvalOption defines a function to set an option on an EvalConfig
-type EvalOption func(*EvalContext)
+type EvalOption = v1.EvalOption
// EvalInput configures the input for a Prepared Query's evaluation
-func EvalInput(input interface{}) EvalOption {
- return func(e *EvalContext) {
- e.rawInput = &input
- e.hasInput = true
- }
+func EvalInput(input any) EvalOption {
+ return v1.EvalInput(input)
}
// EvalParsedInput configures the input for a Prepared Query's evaluation
func EvalParsedInput(input ast.Value) EvalOption {
- return func(e *EvalContext) {
- e.parsedInput = input
- e.hasInput = true
- }
+ return v1.EvalParsedInput(input)
}
// EvalMetrics configures the metrics for a Prepared Query's evaluation
func EvalMetrics(metric metrics.Metrics) EvalOption {
- return func(e *EvalContext) {
- e.metrics = metric
- }
+ return v1.EvalMetrics(metric)
}
// EvalTransaction configures the Transaction for a Prepared Query's evaluation
func EvalTransaction(txn storage.Transaction) EvalOption {
- return func(e *EvalContext) {
- e.txn = txn
- }
+ return v1.EvalTransaction(txn)
}
// EvalInstrument enables or disables instrumenting for a Prepared Query's evaluation
func EvalInstrument(instrument bool) EvalOption {
- return func(e *EvalContext) {
- e.instrument = instrument
- }
+ return v1.EvalInstrument(instrument)
}
// EvalTracer configures a tracer for a Prepared Query's evaluation
// Deprecated: Use EvalQueryTracer instead.
func EvalTracer(tracer topdown.Tracer) EvalOption {
- return func(e *EvalContext) {
- if tracer != nil {
- e.queryTracers = append(e.queryTracers, topdown.WrapLegacyTracer(tracer))
- }
- }
+ return v1.EvalTracer(tracer)
}
// EvalQueryTracer configures a tracer for a Prepared Query's evaluation
func EvalQueryTracer(tracer topdown.QueryTracer) EvalOption {
- return func(e *EvalContext) {
- if tracer != nil {
- e.queryTracers = append(e.queryTracers, tracer)
- }
- }
+ return v1.EvalQueryTracer(tracer)
}
// EvalPartialNamespace returns an argument that sets the namespace to use for
// partial evaluation results. The namespace must be a valid package path
// component.
func EvalPartialNamespace(ns string) EvalOption {
- return func(e *EvalContext) {
- e.partialNamespace = ns
- }
+ return v1.EvalPartialNamespace(ns)
}
// EvalUnknowns returns an argument that sets the values to treat as
// unknown during partial evaluation.
func EvalUnknowns(unknowns []string) EvalOption {
- return func(e *EvalContext) {
- e.unknowns = unknowns
- }
+ return v1.EvalUnknowns(unknowns)
}
// EvalDisableInlining returns an argument that adds a set of paths to exclude from
// partial evaluation inlining.
func EvalDisableInlining(paths []ast.Ref) EvalOption {
- return func(e *EvalContext) {
- e.disableInlining = paths
- }
+ return v1.EvalDisableInlining(paths)
}
// EvalParsedUnknowns returns an argument that sets the values to treat
// as unknown during partial evaluation.
func EvalParsedUnknowns(unknowns []*ast.Term) EvalOption {
- return func(e *EvalContext) {
- e.parsedUnknowns = unknowns
- }
+ return v1.EvalParsedUnknowns(unknowns)
}
// EvalRuleIndexing will disable indexing optimizations for the
// evaluation. This should only be used when tracing in debug mode.
func EvalRuleIndexing(enabled bool) EvalOption {
- return func(e *EvalContext) {
- e.indexing = enabled
- }
+ return v1.EvalRuleIndexing(enabled)
}
// EvalEarlyExit will disable 'early exit' optimizations for the
// evaluation. This should only be used when tracing in debug mode.
func EvalEarlyExit(enabled bool) EvalOption {
- return func(e *EvalContext) {
- e.earlyExit = enabled
- }
+ return v1.EvalEarlyExit(enabled)
}
// EvalTime sets the wall clock time to use during policy evaluation.
// time.now_ns() calls will return this value.
func EvalTime(x time.Time) EvalOption {
- return func(e *EvalContext) {
- e.time = x
- }
+ return v1.EvalTime(x)
}
// EvalSeed sets a reader that will seed randomization required by built-in functions.
// If a seed is not provided crypto/rand.Reader is used.
func EvalSeed(r io.Reader) EvalOption {
- return func(e *EvalContext) {
- e.seed = r
- }
+ return v1.EvalSeed(r)
}
// EvalInterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize
// during evaluation.
func EvalInterQueryBuiltinCache(c cache.InterQueryCache) EvalOption {
- return func(e *EvalContext) {
- e.interQueryBuiltinCache = c
- }
+ return v1.EvalInterQueryBuiltinCache(c)
}
// EvalInterQueryBuiltinValueCache sets the inter-query value cache that built-in functions can utilize
// during evaluation.
func EvalInterQueryBuiltinValueCache(c cache.InterQueryValueCache) EvalOption {
- return func(e *EvalContext) {
- e.interQueryBuiltinValueCache = c
- }
+ return v1.EvalInterQueryBuiltinValueCache(c)
}
// EvalNDBuiltinCache sets the non-deterministic builtin cache that built-in functions can
// use during evaluation.
func EvalNDBuiltinCache(c builtins.NDBCache) EvalOption {
- return func(e *EvalContext) {
- e.ndBuiltinCache = c
- }
+ return v1.EvalNDBuiltinCache(c)
}
// EvalResolver sets a Resolver for a specified ref path for this evaluation.
func EvalResolver(ref ast.Ref, r resolver.Resolver) EvalOption {
- return func(e *EvalContext) {
- e.resolvers = append(e.resolvers, refResolver{ref, r})
- }
+ return v1.EvalResolver(ref, r)
}
// EvalSortSets causes the evaluator to sort sets before returning them as JSON arrays.
func EvalSortSets(yes bool) EvalOption {
- return func(e *EvalContext) {
- e.sortSets = yes
- }
+ return v1.EvalSortSets(yes)
}
-// EvalCopyMaps causes the evaluator to copy `map[string]interface{}`s before returning them.
+// EvalCopyMaps causes the evaluator to copy `map[string]any`s before returning them.
func EvalCopyMaps(yes bool) EvalOption {
- return func(e *EvalContext) {
- e.copyMaps = yes
- }
+ return v1.EvalCopyMaps(yes)
}
// EvalPrintHook sets the object to use for handling print statement outputs.
func EvalPrintHook(ph print.Hook) EvalOption {
- return func(e *EvalContext) {
- e.printHook = ph
- }
+ return v1.EvalPrintHook(ph)
}
// EvalVirtualCache sets the topdown.VirtualCache to use for evaluation. This is
// optional, and if not set, the default cache is used.
func EvalVirtualCache(vc topdown.VirtualCache) EvalOption {
- return func(e *EvalContext) {
- e.virtualCache = vc
- }
-}
-
-func (pq preparedQuery) Modules() map[string]*ast.Module {
- mods := make(map[string]*ast.Module)
-
- for name, mod := range pq.r.parsedModules {
- mods[name] = mod
- }
-
- for _, b := range pq.r.bundles {
- for _, mod := range b.Modules {
- mods[mod.Path] = mod.Parsed
- }
- }
-
- return mods
-}
-
-// newEvalContext creates a new EvalContext overlaying any EvalOptions over top
-// the Rego object on the preparedQuery. The returned function should be called
-// once the evaluation is complete to close any transactions that might have
-// been opened.
-func (pq preparedQuery) newEvalContext(ctx context.Context, options []EvalOption) (*EvalContext, func(context.Context), error) {
- ectx := &EvalContext{
- hasInput: false,
- rawInput: nil,
- parsedInput: nil,
- metrics: nil,
- txn: nil,
- instrument: false,
- instrumentation: nil,
- partialNamespace: pq.r.partialNamespace,
- queryTracers: nil,
- unknowns: pq.r.unknowns,
- parsedUnknowns: pq.r.parsedUnknowns,
- compiledQuery: compiledQuery{},
- indexing: true,
- earlyExit: true,
- resolvers: pq.r.resolvers,
- printHook: pq.r.printHook,
- capabilities: pq.r.capabilities,
- strictBuiltinErrors: pq.r.strictBuiltinErrors,
- }
-
- for _, o := range options {
- o(ectx)
- }
-
- if ectx.metrics == nil {
- ectx.metrics = metrics.New()
- }
-
- if ectx.instrument {
- ectx.instrumentation = topdown.NewInstrumentation(ectx.metrics)
- }
-
- // Default to an empty "finish" function
- finishFunc := func(context.Context) {}
-
- var err error
- ectx.disableInlining, err = parseStringsToRefs(pq.r.disableInlining)
- if err != nil {
- return nil, finishFunc, err
- }
-
- if ectx.txn == nil {
- ectx.txn, err = pq.r.store.NewTransaction(ctx)
- if err != nil {
- return nil, finishFunc, err
- }
- finishFunc = func(ctx context.Context) {
- pq.r.store.Abort(ctx, ectx.txn)
- }
- }
-
- // If we didn't get an input specified in the Eval options
- // then fall back to the Rego object's input fields.
- if !ectx.hasInput {
- ectx.rawInput = pq.r.rawInput
- ectx.parsedInput = pq.r.parsedInput
- }
-
- if ectx.parsedInput == nil {
- if ectx.rawInput == nil {
- // Fall back to the original Rego objects input if none was specified
- // Note that it could still be nil
- ectx.rawInput = pq.r.rawInput
- }
-
- if pq.r.targetPlugin(pq.r.target) == nil && // no plugin claims this target
- pq.r.target != targetWasm {
- ectx.parsedInput, err = pq.r.parseRawInput(ectx.rawInput, ectx.metrics)
- if err != nil {
- return nil, finishFunc, err
- }
- }
- }
-
- return ectx, finishFunc, nil
+ return v1.EvalVirtualCache(vc)
}
// PreparedEvalQuery holds the prepared Rego state that has been pre-processed
// for subsequent evaluations.
-type PreparedEvalQuery struct {
- preparedQuery
-}
-
-// Eval evaluates this PartialResult's Rego object with additional eval options
-// and returns a ResultSet.
-// If options are provided they will override the original Rego options respective value.
-// The original Rego object transaction will *not* be re-used. A new transaction will be opened
-// if one is not provided with an EvalOption.
-func (pq PreparedEvalQuery) Eval(ctx context.Context, options ...EvalOption) (ResultSet, error) {
- ectx, finish, err := pq.newEvalContext(ctx, options)
- if err != nil {
- return nil, err
- }
- defer finish(ctx)
-
- ectx.compiledQuery = pq.r.compiledQueries[evalQueryType]
-
- return pq.r.eval(ctx, ectx)
-}
+type PreparedEvalQuery = v1.PreparedEvalQuery
// PreparedPartialQuery holds the prepared Rego state that has been pre-processed
// for partial evaluations.
-type PreparedPartialQuery struct {
- preparedQuery
-}
-
-// Partial runs partial evaluation on the prepared query and returns the result.
-// The original Rego object transaction will *not* be re-used. A new transaction will be opened
-// if one is not provided with an EvalOption.
-func (pq PreparedPartialQuery) Partial(ctx context.Context, options ...EvalOption) (*PartialQueries, error) {
- ectx, finish, err := pq.newEvalContext(ctx, options)
- if err != nil {
- return nil, err
- }
- defer finish(ctx)
-
- ectx.compiledQuery = pq.r.compiledQueries[partialQueryType]
-
- return pq.r.partial(ctx, ectx)
-}
+type PreparedPartialQuery = v1.PreparedPartialQuery
// Errors represents a collection of errors returned when evaluating Rego.
-type Errors []error
-
-func (errs Errors) Error() string {
- if len(errs) == 0 {
- return "no error"
- }
- if len(errs) == 1 {
- return fmt.Sprintf("1 error occurred: %v", errs[0].Error())
- }
- buf := []string{fmt.Sprintf("%v errors occurred", len(errs))}
- for _, err := range errs {
- buf = append(buf, err.Error())
- }
- return strings.Join(buf, "\n")
-}
-
-var errPartialEvaluationNotEffective = errors.New("partial evaluation not effective")
+type Errors = v1.Errors
// IsPartialEvaluationNotEffectiveErr returns true if err is an error returned by
// this package to indicate that partial evaluation was ineffective.
func IsPartialEvaluationNotEffectiveErr(err error) bool {
- errs, ok := err.(Errors)
- if !ok {
- return false
- }
- return len(errs) == 1 && errs[0] == errPartialEvaluationNotEffective
-}
-
-type compiledQuery struct {
- query ast.Body
- compiler ast.QueryCompiler
-}
-
-type queryType int
-
-// Define a query type for each of the top level Rego
-// API's that compile queries differently.
-const (
- evalQueryType queryType = iota
- partialResultQueryType
- partialQueryType
- compileQueryType
-)
-
-type loadPaths struct {
- paths []string
- filter loader.Filter
+ return v1.IsPartialEvaluationNotEffectiveErr(err)
}
// Rego constructs a query and can be evaluated to obtain results.
-type Rego struct {
- query string
- parsedQuery ast.Body
- compiledQueries map[queryType]compiledQuery
- pkg string
- parsedPackage *ast.Package
- imports []string
- parsedImports []*ast.Import
- rawInput *interface{}
- parsedInput ast.Value
- unknowns []string
- parsedUnknowns []*ast.Term
- disableInlining []string
- shallowInlining bool
- skipPartialNamespace bool
- partialNamespace string
- modules []rawModule
- parsedModules map[string]*ast.Module
- compiler *ast.Compiler
- store storage.Store
- ownStore bool
- ownStoreReadAst bool
- txn storage.Transaction
- metrics metrics.Metrics
- queryTracers []topdown.QueryTracer
- tracebuf *topdown.BufferTracer
- trace bool
- instrumentation *topdown.Instrumentation
- instrument bool
- capture map[*ast.Expr]ast.Var // map exprs to generated capture vars
- termVarID int
- dump io.Writer
- runtime *ast.Term
- time time.Time
- seed io.Reader
- capabilities *ast.Capabilities
- builtinDecls map[string]*ast.Builtin
- builtinFuncs map[string]*topdown.Builtin
- unsafeBuiltins map[string]struct{}
- loadPaths loadPaths
- bundlePaths []string
- bundles map[string]*bundle.Bundle
- skipBundleVerification bool
- interQueryBuiltinCache cache.InterQueryCache
- interQueryBuiltinValueCache cache.InterQueryValueCache
- ndBuiltinCache builtins.NDBCache
- strictBuiltinErrors bool
- builtinErrorList *[]topdown.Error
- resolvers []refResolver
- schemaSet *ast.SchemaSet
- target string // target type (wasm, rego, etc.)
- opa opa.EvalEngine
- generateJSON func(*ast.Term, *EvalContext) (interface{}, error)
- printHook print.Hook
- enablePrintStatements bool
- distributedTacingOpts tracing.Options
- strict bool
- pluginMgr *plugins.Manager
- plugins []TargetPlugin
- targetPrepState TargetPluginEval
- regoVersion ast.RegoVersion
-}
+type Rego = v1.Rego
// Function represents a built-in function that is callable in Rego.
-type Function struct {
- Name string
- Description string
- Decl *types.Function
- Memoize bool
- Nondeterministic bool
-}
+type Function = v1.Function
// BuiltinContext contains additional attributes from the evaluator that
// built-in functions can use, e.g., the request context.Context, caches, etc.
-type BuiltinContext = topdown.BuiltinContext
+type BuiltinContext = v1.BuiltinContext
type (
// Builtin1 defines a built-in function that accepts 1 argument.
- Builtin1 func(bctx BuiltinContext, op1 *ast.Term) (*ast.Term, error)
+ Builtin1 = v1.Builtin1
// Builtin2 defines a built-in function that accepts 2 arguments.
- Builtin2 func(bctx BuiltinContext, op1, op2 *ast.Term) (*ast.Term, error)
+ Builtin2 = v1.Builtin2
// Builtin3 defines a built-in function that accepts 3 argument.
- Builtin3 func(bctx BuiltinContext, op1, op2, op3 *ast.Term) (*ast.Term, error)
+ Builtin3 = v1.Builtin3
// Builtin4 defines a built-in function that accepts 4 argument.
- Builtin4 func(bctx BuiltinContext, op1, op2, op3, op4 *ast.Term) (*ast.Term, error)
+ Builtin4 = v1.Builtin4
// BuiltinDyn defines a built-in function that accepts a list of arguments.
- BuiltinDyn func(bctx BuiltinContext, terms []*ast.Term) (*ast.Term, error)
+ BuiltinDyn = v1.BuiltinDyn
)
// RegisterBuiltin1 adds a built-in function globally inside the OPA runtime.
func RegisterBuiltin1(decl *Function, impl Builtin1) {
- ast.RegisterBuiltin(&ast.Builtin{
- Name: decl.Name,
- Description: decl.Description,
- Decl: decl.Decl,
- Nondeterministic: decl.Nondeterministic,
- })
- topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
- result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0]) })
- return finishFunction(decl.Name, bctx, result, err, iter)
- })
+ v1.RegisterBuiltin1(decl, impl)
}
// RegisterBuiltin2 adds a built-in function globally inside the OPA runtime.
func RegisterBuiltin2(decl *Function, impl Builtin2) {
- ast.RegisterBuiltin(&ast.Builtin{
- Name: decl.Name,
- Description: decl.Description,
- Decl: decl.Decl,
- Nondeterministic: decl.Nondeterministic,
- })
- topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
- result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1]) })
- return finishFunction(decl.Name, bctx, result, err, iter)
- })
+ v1.RegisterBuiltin2(decl, impl)
}
// RegisterBuiltin3 adds a built-in function globally inside the OPA runtime.
func RegisterBuiltin3(decl *Function, impl Builtin3) {
- ast.RegisterBuiltin(&ast.Builtin{
- Name: decl.Name,
- Description: decl.Description,
- Decl: decl.Decl,
- Nondeterministic: decl.Nondeterministic,
- })
- topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
- result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1], terms[2]) })
- return finishFunction(decl.Name, bctx, result, err, iter)
- })
+ v1.RegisterBuiltin3(decl, impl)
}
// RegisterBuiltin4 adds a built-in function globally inside the OPA runtime.
func RegisterBuiltin4(decl *Function, impl Builtin4) {
- ast.RegisterBuiltin(&ast.Builtin{
- Name: decl.Name,
- Description: decl.Description,
- Decl: decl.Decl,
- Nondeterministic: decl.Nondeterministic,
- })
- topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
- result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1], terms[2], terms[3]) })
- return finishFunction(decl.Name, bctx, result, err, iter)
- })
+ v1.RegisterBuiltin4(decl, impl)
}
// RegisterBuiltinDyn adds a built-in function globally inside the OPA runtime.
func RegisterBuiltinDyn(decl *Function, impl BuiltinDyn) {
- ast.RegisterBuiltin(&ast.Builtin{
- Name: decl.Name,
- Description: decl.Description,
- Decl: decl.Decl,
- Nondeterministic: decl.Nondeterministic,
- })
- topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
- result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms) })
- return finishFunction(decl.Name, bctx, result, err, iter)
- })
+ v1.RegisterBuiltinDyn(decl, impl)
}
// Function1 returns an option that adds a built-in function to the Rego object.
func Function1(decl *Function, f Builtin1) func(*Rego) {
- return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
- result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0]) })
- return finishFunction(decl.Name, bctx, result, err, iter)
- })
+ return v1.Function1(decl, f)
}
// Function2 returns an option that adds a built-in function to the Rego object.
func Function2(decl *Function, f Builtin2) func(*Rego) {
- return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
- result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1]) })
- return finishFunction(decl.Name, bctx, result, err, iter)
- })
+ return v1.Function2(decl, f)
}
// Function3 returns an option that adds a built-in function to the Rego object.
func Function3(decl *Function, f Builtin3) func(*Rego) {
- return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
- result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1], terms[2]) })
- return finishFunction(decl.Name, bctx, result, err, iter)
- })
+ return v1.Function3(decl, f)
}
// Function4 returns an option that adds a built-in function to the Rego object.
func Function4(decl *Function, f Builtin4) func(*Rego) {
- return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
- result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1], terms[2], terms[3]) })
- return finishFunction(decl.Name, bctx, result, err, iter)
- })
+ return v1.Function4(decl, f)
}
// FunctionDyn returns an option that adds a built-in function to the Rego object.
func FunctionDyn(decl *Function, f BuiltinDyn) func(*Rego) {
- return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
- result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms) })
- return finishFunction(decl.Name, bctx, result, err, iter)
- })
+ return v1.FunctionDyn(decl, f)
}
// FunctionDecl returns an option that adds a custom-built-in function
// __declaration__. NO implementation is provided. This is used for
// non-interpreter execution envs (e.g., Wasm).
func FunctionDecl(decl *Function) func(*Rego) {
- return newDecl(decl)
-}
-
-func newDecl(decl *Function) func(*Rego) {
- return func(r *Rego) {
- r.builtinDecls[decl.Name] = &ast.Builtin{
- Name: decl.Name,
- Decl: decl.Decl,
- }
- }
-}
-
-type memo struct {
- term *ast.Term
- err error
-}
-
-type memokey string
-
-func memoize(decl *Function, bctx BuiltinContext, terms []*ast.Term, ifEmpty func() (*ast.Term, error)) (*ast.Term, error) {
-
- if !decl.Memoize {
- return ifEmpty()
- }
-
- // NOTE(tsandall): we assume memoization is applied to infrequent built-in
- // calls that do things like fetch data from remote locations. As such,
- // converting the terms to strings is acceptable for now.
- var b strings.Builder
- if _, err := b.WriteString(decl.Name); err != nil {
- return nil, err
- }
-
- // The term slice _may_ include an output term depending on how the caller
- // referred to the built-in function. Only use the arguments as the cache
- // key. Unification ensures we don't get false positive matches.
- for i := 0; i < len(decl.Decl.Args()); i++ {
- if _, err := b.WriteString(terms[i].String()); err != nil {
- return nil, err
- }
- }
-
- key := memokey(b.String())
- hit, ok := bctx.Cache.Get(key)
- var m memo
- if ok {
- m = hit.(memo)
- } else {
- m.term, m.err = ifEmpty()
- bctx.Cache.Put(key, m)
- }
-
- return m.term, m.err
+ return v1.FunctionDecl(decl)
}
// Dump returns an argument that sets the writer to dump debugging information to.
func Dump(w io.Writer) func(r *Rego) {
- return func(r *Rego) {
- r.dump = w
- }
+ return v1.Dump(w)
}
// Query returns an argument that sets the Rego query.
func Query(q string) func(r *Rego) {
- return func(r *Rego) {
- r.query = q
- }
+ return v1.Query(q)
}
// ParsedQuery returns an argument that sets the Rego query.
func ParsedQuery(q ast.Body) func(r *Rego) {
- return func(r *Rego) {
- r.parsedQuery = q
- }
+ return v1.ParsedQuery(q)
}
// Package returns an argument that sets the Rego package on the query's
// context.
func Package(p string) func(r *Rego) {
- return func(r *Rego) {
- r.pkg = p
- }
+ return v1.Package(p)
}
// ParsedPackage returns an argument that sets the Rego package on the query's
// context.
func ParsedPackage(pkg *ast.Package) func(r *Rego) {
- return func(r *Rego) {
- r.parsedPackage = pkg
- }
+ return v1.ParsedPackage(pkg)
}
// Imports returns an argument that adds a Rego import to the query's context.
func Imports(p []string) func(r *Rego) {
- return func(r *Rego) {
- r.imports = append(r.imports, p...)
- }
+ return v1.Imports(p)
}
// ParsedImports returns an argument that adds Rego imports to the query's
// context.
func ParsedImports(imp []*ast.Import) func(r *Rego) {
- return func(r *Rego) {
- r.parsedImports = append(r.parsedImports, imp...)
- }
+ return v1.ParsedImports(imp)
}
// Input returns an argument that sets the Rego input document. Input should be
// a native Go value representing the input document.
-func Input(x interface{}) func(r *Rego) {
- return func(r *Rego) {
- r.rawInput = &x
- }
+func Input(x any) func(r *Rego) {
+ return v1.Input(x)
}
// ParsedInput returns an argument that sets the Rego input document.
func ParsedInput(x ast.Value) func(r *Rego) {
- return func(r *Rego) {
- r.parsedInput = x
- }
+ return v1.ParsedInput(x)
}
// Unknowns returns an argument that sets the values to treat as unknown during
// partial evaluation.
func Unknowns(unknowns []string) func(r *Rego) {
- return func(r *Rego) {
- r.unknowns = unknowns
- }
+ return v1.Unknowns(unknowns)
}
// ParsedUnknowns returns an argument that sets the values to treat as unknown
// during partial evaluation.
func ParsedUnknowns(unknowns []*ast.Term) func(r *Rego) {
- return func(r *Rego) {
- r.parsedUnknowns = unknowns
- }
+ return v1.ParsedUnknowns(unknowns)
}
// DisableInlining adds a set of paths to exclude from partial evaluation inlining.
func DisableInlining(paths []string) func(r *Rego) {
- return func(r *Rego) {
- r.disableInlining = paths
- }
+ return v1.DisableInlining(paths)
}
// ShallowInlining prevents rules that depend on unknown values from being inlined.
// Rules that only depend on known values are inlined.
func ShallowInlining(yes bool) func(r *Rego) {
- return func(r *Rego) {
- r.shallowInlining = yes
- }
+ return v1.ShallowInlining(yes)
}
// SkipPartialNamespace disables namespacing of partial evalution results for support
// rules generated from policy. Synthetic support rules are still namespaced.
func SkipPartialNamespace(yes bool) func(r *Rego) {
- return func(r *Rego) {
- r.skipPartialNamespace = yes
- }
+ return v1.SkipPartialNamespace(yes)
}
// PartialNamespace returns an argument that sets the namespace to use for
// partial evaluation results. The namespace must be a valid package path
// component.
func PartialNamespace(ns string) func(r *Rego) {
- return func(r *Rego) {
- r.partialNamespace = ns
- }
+ return v1.PartialNamespace(ns)
}
// Module returns an argument that adds a Rego module.
func Module(filename, input string) func(r *Rego) {
- return func(r *Rego) {
- r.modules = append(r.modules, rawModule{
- filename: filename,
- module: input,
- })
- }
+ return v1.Module(filename, input)
}
// ParsedModule returns an argument that adds a parsed Rego module. If a string
// module with the same filename name is added, it will override the parsed
// module.
func ParsedModule(module *ast.Module) func(*Rego) {
- return func(r *Rego) {
- var filename string
- if module.Package.Location != nil {
- filename = module.Package.Location.File
- } else {
- filename = fmt.Sprintf("module_%p.rego", module)
- }
- r.parsedModules[filename] = module
- }
+ return v1.ParsedModule(module)
}
// Load returns an argument that adds a filesystem path to load data
@@ -968,9 +377,7 @@ func ParsedModule(module *ast.Module) func(*Rego) {
// The Load option can only be used once.
// Note: Loading files will require a write transaction on the store.
func Load(paths []string, filter loader.Filter) func(r *Rego) {
- return func(r *Rego) {
- r.loadPaths = loadPaths{paths, filter}
- }
+ return v1.Load(paths, filter)
}
// LoadBundle returns an argument that adds a filesystem path to load
@@ -978,23 +385,17 @@ func Load(paths []string, filter loader.Filter) func(r *Rego) {
// to be loaded as a bundle.
// Note: Loading bundles will require a write transaction on the store.
func LoadBundle(path string) func(r *Rego) {
- return func(r *Rego) {
- r.bundlePaths = append(r.bundlePaths, path)
- }
+ return v1.LoadBundle(path)
}
// ParsedBundle returns an argument that adds a bundle to be loaded.
func ParsedBundle(name string, b *bundle.Bundle) func(r *Rego) {
- return func(r *Rego) {
- r.bundles[name] = b
- }
+ return v1.ParsedBundle(name, b)
}
// Compiler returns an argument that sets the Rego compiler.
func Compiler(c *ast.Compiler) func(r *Rego) {
- return func(r *Rego) {
- r.compiler = c
- }
+ return v1.Compiler(c)
}
// Store returns an argument that sets the policy engine's data storage layer.
@@ -1003,18 +404,14 @@ func Compiler(c *ast.Compiler) func(r *Rego) {
// must also be provided via the Transaction() option. After loading files
// or bundles the transaction should be aborted or committed.
func Store(s storage.Store) func(r *Rego) {
- return func(r *Rego) {
- r.store = s
- }
+ return v1.Store(s)
}
// StoreReadAST returns an argument that sets whether the store should eagerly convert data to AST values.
//
// Only applicable when no store has been set on the Rego object through the Store option.
func StoreReadAST(enabled bool) func(r *Rego) {
- return func(r *Rego) {
- r.ownStoreReadAst = enabled
- }
+ return v1.StoreReadAST(enabled)
}
// Transaction returns an argument that sets the transaction to use for storage
@@ -1024,93 +421,65 @@ func StoreReadAST(enabled bool) func(r *Rego) {
// Store() option. If using Load(), LoadBundle(), or ParsedBundle() options
// the transaction will likely require write params.
func Transaction(txn storage.Transaction) func(r *Rego) {
- return func(r *Rego) {
- r.txn = txn
- }
+ return v1.Transaction(txn)
}
// Metrics returns an argument that sets the metrics collection.
func Metrics(m metrics.Metrics) func(r *Rego) {
- return func(r *Rego) {
- r.metrics = m
- }
+ return v1.Metrics(m)
}
// Instrument returns an argument that enables instrumentation for diagnosing
// performance issues.
func Instrument(yes bool) func(r *Rego) {
- return func(r *Rego) {
- r.instrument = yes
- }
+ return v1.Instrument(yes)
}
// Trace returns an argument that enables tracing on r.
func Trace(yes bool) func(r *Rego) {
- return func(r *Rego) {
- r.trace = yes
- }
+ return v1.Trace(yes)
}
// Tracer returns an argument that adds a query tracer to r.
// Deprecated: Use QueryTracer instead.
func Tracer(t topdown.Tracer) func(r *Rego) {
- return func(r *Rego) {
- if t != nil {
- r.queryTracers = append(r.queryTracers, topdown.WrapLegacyTracer(t))
- }
- }
+ return v1.Tracer(t)
}
// QueryTracer returns an argument that adds a query tracer to r.
func QueryTracer(t topdown.QueryTracer) func(r *Rego) {
- return func(r *Rego) {
- if t != nil {
- r.queryTracers = append(r.queryTracers, t)
- }
- }
+ return v1.QueryTracer(t)
}
// Runtime returns an argument that sets the runtime data to provide to the
// evaluation engine.
func Runtime(term *ast.Term) func(r *Rego) {
- return func(r *Rego) {
- r.runtime = term
- }
+ return v1.Runtime(term)
}
// Time sets the wall clock time to use during policy evaluation. Prepared queries
// do not inherit this parameter. Use EvalTime to set the wall clock time when
// executing a prepared query.
func Time(x time.Time) func(r *Rego) {
- return func(r *Rego) {
- r.time = x
- }
+ return v1.Time(x)
}
// Seed sets a reader that will seed randomization required by built-in functions.
// If a seed is not provided crypto/rand.Reader is used.
func Seed(r io.Reader) func(*Rego) {
- return func(e *Rego) {
- e.seed = r
- }
+ return v1.Seed(r)
}
// PrintTrace is a helper function to write a human-readable version of the
// trace to the writer w.
func PrintTrace(w io.Writer, r *Rego) {
- if r == nil || r.tracebuf == nil {
- return
- }
- topdown.PrettyTrace(w, *r.tracebuf)
+ v1.PrintTrace(w, r)
}
// PrintTraceWithLocation is a helper function to write a human-readable version of the
// trace to the writer w.
func PrintTraceWithLocation(w io.Writer, r *Rego) {
- if r == nil || r.tracebuf == nil {
- return
- }
- topdown.PrettyTraceWithLocation(w, *r.tracebuf)
+ v1.PrintTraceWithLocation(w, r)
}
// UnsafeBuiltins sets the built-in functions to treat as unsafe and not allow.
@@ -1118,104 +487,76 @@ func PrintTraceWithLocation(w io.Writer, r *Rego) {
// compiler. This option is always honored for query compilation. Provide an
// empty (non-nil) map to disable checks on queries.
func UnsafeBuiltins(unsafeBuiltins map[string]struct{}) func(r *Rego) {
- return func(r *Rego) {
- r.unsafeBuiltins = unsafeBuiltins
- }
+ return v1.UnsafeBuiltins(unsafeBuiltins)
}
// SkipBundleVerification skips verification of a signed bundle.
func SkipBundleVerification(yes bool) func(r *Rego) {
- return func(r *Rego) {
- r.skipBundleVerification = yes
- }
+ return v1.SkipBundleVerification(yes)
}
// InterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize
// during evaluation.
func InterQueryBuiltinCache(c cache.InterQueryCache) func(r *Rego) {
- return func(r *Rego) {
- r.interQueryBuiltinCache = c
- }
+ return v1.InterQueryBuiltinCache(c)
}
// InterQueryBuiltinValueCache sets the inter-query value cache that built-in functions can utilize
// during evaluation.
func InterQueryBuiltinValueCache(c cache.InterQueryValueCache) func(r *Rego) {
- return func(r *Rego) {
- r.interQueryBuiltinValueCache = c
- }
+ return v1.InterQueryBuiltinValueCache(c)
}
// NDBuiltinCache sets the non-deterministic builtins cache.
func NDBuiltinCache(c builtins.NDBCache) func(r *Rego) {
- return func(r *Rego) {
- r.ndBuiltinCache = c
- }
+ return v1.NDBuiltinCache(c)
}
// StrictBuiltinErrors tells the evaluator to treat all built-in function errors as fatal errors.
func StrictBuiltinErrors(yes bool) func(r *Rego) {
- return func(r *Rego) {
- r.strictBuiltinErrors = yes
- }
+ return v1.StrictBuiltinErrors(yes)
}
// BuiltinErrorList supplies an error slice to store built-in function errors.
func BuiltinErrorList(list *[]topdown.Error) func(r *Rego) {
- return func(r *Rego) {
- r.builtinErrorList = list
- }
+ return v1.BuiltinErrorList(list)
}
// Resolver sets a Resolver for a specified ref path.
func Resolver(ref ast.Ref, r resolver.Resolver) func(r *Rego) {
- return func(rego *Rego) {
- rego.resolvers = append(rego.resolvers, refResolver{ref, r})
- }
+ return v1.Resolver(ref, r)
}
// Schemas sets the schemaSet
func Schemas(x *ast.SchemaSet) func(r *Rego) {
- return func(r *Rego) {
- r.schemaSet = x
- }
+ return v1.Schemas(x)
}
// Capabilities configures the underlying compiler's capabilities.
// This option is ignored for module compilation if the caller supplies the
// compiler.
func Capabilities(c *ast.Capabilities) func(r *Rego) {
- return func(r *Rego) {
- r.capabilities = c
- }
+ return v1.Capabilities(c)
}
// Target sets the runtime to exercise.
func Target(t string) func(r *Rego) {
- return func(r *Rego) {
- r.target = t
- }
+ return v1.Target(t)
}
// GenerateJSON sets the AST to JSON converter for the results.
-func GenerateJSON(f func(*ast.Term, *EvalContext) (interface{}, error)) func(r *Rego) {
- return func(r *Rego) {
- r.generateJSON = f
- }
+func GenerateJSON(f func(*ast.Term, *EvalContext) (any, error)) func(r *Rego) {
+ return v1.GenerateJSON(f)
}
// PrintHook sets the object to use for handling print statement outputs.
func PrintHook(h print.Hook) func(r *Rego) {
- return func(r *Rego) {
- r.printHook = h
- }
+ return v1.PrintHook(h)
}
// DistributedTracingOpts sets the options to be used by distributed tracing.
func DistributedTracingOpts(tr tracing.Options) func(r *Rego) {
- return func(r *Rego) {
- r.distributedTacingOpts = tr
- }
+ return v1.DistributedTracingOpts(tr)
}
// EnablePrintStatements enables print() calls. If this option is not provided,
@@ -1223,1667 +564,65 @@ func DistributedTracingOpts(tr tracing.Options) func(r *Rego) {
// queries and policies that passed as raw strings, i.e., this function will not
// have any affect if the caller supplies the ast.Compiler instance.
func EnablePrintStatements(yes bool) func(r *Rego) {
- return func(r *Rego) {
- r.enablePrintStatements = yes
- }
+ return v1.EnablePrintStatements(yes)
}
// Strict enables or disables strict-mode in the compiler
func Strict(yes bool) func(r *Rego) {
- return func(r *Rego) {
- r.strict = yes
- }
+ return v1.Strict(yes)
}
func SetRegoVersion(version ast.RegoVersion) func(r *Rego) {
- return func(r *Rego) {
- r.regoVersion = version
- }
+ return v1.SetRegoVersion(version)
}
// New returns a new Rego object.
func New(options ...func(r *Rego)) *Rego {
-
- r := &Rego{
- parsedModules: map[string]*ast.Module{},
- capture: map[*ast.Expr]ast.Var{},
- compiledQueries: map[queryType]compiledQuery{},
- builtinDecls: map[string]*ast.Builtin{},
- builtinFuncs: map[string]*topdown.Builtin{},
- bundles: map[string]*bundle.Bundle{},
- }
-
- for _, option := range options {
- option(r)
- }
-
- if r.compiler == nil {
- r.compiler = ast.NewCompiler().
- WithUnsafeBuiltins(r.unsafeBuiltins).
- WithBuiltins(r.builtinDecls).
- WithDebug(r.dump).
- WithSchemas(r.schemaSet).
- WithCapabilities(r.capabilities).
- WithEnablePrintStatements(r.enablePrintStatements).
- WithStrict(r.strict).
- WithUseTypeCheckAnnotations(true)
-
- // topdown could be target "" or "rego", but both could be overridden by
- // a target plugin (checked below)
- if r.target == targetWasm {
- r.compiler = r.compiler.WithEvalMode(ast.EvalModeIR)
+ opts := make([]func(r *Rego), 0, len(options)+1)
+ opts = append(opts, options...)
+ opts = append(opts, func(r *Rego) {
+ if r.RegoVersion() == ast.RegoUndefined {
+ SetRegoVersion(ast.DefaultRegoVersion)(r)
}
- }
-
- if r.store == nil {
- r.store = inmem.NewWithOpts(inmem.OptReturnASTValuesOnRead(r.ownStoreReadAst))
- r.ownStore = true
- } else {
- r.ownStore = false
- }
-
- if r.metrics == nil {
- r.metrics = metrics.New()
- }
-
- if r.instrument {
- r.instrumentation = topdown.NewInstrumentation(r.metrics)
- r.compiler.WithMetrics(r.metrics)
- }
-
- if r.trace {
- r.tracebuf = topdown.NewBufferTracer()
- r.queryTracers = append(r.queryTracers, r.tracebuf)
- }
-
- if r.partialNamespace == "" {
- r.partialNamespace = defaultPartialNamespace
- }
-
- if r.generateJSON == nil {
- r.generateJSON = generateJSON
- }
-
- if r.pluginMgr != nil {
- for _, name := range r.pluginMgr.Plugins() {
- p := r.pluginMgr.Plugin(name)
- if p0, ok := p.(TargetPlugin); ok {
- r.plugins = append(r.plugins, p0)
- }
- }
- }
-
- if t := r.targetPlugin(r.target); t != nil {
- r.compiler = r.compiler.WithEvalMode(ast.EvalModeIR)
- }
-
- return r
-}
-
-// Eval evaluates this Rego object and returns a ResultSet.
-func (r *Rego) Eval(ctx context.Context) (ResultSet, error) {
- var err error
- var txnClose transactionCloser
- r.txn, txnClose, err = r.getTxn(ctx)
- if err != nil {
- return nil, err
- }
-
- pq, err := r.PrepareForEval(ctx)
- if err != nil {
- _ = txnClose(ctx, err) // Ignore error
- return nil, err
- }
-
- evalArgs := []EvalOption{
- EvalTransaction(r.txn),
- EvalMetrics(r.metrics),
- EvalInstrument(r.instrument),
- EvalTime(r.time),
- EvalInterQueryBuiltinCache(r.interQueryBuiltinCache),
- EvalInterQueryBuiltinValueCache(r.interQueryBuiltinValueCache),
- EvalSeed(r.seed),
- }
-
- if r.ndBuiltinCache != nil {
- evalArgs = append(evalArgs, EvalNDBuiltinCache(r.ndBuiltinCache))
- }
-
- for _, qt := range r.queryTracers {
- evalArgs = append(evalArgs, EvalQueryTracer(qt))
- }
-
- for i := range r.resolvers {
- evalArgs = append(evalArgs, EvalResolver(r.resolvers[i].ref, r.resolvers[i].r))
- }
-
- rs, err := pq.Eval(ctx, evalArgs...)
- txnErr := txnClose(ctx, err) // Always call closer
- if err == nil {
- err = txnErr
- }
- return rs, err
-}
-
-// PartialEval has been deprecated and renamed to PartialResult.
-func (r *Rego) PartialEval(ctx context.Context) (PartialResult, error) {
- return r.PartialResult(ctx)
-}
-
-// PartialResult partially evaluates this Rego object and returns a PartialResult.
-func (r *Rego) PartialResult(ctx context.Context) (PartialResult, error) {
- var err error
- var txnClose transactionCloser
- r.txn, txnClose, err = r.getTxn(ctx)
- if err != nil {
- return PartialResult{}, err
- }
-
- pq, err := r.PrepareForEval(ctx, WithPartialEval())
- txnErr := txnClose(ctx, err) // Always call closer
- if err != nil {
- return PartialResult{}, err
- }
- if txnErr != nil {
- return PartialResult{}, txnErr
- }
-
- pr := PartialResult{
- compiler: pq.r.compiler,
- store: pq.r.store,
- body: pq.r.parsedQuery,
- builtinDecls: pq.r.builtinDecls,
- builtinFuncs: pq.r.builtinFuncs,
- }
-
- return pr, nil
-}
-
-// Partial runs partial evaluation on r and returns the result.
-func (r *Rego) Partial(ctx context.Context) (*PartialQueries, error) {
- var err error
- var txnClose transactionCloser
- r.txn, txnClose, err = r.getTxn(ctx)
- if err != nil {
- return nil, err
- }
-
- pq, err := r.PrepareForPartial(ctx)
- if err != nil {
- _ = txnClose(ctx, err) // Ignore error
- return nil, err
- }
-
- evalArgs := []EvalOption{
- EvalTransaction(r.txn),
- EvalMetrics(r.metrics),
- EvalInstrument(r.instrument),
- EvalInterQueryBuiltinCache(r.interQueryBuiltinCache),
- EvalInterQueryBuiltinValueCache(r.interQueryBuiltinValueCache),
- }
-
- if r.ndBuiltinCache != nil {
- evalArgs = append(evalArgs, EvalNDBuiltinCache(r.ndBuiltinCache))
- }
-
- for _, t := range r.queryTracers {
- evalArgs = append(evalArgs, EvalQueryTracer(t))
- }
-
- for i := range r.resolvers {
- evalArgs = append(evalArgs, EvalResolver(r.resolvers[i].ref, r.resolvers[i].r))
- }
-
- pqs, err := pq.Partial(ctx, evalArgs...)
- txnErr := txnClose(ctx, err) // Always call closer
- if err == nil {
- err = txnErr
- }
- return pqs, err
+ })
+
+ return v1.New(opts...)
}
// CompileOption defines a function to set options on Compile calls.
-type CompileOption func(*CompileContext)
+type CompileOption = v1.CompileOption
// CompileContext contains options for Compile calls.
-type CompileContext struct {
- partial bool
-}
+type CompileContext = v1.CompileContext
// CompilePartial defines an option to control whether partial evaluation is run
// before the query is planned and compiled.
func CompilePartial(yes bool) CompileOption {
- return func(cfg *CompileContext) {
- cfg.partial = yes
- }
-}
-
-// Compile returns a compiled policy query.
-func (r *Rego) Compile(ctx context.Context, opts ...CompileOption) (*CompileResult, error) {
-
- var cfg CompileContext
-
- for _, opt := range opts {
- opt(&cfg)
- }
-
- var queries []ast.Body
- modules := make([]*ast.Module, 0, len(r.compiler.Modules))
-
- if cfg.partial {
-
- pq, err := r.Partial(ctx)
- if err != nil {
- return nil, err
- }
- if r.dump != nil {
- if len(pq.Queries) != 0 {
- msg := fmt.Sprintf("QUERIES (%d total):", len(pq.Queries))
- fmt.Fprintln(r.dump, msg)
- fmt.Fprintln(r.dump, strings.Repeat("-", len(msg)))
- for i := range pq.Queries {
- fmt.Println(pq.Queries[i])
- }
- fmt.Fprintln(r.dump)
- }
- if len(pq.Support) != 0 {
- msg := fmt.Sprintf("SUPPORT (%d total):", len(pq.Support))
- fmt.Fprintln(r.dump, msg)
- fmt.Fprintln(r.dump, strings.Repeat("-", len(msg)))
- for i := range pq.Support {
- fmt.Println(pq.Support[i])
- }
- fmt.Fprintln(r.dump)
- }
- }
-
- queries = pq.Queries
- modules = pq.Support
-
- for _, module := range r.compiler.Modules {
- modules = append(modules, module)
- }
- } else {
- var err error
- // If creating a new transaction it should be closed before calling the
- // planner to avoid holding open the transaction longer than needed.
- //
- // TODO(tsandall): in future, planner could make use of store, in which
- // case this will need to change.
- var txnClose transactionCloser
- r.txn, txnClose, err = r.getTxn(ctx)
- if err != nil {
- return nil, err
- }
-
- err = r.prepare(ctx, compileQueryType, nil)
- txnErr := txnClose(ctx, err) // Always call closer
- if err != nil {
- return nil, err
- }
- if txnErr != nil {
- return nil, err
- }
-
- for _, module := range r.compiler.Modules {
- modules = append(modules, module)
- }
-
- queries = []ast.Body{r.compiledQueries[compileQueryType].query}
- }
-
- if tgt := r.targetPlugin(r.target); tgt != nil {
- return nil, fmt.Errorf("unsupported for rego target plugins")
- }
-
- return r.compileWasm(modules, queries, compileQueryType) // TODO(sr) control flow is funky here
-}
-
-func (r *Rego) compileWasm(_ []*ast.Module, queries []ast.Body, qType queryType) (*CompileResult, error) {
- policy, err := r.planQuery(queries, qType)
- if err != nil {
- return nil, err
- }
-
- m, err := wasm.New().WithPolicy(policy).Compile()
- if err != nil {
- return nil, err
- }
-
- var out bytes.Buffer
- if err := encoding.WriteModule(&out, m); err != nil {
- return nil, err
- }
-
- return &CompileResult{
- Bytes: out.Bytes(),
- }, nil
+ return v1.CompilePartial(yes)
}
// PrepareOption defines a function to set an option to control
// the behavior of the Prepare call.
-type PrepareOption func(*PrepareConfig)
+type PrepareOption = v1.PrepareOption
// PrepareConfig holds settings to control the behavior of the
// Prepare call.
-type PrepareConfig struct {
- doPartialEval bool
- disableInlining *[]string
- builtinFuncs map[string]*topdown.Builtin
-}
+type PrepareConfig = v1.PrepareConfig
// WithPartialEval configures an option for PrepareForEval
// which will have it perform partial evaluation while preparing
// the query (similar to rego.Rego#PartialResult)
func WithPartialEval() PrepareOption {
- return func(p *PrepareConfig) {
- p.doPartialEval = true
- }
+ return v1.WithPartialEval()
}
// WithNoInline adds a set of paths to exclude from partial evaluation inlining.
func WithNoInline(paths []string) PrepareOption {
- return func(p *PrepareConfig) {
- p.disableInlining = &paths
- }
+ return v1.WithNoInline(paths)
}
// WithBuiltinFuncs carries the rego.Function{1,2,3} per-query function definitions
// to the target plugins.
func WithBuiltinFuncs(bis map[string]*topdown.Builtin) PrepareOption {
- return func(p *PrepareConfig) {
- if p.builtinFuncs == nil {
- p.builtinFuncs = make(map[string]*topdown.Builtin, len(bis))
- }
- for k, v := range bis {
- p.builtinFuncs[k] = v
- }
- }
-}
-
-// BuiltinFuncs allows retrieving the builtin funcs set via PrepareOption
-// WithBuiltinFuncs.
-func (p *PrepareConfig) BuiltinFuncs() map[string]*topdown.Builtin {
- return p.builtinFuncs
-}
-
-// PrepareForEval will parse inputs, modules, and query arguments in preparation
-// of evaluating them.
-func (r *Rego) PrepareForEval(ctx context.Context, opts ...PrepareOption) (PreparedEvalQuery, error) {
- if !r.hasQuery() {
- return PreparedEvalQuery{}, fmt.Errorf("cannot evaluate empty query")
- }
-
- pCfg := &PrepareConfig{}
- for _, o := range opts {
- o(pCfg)
- }
-
- var err error
- var txnClose transactionCloser
- r.txn, txnClose, err = r.getTxn(ctx)
- if err != nil {
- return PreparedEvalQuery{}, err
- }
-
- // If the caller wanted to do partial evaluation as part of preparation
- // do it now and use the new Rego object.
- if pCfg.doPartialEval {
-
- pr, err := r.partialResult(ctx, pCfg)
- if err != nil {
- _ = txnClose(ctx, err) // Ignore error
- return PreparedEvalQuery{}, err
- }
-
- // Prepare the new query using the result of partial evaluation
- pq, err := pr.Rego(Transaction(r.txn)).PrepareForEval(ctx)
- txnErr := txnClose(ctx, err)
- if err != nil {
- return pq, err
- }
- return pq, txnErr
- }
-
- err = r.prepare(ctx, evalQueryType, []extraStage{
- {
- after: "ResolveRefs",
- stage: ast.QueryCompilerStageDefinition{
- Name: "RewriteToCaptureValue",
- MetricName: "query_compile_stage_rewrite_to_capture_value",
- Stage: r.rewriteQueryToCaptureValue,
- },
- },
- })
- if err != nil {
- _ = txnClose(ctx, err) // Ignore error
- return PreparedEvalQuery{}, err
- }
-
- switch r.target {
- case targetWasm: // TODO(sr): make wasm a target plugin, too
-
- if r.hasWasmModule() {
- _ = txnClose(ctx, err) // Ignore error
- return PreparedEvalQuery{}, fmt.Errorf("wasm target not supported")
- }
-
- var modules []*ast.Module
- for _, module := range r.compiler.Modules {
- modules = append(modules, module)
- }
-
- queries := []ast.Body{r.compiledQueries[evalQueryType].query}
-
- e, err := opa.LookupEngine(targetWasm)
- if err != nil {
- return PreparedEvalQuery{}, err
- }
-
- // nolint: staticcheck // SA4006 false positive
- cr, err := r.compileWasm(modules, queries, evalQueryType)
- if err != nil {
- _ = txnClose(ctx, err) // Ignore error
- return PreparedEvalQuery{}, err
- }
-
- // nolint: staticcheck // SA4006 false positive
- data, err := r.store.Read(ctx, r.txn, storage.Path{})
- if err != nil {
- _ = txnClose(ctx, err) // Ignore error
- return PreparedEvalQuery{}, err
- }
-
- o, err := e.New().WithPolicyBytes(cr.Bytes).WithDataJSON(data).Init()
- if err != nil {
- _ = txnClose(ctx, err) // Ignore error
- return PreparedEvalQuery{}, err
- }
- r.opa = o
-
- case targetRego: // do nothing, don't lookup default plugin
- default: // either a specific plugin target, or one that is default
- if tgt := r.targetPlugin(r.target); tgt != nil {
- queries := []ast.Body{r.compiledQueries[evalQueryType].query}
- pol, err := r.planQuery(queries, evalQueryType)
- if err != nil {
- return PreparedEvalQuery{}, err
- }
- // always add the builtins provided via rego.FunctionN options
- opts = append(opts, WithBuiltinFuncs(r.builtinFuncs))
- r.targetPrepState, err = tgt.PrepareForEval(ctx, pol, opts...)
- if err != nil {
- return PreparedEvalQuery{}, err
- }
- }
- }
-
- txnErr := txnClose(ctx, err) // Always call closer
- if err != nil {
- return PreparedEvalQuery{}, err
- }
- if txnErr != nil {
- return PreparedEvalQuery{}, txnErr
- }
-
- return PreparedEvalQuery{preparedQuery{r, pCfg}}, err
-}
-
-// PrepareForPartial will parse inputs, modules, and query arguments in preparation
-// of partially evaluating them.
-func (r *Rego) PrepareForPartial(ctx context.Context, opts ...PrepareOption) (PreparedPartialQuery, error) {
- if !r.hasQuery() {
- return PreparedPartialQuery{}, fmt.Errorf("cannot evaluate empty query")
- }
-
- pCfg := &PrepareConfig{}
- for _, o := range opts {
- o(pCfg)
- }
-
- var err error
- var txnClose transactionCloser
- r.txn, txnClose, err = r.getTxn(ctx)
- if err != nil {
- return PreparedPartialQuery{}, err
- }
-
- err = r.prepare(ctx, partialQueryType, []extraStage{
- {
- after: "CheckSafety",
- stage: ast.QueryCompilerStageDefinition{
- Name: "RewriteEquals",
- MetricName: "query_compile_stage_rewrite_equals",
- Stage: r.rewriteEqualsForPartialQueryCompile,
- },
- },
- })
- txnErr := txnClose(ctx, err) // Always call closer
- if err != nil {
- return PreparedPartialQuery{}, err
- }
- if txnErr != nil {
- return PreparedPartialQuery{}, txnErr
- }
-
- return PreparedPartialQuery{preparedQuery{r, pCfg}}, err
-}
-
-func (r *Rego) prepare(ctx context.Context, qType queryType, extras []extraStage) error {
- var err error
-
- r.parsedInput, err = r.parseInput()
- if err != nil {
- return err
- }
-
- err = r.loadFiles(ctx, r.txn, r.metrics)
- if err != nil {
- return err
- }
-
- err = r.loadBundles(ctx, r.txn, r.metrics)
- if err != nil {
- return err
- }
-
- err = r.parseModules(ctx, r.txn, r.metrics)
- if err != nil {
- return err
- }
-
- // Compile the modules *before* the query, else functions
- // defined in the module won't be found...
- err = r.compileModules(ctx, r.txn, r.metrics)
- if err != nil {
- return err
- }
-
- imports, err := r.prepareImports()
- if err != nil {
- return err
- }
-
- queryImports := []*ast.Import{}
- for _, imp := range imports {
- path := imp.Path.Value.(ast.Ref)
- if path.HasPrefix([]*ast.Term{ast.FutureRootDocument}) || path.HasPrefix([]*ast.Term{ast.RegoRootDocument}) {
- queryImports = append(queryImports, imp)
- }
- }
-
- r.parsedQuery, err = r.parseQuery(queryImports, r.metrics)
- if err != nil {
- return err
- }
-
- err = r.compileAndCacheQuery(qType, r.parsedQuery, imports, r.metrics, extras)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (r *Rego) parseModules(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error {
- if len(r.modules) == 0 {
- return nil
- }
-
- ids, err := r.store.ListPolicies(ctx, txn)
- if err != nil {
- return err
- }
-
- m.Timer(metrics.RegoModuleParse).Start()
- defer m.Timer(metrics.RegoModuleParse).Stop()
- var errs Errors
-
- // Parse any modules that are saved to the store, but only if
- // another compile step is going to occur (ie. we have parsed modules
- // that need to be compiled).
- for _, id := range ids {
- // if it is already on the compiler we're using
- // then don't bother to re-parse it from source
- if _, haveMod := r.compiler.Modules[id]; haveMod {
- continue
- }
-
- bs, err := r.store.GetPolicy(ctx, txn, id)
- if err != nil {
- return err
- }
-
- parsed, err := ast.ParseModuleWithOpts(id, string(bs), ast.ParserOptions{RegoVersion: r.regoVersion})
- if err != nil {
- errs = append(errs, err)
- }
-
- r.parsedModules[id] = parsed
- }
-
- // Parse any passed in as arguments to the Rego object
- for _, module := range r.modules {
- p, err := module.ParseWithOpts(ast.ParserOptions{RegoVersion: r.regoVersion})
- if err != nil {
- switch errorWithType := err.(type) {
- case ast.Errors:
- for _, e := range errorWithType {
- errs = append(errs, e)
- }
- default:
- errs = append(errs, errorWithType)
- }
- }
- r.parsedModules[module.filename] = p
- }
-
- if len(errs) > 0 {
- return errs
- }
-
- return nil
-}
-
-func (r *Rego) loadFiles(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error {
- if len(r.loadPaths.paths) == 0 {
- return nil
- }
-
- m.Timer(metrics.RegoLoadFiles).Start()
- defer m.Timer(metrics.RegoLoadFiles).Stop()
-
- result, err := loader.NewFileLoader().
- WithMetrics(m).
- WithProcessAnnotation(true).
- WithRegoVersion(r.regoVersion).
- Filtered(r.loadPaths.paths, r.loadPaths.filter)
- if err != nil {
- return err
- }
- for name, mod := range result.Modules {
- r.parsedModules[name] = mod.Parsed
- }
-
- if len(result.Documents) > 0 {
- err = r.store.Write(ctx, txn, storage.AddOp, storage.Path{}, result.Documents)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (r *Rego) loadBundles(_ context.Context, _ storage.Transaction, m metrics.Metrics) error {
- if len(r.bundlePaths) == 0 {
- return nil
- }
-
- m.Timer(metrics.RegoLoadBundles).Start()
- defer m.Timer(metrics.RegoLoadBundles).Stop()
-
- for _, path := range r.bundlePaths {
- bndl, err := loader.NewFileLoader().
- WithMetrics(m).
- WithProcessAnnotation(true).
- WithSkipBundleVerification(r.skipBundleVerification).
- WithRegoVersion(r.regoVersion).
- AsBundle(path)
- if err != nil {
- return fmt.Errorf("loading error: %s", err)
- }
- r.bundles[path] = bndl
- }
- return nil
-}
-
-func (r *Rego) parseInput() (ast.Value, error) {
- if r.parsedInput != nil {
- return r.parsedInput, nil
- }
- return r.parseRawInput(r.rawInput, r.metrics)
-}
-
-func (r *Rego) parseRawInput(rawInput *interface{}, m metrics.Metrics) (ast.Value, error) {
- var input ast.Value
-
- if rawInput == nil {
- return input, nil
- }
-
- m.Timer(metrics.RegoInputParse).Start()
- defer m.Timer(metrics.RegoInputParse).Stop()
-
- rawPtr := util.Reference(rawInput)
-
- // roundtrip through json: this turns slices (e.g. []string, []bool) into
- // []interface{}, the only array type ast.InterfaceToValue can work with
- if err := util.RoundTrip(rawPtr); err != nil {
- return nil, err
- }
-
- return ast.InterfaceToValue(*rawPtr)
-}
-
-func (r *Rego) parseQuery(queryImports []*ast.Import, m metrics.Metrics) (ast.Body, error) {
- if r.parsedQuery != nil {
- return r.parsedQuery, nil
- }
-
- m.Timer(metrics.RegoQueryParse).Start()
- defer m.Timer(metrics.RegoQueryParse).Stop()
-
- popts, err := future.ParserOptionsFromFutureImports(queryImports)
- if err != nil {
- return nil, err
- }
- popts.RegoVersion = r.regoVersion
- popts, err = parserOptionsFromRegoVersionImport(queryImports, popts)
- if err != nil {
- return nil, err
- }
- popts.SkipRules = true
- return ast.ParseBodyWithOpts(r.query, popts)
-}
-
-func parserOptionsFromRegoVersionImport(imports []*ast.Import, popts ast.ParserOptions) (ast.ParserOptions, error) {
- for _, imp := range imports {
- path := imp.Path.Value.(ast.Ref)
- if ast.Compare(path, ast.RegoV1CompatibleRef) == 0 {
- popts.RegoVersion = ast.RegoV1
- return popts, nil
- }
- }
- return popts, nil
-}
-
-func (r *Rego) compileModules(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error {
-
- // Only compile again if there are new modules.
- if len(r.bundles) > 0 || len(r.parsedModules) > 0 {
-
- // The bundle.Activate call will activate any bundles passed in
- // (ie compile + handle data store changes), and include any of
- // the additional modules passed in. If no bundles are provided
- // it will only compile the passed in modules.
- // Use this as the single-point of compiling everything only a
- // single time.
- opts := &bundle.ActivateOpts{
- Ctx: ctx,
- Store: r.store,
- Txn: txn,
- Compiler: r.compilerForTxn(ctx, r.store, txn),
- Metrics: m,
- Bundles: r.bundles,
- ExtraModules: r.parsedModules,
- ParserOptions: ast.ParserOptions{RegoVersion: r.regoVersion},
- }
- err := bundle.Activate(opts)
- if err != nil {
- return err
- }
- }
-
- // Ensure all configured resolvers from the store are loaded. Skip if any were explicitly provided.
- if len(r.resolvers) == 0 {
- resolvers, err := bundleUtils.LoadWasmResolversFromStore(ctx, r.store, txn, r.bundles)
- if err != nil {
- return err
- }
-
- for _, rslvr := range resolvers {
- for _, ep := range rslvr.Entrypoints() {
- r.resolvers = append(r.resolvers, refResolver{ep, rslvr})
- }
- }
- }
- return nil
-}
-
-func (r *Rego) compileAndCacheQuery(qType queryType, query ast.Body, imports []*ast.Import, m metrics.Metrics, extras []extraStage) error {
- m.Timer(metrics.RegoQueryCompile).Start()
- defer m.Timer(metrics.RegoQueryCompile).Stop()
-
- cachedQuery, ok := r.compiledQueries[qType]
- if ok && cachedQuery.query != nil && cachedQuery.compiler != nil {
- return nil
- }
-
- qc, compiled, err := r.compileQuery(query, imports, m, extras)
- if err != nil {
- return err
- }
-
- // cache the query for future use
- r.compiledQueries[qType] = compiledQuery{
- query: compiled,
- compiler: qc,
- }
- return nil
-}
-
-func (r *Rego) prepareImports() ([]*ast.Import, error) {
- imports := r.parsedImports
-
- if len(r.imports) > 0 {
- s := make([]string, len(r.imports))
- for i := range r.imports {
- s[i] = fmt.Sprintf("import %v", r.imports[i])
- }
- parsed, err := ast.ParseImports(strings.Join(s, "\n"))
- if err != nil {
- return nil, err
- }
- imports = append(imports, parsed...)
- }
- return imports, nil
-}
-
-func (r *Rego) compileQuery(query ast.Body, imports []*ast.Import, _ metrics.Metrics, extras []extraStage) (ast.QueryCompiler, ast.Body, error) {
- var pkg *ast.Package
-
- if r.pkg != "" {
- var err error
- pkg, err = ast.ParsePackage(fmt.Sprintf("package %v", r.pkg))
- if err != nil {
- return nil, nil, err
- }
- } else {
- pkg = r.parsedPackage
- }
-
- qctx := ast.NewQueryContext().
- WithPackage(pkg).
- WithImports(imports)
-
- qc := r.compiler.QueryCompiler().
- WithContext(qctx).
- WithUnsafeBuiltins(r.unsafeBuiltins).
- WithEnablePrintStatements(r.enablePrintStatements).
- WithStrict(false)
-
- for _, extra := range extras {
- qc = qc.WithStageAfter(extra.after, extra.stage)
- }
-
- compiled, err := qc.Compile(query)
-
- return qc, compiled, err
-
-}
-
-func (r *Rego) eval(ctx context.Context, ectx *EvalContext) (ResultSet, error) {
- switch {
- case r.targetPrepState != nil: // target plugin flow
- var val ast.Value
- if r.runtime != nil {
- val = r.runtime.Value
- }
- s, err := r.targetPrepState.Eval(ctx, ectx, val)
- if err != nil {
- return nil, err
- }
- return r.valueToQueryResult(s, ectx)
- case r.target == targetWasm:
- return r.evalWasm(ctx, ectx)
- case r.target == targetRego: // continue
- }
-
- q := topdown.NewQuery(ectx.compiledQuery.query).
- WithQueryCompiler(ectx.compiledQuery.compiler).
- WithCompiler(r.compiler).
- WithStore(r.store).
- WithTransaction(ectx.txn).
- WithBuiltins(r.builtinFuncs).
- WithMetrics(ectx.metrics).
- WithInstrumentation(ectx.instrumentation).
- WithRuntime(r.runtime).
- WithIndexing(ectx.indexing).
- WithEarlyExit(ectx.earlyExit).
- WithInterQueryBuiltinCache(ectx.interQueryBuiltinCache).
- WithInterQueryBuiltinValueCache(ectx.interQueryBuiltinValueCache).
- WithStrictBuiltinErrors(r.strictBuiltinErrors).
- WithBuiltinErrorList(r.builtinErrorList).
- WithSeed(ectx.seed).
- WithPrintHook(ectx.printHook).
- WithDistributedTracingOpts(r.distributedTacingOpts).
- WithVirtualCache(ectx.virtualCache)
-
- if !ectx.time.IsZero() {
- q = q.WithTime(ectx.time)
- }
-
- if ectx.ndBuiltinCache != nil {
- q = q.WithNDBuiltinCache(ectx.ndBuiltinCache)
- }
-
- for i := range ectx.queryTracers {
- q = q.WithQueryTracer(ectx.queryTracers[i])
- }
-
- if ectx.parsedInput != nil {
- q = q.WithInput(ast.NewTerm(ectx.parsedInput))
- }
-
- for i := range ectx.resolvers {
- q = q.WithResolver(ectx.resolvers[i].ref, ectx.resolvers[i].r)
- }
-
- // Cancel query if context is cancelled or deadline is reached.
- c := topdown.NewCancel()
- q = q.WithCancel(c)
- exit := make(chan struct{})
- defer close(exit)
- go waitForDone(ctx, exit, func() {
- c.Cancel()
- })
-
- var rs ResultSet
- err := q.Iter(ctx, func(qr topdown.QueryResult) error {
- result, err := r.generateResult(qr, ectx)
- if err != nil {
- return err
- }
- rs = append(rs, result)
- return nil
- })
-
- if err != nil {
- return nil, err
- }
-
- if len(rs) == 0 {
- return nil, nil
- }
-
- return rs, nil
-}
-
-func (r *Rego) evalWasm(ctx context.Context, ectx *EvalContext) (ResultSet, error) {
- input := ectx.rawInput
- if ectx.parsedInput != nil {
- i := interface{}(ectx.parsedInput)
- input = &i
- }
- result, err := r.opa.Eval(ctx, opa.EvalOpts{
- Metrics: r.metrics,
- Input: input,
- Time: ectx.time,
- Seed: ectx.seed,
- InterQueryBuiltinCache: ectx.interQueryBuiltinCache,
- NDBuiltinCache: ectx.ndBuiltinCache,
- PrintHook: ectx.printHook,
- Capabilities: ectx.capabilities,
- })
- if err != nil {
- return nil, err
- }
-
- parsed, err := ast.ParseTerm(string(result.Result))
- if err != nil {
- return nil, err
- }
-
- return r.valueToQueryResult(parsed.Value, ectx)
-}
-
-func (r *Rego) valueToQueryResult(res ast.Value, ectx *EvalContext) (ResultSet, error) {
- resultSet, ok := res.(ast.Set)
- if !ok {
- return nil, fmt.Errorf("illegal result type")
- }
-
- if resultSet.Len() == 0 {
- return nil, nil
- }
-
- var rs ResultSet
- err := resultSet.Iter(func(term *ast.Term) error {
- obj, ok := term.Value.(ast.Object)
- if !ok {
- return fmt.Errorf("illegal result type")
- }
- qr := topdown.QueryResult{}
- obj.Foreach(func(k, v *ast.Term) {
- kvt := ast.VarTerm(string(k.Value.(ast.String)))
- qr[kvt.Value.(ast.Var)] = v
- })
- result, err := r.generateResult(qr, ectx)
- if err != nil {
- return err
- }
- rs = append(rs, result)
- return nil
- })
-
- return rs, err
-}
-
-func (r *Rego) generateResult(qr topdown.QueryResult, ectx *EvalContext) (Result, error) {
-
- rewritten := ectx.compiledQuery.compiler.RewrittenVars()
-
- result := newResult()
- for k, term := range qr {
- v, err := r.generateJSON(term, ectx)
- if err != nil {
- return result, err
- }
-
- if rw, ok := rewritten[k]; ok {
- k = rw
- }
- if isTermVar(k) || isTermWasmVar(k) || k.IsGenerated() || k.IsWildcard() {
- continue
- }
- result.Bindings[string(k)] = v
- }
-
- for _, expr := range ectx.compiledQuery.query {
- if expr.Generated {
- continue
- }
-
- if k, ok := r.capture[expr]; ok {
- v, err := r.generateJSON(qr[k], ectx)
- if err != nil {
- return result, err
- }
- result.Expressions = append(result.Expressions, newExpressionValue(expr, v))
- } else {
- result.Expressions = append(result.Expressions, newExpressionValue(expr, true))
- }
-
- }
- return result, nil
-}
-
-func (r *Rego) partialResult(ctx context.Context, pCfg *PrepareConfig) (PartialResult, error) {
-
- err := r.prepare(ctx, partialResultQueryType, []extraStage{
- {
- after: "ResolveRefs",
- stage: ast.QueryCompilerStageDefinition{
- Name: "RewriteForPartialEval",
- MetricName: "query_compile_stage_rewrite_for_partial_eval",
- Stage: r.rewriteQueryForPartialEval,
- },
- },
- })
- if err != nil {
- return PartialResult{}, err
- }
-
- ectx := &EvalContext{
- parsedInput: r.parsedInput,
- metrics: r.metrics,
- txn: r.txn,
- partialNamespace: r.partialNamespace,
- queryTracers: r.queryTracers,
- compiledQuery: r.compiledQueries[partialResultQueryType],
- instrumentation: r.instrumentation,
- indexing: true,
- resolvers: r.resolvers,
- capabilities: r.capabilities,
- strictBuiltinErrors: r.strictBuiltinErrors,
- }
-
- disableInlining := r.disableInlining
-
- if pCfg.disableInlining != nil {
- disableInlining = *pCfg.disableInlining
- }
-
- ectx.disableInlining, err = parseStringsToRefs(disableInlining)
- if err != nil {
- return PartialResult{}, err
- }
-
- pq, err := r.partial(ctx, ectx)
- if err != nil {
- return PartialResult{}, err
- }
-
- // Construct module for queries.
- id := fmt.Sprintf("__partialresult__%s__", ectx.partialNamespace)
-
- module, err := ast.ParseModule(id, "package "+ectx.partialNamespace)
- if err != nil {
- return PartialResult{}, fmt.Errorf("bad partial namespace")
- }
-
- module.Rules = make([]*ast.Rule, len(pq.Queries))
- for i, body := range pq.Queries {
- rule := &ast.Rule{
- Head: ast.NewHead(ast.Var("__result__"), nil, ast.Wildcard),
- Body: body,
- Module: module,
- }
- module.Rules[i] = rule
- if checkPartialResultForRecursiveRefs(body, rule.Path()) {
- return PartialResult{}, Errors{errPartialEvaluationNotEffective}
- }
- }
-
- // Update compiler with partial evaluation output.
- r.compiler.Modules[id] = module
- for i, module := range pq.Support {
- r.compiler.Modules[fmt.Sprintf("__partialsupport__%s__%d__", ectx.partialNamespace, i)] = module
- }
-
- r.metrics.Timer(metrics.RegoModuleCompile).Start()
- r.compilerForTxn(ctx, r.store, r.txn).Compile(r.compiler.Modules)
- r.metrics.Timer(metrics.RegoModuleCompile).Stop()
-
- if r.compiler.Failed() {
- return PartialResult{}, r.compiler.Errors
- }
-
- result := PartialResult{
- compiler: r.compiler,
- store: r.store,
- body: ast.MustParseBody(fmt.Sprintf("data.%v.__result__", ectx.partialNamespace)),
- builtinDecls: r.builtinDecls,
- builtinFuncs: r.builtinFuncs,
- }
-
- return result, nil
-}
-
-func (r *Rego) partial(ctx context.Context, ectx *EvalContext) (*PartialQueries, error) {
-
- var unknowns []*ast.Term
-
- switch {
- case ectx.parsedUnknowns != nil:
- unknowns = ectx.parsedUnknowns
- case ectx.unknowns != nil:
- unknowns = make([]*ast.Term, len(ectx.unknowns))
- for i := range ectx.unknowns {
- var err error
- unknowns[i], err = ast.ParseTerm(ectx.unknowns[i])
- if err != nil {
- return nil, err
- }
- }
- default:
- // Use input document as unknown if caller has not specified any.
- unknowns = []*ast.Term{ast.NewTerm(ast.InputRootRef)}
- }
-
- q := topdown.NewQuery(ectx.compiledQuery.query).
- WithQueryCompiler(ectx.compiledQuery.compiler).
- WithCompiler(r.compiler).
- WithStore(r.store).
- WithTransaction(ectx.txn).
- WithBuiltins(r.builtinFuncs).
- WithMetrics(ectx.metrics).
- WithInstrumentation(ectx.instrumentation).
- WithUnknowns(unknowns).
- WithDisableInlining(ectx.disableInlining).
- WithRuntime(r.runtime).
- WithIndexing(ectx.indexing).
- WithEarlyExit(ectx.earlyExit).
- WithPartialNamespace(ectx.partialNamespace).
- WithSkipPartialNamespace(r.skipPartialNamespace).
- WithShallowInlining(r.shallowInlining).
- WithInterQueryBuiltinCache(ectx.interQueryBuiltinCache).
- WithInterQueryBuiltinValueCache(ectx.interQueryBuiltinValueCache).
- WithStrictBuiltinErrors(ectx.strictBuiltinErrors).
- WithSeed(ectx.seed).
- WithPrintHook(ectx.printHook)
-
- if !ectx.time.IsZero() {
- q = q.WithTime(ectx.time)
- }
-
- if ectx.ndBuiltinCache != nil {
- q = q.WithNDBuiltinCache(ectx.ndBuiltinCache)
- }
-
- for i := range ectx.queryTracers {
- q = q.WithQueryTracer(ectx.queryTracers[i])
- }
-
- if ectx.parsedInput != nil {
- q = q.WithInput(ast.NewTerm(ectx.parsedInput))
- }
-
- for i := range ectx.resolvers {
- q = q.WithResolver(ectx.resolvers[i].ref, ectx.resolvers[i].r)
- }
-
- // Cancel query if context is cancelled or deadline is reached.
- c := topdown.NewCancel()
- q = q.WithCancel(c)
- exit := make(chan struct{})
- defer close(exit)
- go waitForDone(ctx, exit, func() {
- c.Cancel()
- })
-
- queries, support, err := q.PartialRun(ctx)
- if err != nil {
- return nil, err
- }
-
- // If the target rego-version is v0, and the rego.v1 import is available, then we attempt to apply it to support modules.
- if r.regoVersion == ast.RegoV0 && (r.capabilities == nil || r.capabilities.ContainsFeature(ast.FeatureRegoV1Import)) {
-
- for i, mod := range support {
- // We can't apply the RegoV0CompatV1 version to the support module if it contains rules or vars that
- // conflict with future keywords.
- applyRegoVersion := true
-
- ast.WalkRules(mod, func(r *ast.Rule) bool {
- name := r.Head.Name
- if name == "" && len(r.Head.Reference) > 0 {
- name = r.Head.Reference[0].Value.(ast.Var)
- }
- if ast.IsFutureKeyword(name.String()) {
- applyRegoVersion = false
- return true
- }
- return false
- })
-
- if applyRegoVersion {
- ast.WalkVars(mod, func(v ast.Var) bool {
- if ast.IsFutureKeyword(v.String()) {
- applyRegoVersion = false
- return true
- }
- return false
- })
- }
-
- if applyRegoVersion {
- support[i].SetRegoVersion(ast.RegoV0CompatV1)
- } else {
- support[i].SetRegoVersion(r.regoVersion)
- }
- }
- } else {
- // If the target rego-version is not v0, then we apply the target rego-version to the support modules.
- for i := range support {
- support[i].SetRegoVersion(r.regoVersion)
- }
- }
-
- pq := &PartialQueries{
- Queries: queries,
- Support: support,
- }
-
- return pq, nil
-}
-
-func (r *Rego) rewriteQueryToCaptureValue(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) {
-
- checkCapture := iteration(query) || len(query) > 1
-
- for _, expr := range query {
-
- if expr.Negated {
- continue
- }
-
- if expr.IsAssignment() || expr.IsEquality() {
- continue
- }
-
- var capture *ast.Term
-
- // If the expression can be evaluated as a function, rewrite it to
- // capture the return value. E.g., neq(1,2) becomes neq(1,2,x) but
- // plus(1,2,x) does not get rewritten.
- switch terms := expr.Terms.(type) {
- case *ast.Term:
- capture = r.generateTermVar()
- expr.Terms = ast.Equality.Expr(terms, capture).Terms
- r.capture[expr] = capture.Value.(ast.Var)
- case []*ast.Term:
- tpe := r.compiler.TypeEnv.Get(terms[0])
- if !types.Void(tpe) && types.Arity(tpe) == len(terms)-1 {
- capture = r.generateTermVar()
- expr.Terms = append(terms, capture)
- r.capture[expr] = capture.Value.(ast.Var)
- }
- }
-
- if capture != nil && checkCapture {
- cpy := expr.Copy()
- cpy.Terms = capture
- cpy.Generated = true
- cpy.With = nil
- query.Append(cpy)
- }
- }
-
- return query, nil
-}
-
-func (r *Rego) rewriteQueryForPartialEval(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) {
- if len(query) != 1 {
- return nil, fmt.Errorf("partial evaluation requires single ref (not multiple expressions)")
- }
-
- term, ok := query[0].Terms.(*ast.Term)
- if !ok {
- return nil, fmt.Errorf("partial evaluation requires ref (not expression)")
- }
-
- ref, ok := term.Value.(ast.Ref)
- if !ok {
- return nil, fmt.Errorf("partial evaluation requires ref (not %v)", ast.TypeName(term.Value))
- }
-
- if !ref.IsGround() {
- return nil, fmt.Errorf("partial evaluation requires ground ref")
- }
-
- return ast.NewBody(ast.Equality.Expr(ast.Wildcard, term)), nil
-}
-
-// rewriteEqualsForPartialQueryCompile will rewrite == to = in queries. Normally
-// this wouldn't be done, except for handling queries with the `Partial` API
-// where rewriting them can substantially simplify the result, and it is unlikely
-// that the caller would need expression values.
-func (r *Rego) rewriteEqualsForPartialQueryCompile(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) {
- doubleEq := ast.Equal.Ref()
- unifyOp := ast.Equality.Ref()
- ast.WalkExprs(query, func(x *ast.Expr) bool {
- if x.IsCall() {
- operator := x.Operator()
- if operator.Equal(doubleEq) && len(x.Operands()) == 2 {
- x.SetOperator(ast.NewTerm(unifyOp))
- }
- }
- return false
- })
- return query, nil
-}
-
-func (r *Rego) generateTermVar() *ast.Term {
- r.termVarID++
- prefix := ast.WildcardPrefix
- if p := r.targetPlugin(r.target); p != nil {
- prefix = wasmVarPrefix
- } else if r.target == targetWasm {
- prefix = wasmVarPrefix
- }
- return ast.VarTerm(fmt.Sprintf("%sterm%v", prefix, r.termVarID))
-}
-
-func (r Rego) hasQuery() bool {
- return len(r.query) != 0 || len(r.parsedQuery) != 0
-}
-
-func (r Rego) hasWasmModule() bool {
- for _, b := range r.bundles {
- if len(b.WasmModules) > 0 {
- return true
- }
- }
- return false
-}
-
-type transactionCloser func(ctx context.Context, err error) error
-
-// getTxn will conditionally create a read or write transaction suitable for
-// the configured Rego object. The returned function should be used to close the txn
-// regardless of status.
-func (r *Rego) getTxn(ctx context.Context) (storage.Transaction, transactionCloser, error) {
-
- noopCloser := func(_ context.Context, _ error) error {
- return nil // no-op default
- }
-
- if r.txn != nil {
- // Externally provided txn
- return r.txn, noopCloser, nil
- }
-
- // Create a new transaction..
- params := storage.TransactionParams{}
-
- // Bundles and data paths may require writing data files or manifests to storage
- if len(r.bundles) > 0 || len(r.bundlePaths) > 0 || len(r.loadPaths.paths) > 0 {
-
- // If we were given a store we will *not* write to it, only do that on one
- // which was created automatically on behalf of the user.
- if !r.ownStore {
- return nil, noopCloser, errors.New("unable to start write transaction when store was provided")
- }
-
- params.Write = true
- }
-
- txn, err := r.store.NewTransaction(ctx, params)
- if err != nil {
- return nil, noopCloser, err
- }
-
- // Setup a closer function that will abort or commit as needed.
- closer := func(ctx context.Context, txnErr error) error {
- var err error
-
- if txnErr == nil && params.Write {
- err = r.store.Commit(ctx, txn)
- } else {
- r.store.Abort(ctx, txn)
- }
-
- // Clear the auto created transaction now that it is closed.
- r.txn = nil
-
- return err
- }
-
- return txn, closer, nil
-}
-
-func (r *Rego) compilerForTxn(ctx context.Context, store storage.Store, txn storage.Transaction) *ast.Compiler {
- // Update the compiler to have a valid path conflict check
- // for the current context and transaction.
- return r.compiler.WithPathConflictsCheck(storage.NonEmpty(ctx, store, txn))
-}
-
-func checkPartialResultForRecursiveRefs(body ast.Body, path ast.Ref) bool {
- var stop bool
- ast.WalkRefs(body, func(x ast.Ref) bool {
- if !stop {
- if path.HasPrefix(x) {
- stop = true
- }
- }
- return stop
- })
- return stop
-}
-
-func isTermVar(v ast.Var) bool {
- return strings.HasPrefix(string(v), ast.WildcardPrefix+"term")
-}
-
-func isTermWasmVar(v ast.Var) bool {
- return strings.HasPrefix(string(v), wasmVarPrefix+"term")
-}
-
-func waitForDone(ctx context.Context, exit chan struct{}, f func()) {
- select {
- case <-exit:
- return
- case <-ctx.Done():
- f()
- return
- }
-}
-
-type rawModule struct {
- filename string
- module string
-}
-
-func (m rawModule) Parse() (*ast.Module, error) {
- return ast.ParseModule(m.filename, m.module)
-}
-
-func (m rawModule) ParseWithOpts(opts ast.ParserOptions) (*ast.Module, error) {
- return ast.ParseModuleWithOpts(m.filename, m.module, opts)
-}
-
-type extraStage struct {
- after string
- stage ast.QueryCompilerStageDefinition
-}
-
-type refResolver struct {
- ref ast.Ref
- r resolver.Resolver
-}
-
-func iteration(x interface{}) bool {
-
- var stopped bool
-
- vis := ast.NewGenericVisitor(func(x interface{}) bool {
- switch x := x.(type) {
- case *ast.Term:
- if ast.IsComprehension(x.Value) {
- return true
- }
- case ast.Ref:
- if !stopped {
- if bi := ast.BuiltinMap[x.String()]; bi != nil {
- if bi.Relation {
- stopped = true
- return stopped
- }
- }
- for i := 1; i < len(x); i++ {
- if _, ok := x[i].Value.(ast.Var); ok {
- stopped = true
- return stopped
- }
- }
- }
- return stopped
- }
- return stopped
- })
-
- vis.Walk(x)
-
- return stopped
-}
-
-func parseStringsToRefs(s []string) ([]ast.Ref, error) {
-
- refs := make([]ast.Ref, len(s))
- for i := range refs {
- var err error
- refs[i], err = ast.ParseRef(s[i])
- if err != nil {
- return nil, err
- }
- }
-
- return refs, nil
-}
-
-// helper function to finish a built-in function call. If an error occurred,
-// wrap the error and return it. Otherwise, invoke the iterator if the result
-// was defined.
-func finishFunction(name string, bctx topdown.BuiltinContext, result *ast.Term, err error, iter func(*ast.Term) error) error {
- if err != nil {
- var e *HaltError
- if errors.As(err, &e) {
- tdErr := &topdown.Error{
- Code: topdown.BuiltinErr,
- Message: fmt.Sprintf("%v: %v", name, e.Error()),
- Location: bctx.Location,
- }
- return topdown.Halt{Err: tdErr.Wrap(e)}
- }
- tdErr := &topdown.Error{
- Code: topdown.BuiltinErr,
- Message: fmt.Sprintf("%v: %v", name, err.Error()),
- Location: bctx.Location,
- }
- return tdErr.Wrap(err)
- }
- if result == nil {
- return nil
- }
- return iter(result)
-}
-
-// helper function to return an option that sets a custom built-in function.
-func newFunction(decl *Function, f topdown.BuiltinFunc) func(*Rego) {
- return func(r *Rego) {
- r.builtinDecls[decl.Name] = &ast.Builtin{
- Name: decl.Name,
- Decl: decl.Decl,
- Nondeterministic: decl.Nondeterministic,
- }
- r.builtinFuncs[decl.Name] = &topdown.Builtin{
- Decl: r.builtinDecls[decl.Name],
- Func: f,
- }
- }
-}
-
-func generateJSON(term *ast.Term, ectx *EvalContext) (interface{}, error) {
- return ast.JSONWithOpt(term.Value,
- ast.JSONOpt{
- SortSets: ectx.sortSets,
- CopyMaps: ectx.copyMaps,
- })
-}
-
-func (r *Rego) planQuery(queries []ast.Body, evalQueryType queryType) (*ir.Policy, error) {
- modules := make([]*ast.Module, 0, len(r.compiler.Modules))
- for _, module := range r.compiler.Modules {
- modules = append(modules, module)
- }
-
- decls := make(map[string]*ast.Builtin, len(r.builtinDecls)+len(ast.BuiltinMap))
-
- for k, v := range ast.BuiltinMap {
- decls[k] = v
- }
-
- for k, v := range r.builtinDecls {
- decls[k] = v
- }
-
- const queryName = "eval" // NOTE(tsandall): the query name is arbitrary
-
- p := planner.New().
- WithQueries([]planner.QuerySet{
- {
- Name: queryName,
- Queries: queries,
- RewrittenVars: r.compiledQueries[evalQueryType].compiler.RewrittenVars(),
- },
- }).
- WithModules(modules).
- WithBuiltinDecls(decls).
- WithDebug(r.dump)
-
- policy, err := p.Plan()
- if err != nil {
- return nil, err
- }
- if r.dump != nil {
- fmt.Fprintln(r.dump, "PLAN:")
- fmt.Fprintln(r.dump, "-----")
- err = ir.Pretty(r.dump, policy)
- if err != nil {
- return nil, err
- }
- fmt.Fprintln(r.dump)
- }
- return policy, nil
+ return v1.WithBuiltinFuncs(bis)
}
diff --git a/vendor/github.com/open-policy-agent/opa/rego/resultset.go b/vendor/github.com/open-policy-agent/opa/rego/resultset.go
index e60fa6fbe4..5c03360dfa 100644
--- a/vendor/github.com/open-policy-agent/opa/rego/resultset.go
+++ b/vendor/github.com/open-policy-agent/opa/rego/resultset.go
@@ -1,90 +1,22 @@
package rego
import (
- "fmt"
-
- "github.com/open-policy-agent/opa/ast"
+ v1 "github.com/open-policy-agent/opa/v1/rego"
)
// ResultSet represents a collection of output from Rego evaluation. An empty
// result set represents an undefined query.
-type ResultSet []Result
+type ResultSet = v1.ResultSet
// Vars represents a collection of variable bindings. The keys are the variable
// names and the values are the binding values.
-type Vars map[string]interface{}
-
-// WithoutWildcards returns a copy of v with wildcard variables removed.
-func (v Vars) WithoutWildcards() Vars {
- n := Vars{}
- for k, v := range v {
- if ast.Var(k).IsWildcard() || ast.Var(k).IsGenerated() {
- continue
- }
- n[k] = v
- }
- return n
-}
+type Vars = v1.Vars
// Result defines the output of Rego evaluation.
-type Result struct {
- Expressions []*ExpressionValue `json:"expressions"`
- Bindings Vars `json:"bindings,omitempty"`
-}
-
-func newResult() Result {
- return Result{
- Bindings: Vars{},
- }
-}
+type Result = v1.Result
// Location defines a position in a Rego query or module.
-type Location struct {
- Row int `json:"row"`
- Col int `json:"col"`
-}
+type Location = v1.Location
// ExpressionValue defines the value of an expression in a Rego query.
-type ExpressionValue struct {
- Value interface{} `json:"value"`
- Text string `json:"text"`
- Location *Location `json:"location"`
-}
-
-func newExpressionValue(expr *ast.Expr, value interface{}) *ExpressionValue {
- result := &ExpressionValue{
- Value: value,
- }
- if expr.Location != nil {
- result.Text = string(expr.Location.Text)
- result.Location = &Location{
- Row: expr.Location.Row,
- Col: expr.Location.Col,
- }
- }
- return result
-}
-
-func (ev *ExpressionValue) String() string {
- return fmt.Sprint(ev.Value)
-}
-
-// Allowed is a helper method that'll return true if all of these conditions hold:
-// - the result set only has one element
-// - there is only one expression in the result set's only element
-// - that expression has the value `true`
-// - there are no bindings.
-//
-// If bindings are present, this will yield `false`: it would be a pitfall to
-// return `true` for a query like `data.authz.allow = x`, which always has result
-// set element with value true, but could also have a binding `x: false`.
-func (rs ResultSet) Allowed() bool {
- if len(rs) == 1 && len(rs[0].Bindings) == 0 {
- if exprs := rs[0].Expressions; len(exprs) == 1 {
- if b, ok := exprs[0].Value.(bool); ok {
- return b
- }
- }
- }
- return false
-}
+type ExpressionValue = v1.ExpressionValue
diff --git a/vendor/github.com/open-policy-agent/opa/storage/doc.go b/vendor/github.com/open-policy-agent/opa/storage/doc.go
index 6fa2f86d98..c33db689ed 100644
--- a/vendor/github.com/open-policy-agent/opa/storage/doc.go
+++ b/vendor/github.com/open-policy-agent/opa/storage/doc.go
@@ -3,4 +3,8 @@
// license that can be found in the LICENSE file.
// Package storage exposes the policy engine's storage layer.
+//
+// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended.
+// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead.
+// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information.
package storage
diff --git a/vendor/github.com/open-policy-agent/opa/storage/errors.go b/vendor/github.com/open-policy-agent/opa/storage/errors.go
index 8c789052ed..1403b3a988 100644
--- a/vendor/github.com/open-policy-agent/opa/storage/errors.go
+++ b/vendor/github.com/open-policy-agent/opa/storage/errors.go
@@ -5,118 +5,69 @@
package storage
import (
- "fmt"
+ v1 "github.com/open-policy-agent/opa/v1/storage"
)
const (
// InternalErr indicates an unknown, internal error has occurred.
- InternalErr = "storage_internal_error"
+ InternalErr = v1.InternalErr
// NotFoundErr indicates the path used in the storage operation does not
// locate a document.
- NotFoundErr = "storage_not_found_error"
+ NotFoundErr = v1.NotFoundErr
// WriteConflictErr indicates a write on the path enocuntered a conflicting
// value inside the transaction.
- WriteConflictErr = "storage_write_conflict_error"
+ WriteConflictErr = v1.WriteConflictErr
// InvalidPatchErr indicates an invalid patch/write was issued. The patch
// was rejected.
- InvalidPatchErr = "storage_invalid_patch_error"
+ InvalidPatchErr = v1.InvalidPatchErr
// InvalidTransactionErr indicates an invalid operation was performed
// inside of the transaction.
- InvalidTransactionErr = "storage_invalid_txn_error"
+ InvalidTransactionErr = v1.InvalidTransactionErr
// TriggersNotSupportedErr indicates the caller attempted to register a
// trigger against a store that does not support them.
- TriggersNotSupportedErr = "storage_triggers_not_supported_error"
+ TriggersNotSupportedErr = v1.TriggersNotSupportedErr
// WritesNotSupportedErr indicate the caller attempted to perform a write
// against a store that does not support them.
- WritesNotSupportedErr = "storage_writes_not_supported_error"
+ WritesNotSupportedErr = v1.WritesNotSupportedErr
// PolicyNotSupportedErr indicate the caller attempted to perform a policy
// management operation against a store that does not support them.
- PolicyNotSupportedErr = "storage_policy_not_supported_error"
+ PolicyNotSupportedErr = v1.PolicyNotSupportedErr
)
// Error is the error type returned by the storage layer.
-type Error struct {
- Code string `json:"code"`
- Message string `json:"message"`
-}
-
-func (err *Error) Error() string {
- if err.Message != "" {
- return fmt.Sprintf("%v: %v", err.Code, err.Message)
- }
- return err.Code
-}
+type Error = v1.Error
// IsNotFound returns true if this error is a NotFoundErr.
func IsNotFound(err error) bool {
- switch err := err.(type) {
- case *Error:
- return err.Code == NotFoundErr
- }
- return false
+ return v1.IsNotFound(err)
}
// IsWriteConflictError returns true if this error a WriteConflictErr.
func IsWriteConflictError(err error) bool {
- switch err := err.(type) {
- case *Error:
- return err.Code == WriteConflictErr
- }
- return false
+ return v1.IsWriteConflictError(err)
}
// IsInvalidPatch returns true if this error is a InvalidPatchErr.
func IsInvalidPatch(err error) bool {
- switch err := err.(type) {
- case *Error:
- return err.Code == InvalidPatchErr
- }
- return false
+ return v1.IsInvalidPatch(err)
}
// IsInvalidTransaction returns true if this error is a InvalidTransactionErr.
func IsInvalidTransaction(err error) bool {
- switch err := err.(type) {
- case *Error:
- return err.Code == InvalidTransactionErr
- }
- return false
+ return v1.IsInvalidTransaction(err)
}
// IsIndexingNotSupported is a stub for backwards-compatibility.
//
// Deprecated: We no longer return IndexingNotSupported errors, so it is
// unnecessary to check for them.
-func IsIndexingNotSupported(error) bool { return false }
-
-func writeConflictError(path Path) *Error {
- return &Error{
- Code: WriteConflictErr,
- Message: path.String(),
- }
-}
-
-func triggersNotSupportedError() *Error {
- return &Error{
- Code: TriggersNotSupportedErr,
- }
-}
-
-func writesNotSupportedError() *Error {
- return &Error{
- Code: WritesNotSupportedErr,
- }
-}
-
-func policyNotSupportedError() *Error {
- return &Error{
- Code: PolicyNotSupportedErr,
- }
+func IsIndexingNotSupported(err error) bool {
+ return v1.IsIndexingNotSupported(err)
}
diff --git a/vendor/github.com/open-policy-agent/opa/storage/inmem/doc.go b/vendor/github.com/open-policy-agent/opa/storage/inmem/doc.go
new file mode 100644
index 0000000000..5f536b66dd
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/storage/inmem/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended.
+// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead.
+// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information.
+package inmem
diff --git a/vendor/github.com/open-policy-agent/opa/storage/inmem/inmem.go b/vendor/github.com/open-policy-agent/opa/storage/inmem/inmem.go
index 9f5b8ba258..dabedd4ef8 100644
--- a/vendor/github.com/open-policy-agent/opa/storage/inmem/inmem.go
+++ b/vendor/github.com/open-policy-agent/opa/storage/inmem/inmem.go
@@ -16,443 +16,41 @@
package inmem
import (
- "context"
- "fmt"
"io"
- "path/filepath"
- "strings"
- "sync"
- "sync/atomic"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/internal/merge"
"github.com/open-policy-agent/opa/storage"
- "github.com/open-policy-agent/opa/util"
+ v1 "github.com/open-policy-agent/opa/v1/storage/inmem"
)
// New returns an empty in-memory store.
func New() storage.Store {
- return NewWithOpts()
+ return v1.New()
}
// NewWithOpts returns an empty in-memory store, with extra options passed.
func NewWithOpts(opts ...Opt) storage.Store {
- s := &store{
- triggers: map[*handle]storage.TriggerConfig{},
- policies: map[string][]byte{},
- roundTripOnWrite: true,
- returnASTValuesOnRead: false,
- }
-
- for _, opt := range opts {
- opt(s)
- }
-
- if s.returnASTValuesOnRead {
- s.data = ast.NewObject()
- } else {
- s.data = map[string]interface{}{}
- }
-
- return s
+ return v1.NewWithOpts(opts...)
}
// NewFromObject returns a new in-memory store from the supplied data object.
-func NewFromObject(data map[string]interface{}) storage.Store {
- return NewFromObjectWithOpts(data)
+func NewFromObject(data map[string]any) storage.Store {
+ return v1.NewFromObject(data)
}
// NewFromObjectWithOpts returns a new in-memory store from the supplied data object, with the
// options passed.
-func NewFromObjectWithOpts(data map[string]interface{}, opts ...Opt) storage.Store {
- db := NewWithOpts(opts...)
- ctx := context.Background()
- txn, err := db.NewTransaction(ctx, storage.WriteParams)
- if err != nil {
- panic(err)
- }
- if err := db.Write(ctx, txn, storage.AddOp, storage.Path{}, data); err != nil {
- panic(err)
- }
- if err := db.Commit(ctx, txn); err != nil {
- panic(err)
- }
- return db
+func NewFromObjectWithOpts(data map[string]any, opts ...Opt) storage.Store {
+ return v1.NewFromObjectWithOpts(data, opts...)
}
// NewFromReader returns a new in-memory store from a reader that produces a
// JSON serialized object. This function is for test purposes.
func NewFromReader(r io.Reader) storage.Store {
- return NewFromReaderWithOpts(r)
+ return v1.NewFromReader(r)
}
// NewFromReader returns a new in-memory store from a reader that produces a
// JSON serialized object, with extra options. This function is for test purposes.
func NewFromReaderWithOpts(r io.Reader, opts ...Opt) storage.Store {
- d := util.NewJSONDecoder(r)
- var data map[string]interface{}
- if err := d.Decode(&data); err != nil {
- panic(err)
- }
- return NewFromObjectWithOpts(data, opts...)
-}
-
-type store struct {
- rmu sync.RWMutex // reader-writer lock
- wmu sync.Mutex // writer lock
- xid uint64 // last generated transaction id
- data interface{} // raw or AST data
- policies map[string][]byte // raw policies
- triggers map[*handle]storage.TriggerConfig // registered triggers
-
- // roundTripOnWrite, if true, means that every call to Write round trips the
- // data through JSON before adding the data to the store. Defaults to true.
- roundTripOnWrite bool
-
- // returnASTValuesOnRead, if true, means that the store will eagerly convert data to AST values,
- // and return them on Read.
- // FIXME: naming(?)
- returnASTValuesOnRead bool
-}
-
-type handle struct {
- db *store
-}
-
-func (db *store) NewTransaction(_ context.Context, params ...storage.TransactionParams) (storage.Transaction, error) {
- var write bool
- var ctx *storage.Context
- if len(params) > 0 {
- write = params[0].Write
- ctx = params[0].Context
- }
- xid := atomic.AddUint64(&db.xid, uint64(1))
- if write {
- db.wmu.Lock()
- } else {
- db.rmu.RLock()
- }
- return newTransaction(xid, write, ctx, db), nil
-}
-
-// Truncate implements the storage.Store interface. This method must be called within a transaction.
-func (db *store) Truncate(ctx context.Context, txn storage.Transaction, params storage.TransactionParams, it storage.Iterator) error {
- var update *storage.Update
- var err error
- mergedData := map[string]interface{}{}
-
- underlying, err := db.underlying(txn)
- if err != nil {
- return err
- }
-
- for {
- update, err = it.Next()
- if err != nil {
- break
- }
-
- if update.IsPolicy {
- err = underlying.UpsertPolicy(strings.TrimLeft(update.Path.String(), "/"), update.Value)
- if err != nil {
- return err
- }
- } else {
- var value interface{}
- err = util.Unmarshal(update.Value, &value)
- if err != nil {
- return err
- }
-
- var key []string
- dirpath := strings.TrimLeft(update.Path.String(), "/")
- if len(dirpath) > 0 {
- key = strings.Split(dirpath, "/")
- }
-
- if value != nil {
- obj, err := mktree(key, value)
- if err != nil {
- return err
- }
-
- merged, ok := merge.InterfaceMaps(mergedData, obj)
- if !ok {
- return fmt.Errorf("failed to insert data file from path %s", filepath.Join(key...))
- }
- mergedData = merged
- }
- }
- }
-
- if err != nil && err != io.EOF {
- return err
- }
-
- // For backwards compatibility, check if `RootOverwrite` was configured.
- if params.RootOverwrite {
- newPath, ok := storage.ParsePathEscaped("/")
- if !ok {
- return fmt.Errorf("storage path invalid: %v", newPath)
- }
- return underlying.Write(storage.AddOp, newPath, mergedData)
- }
-
- for _, root := range params.BasePaths {
- newPath, ok := storage.ParsePathEscaped("/" + root)
- if !ok {
- return fmt.Errorf("storage path invalid: %v", newPath)
- }
-
- if value, ok := lookup(newPath, mergedData); ok {
- if len(newPath) > 0 {
- if err := storage.MakeDir(ctx, db, txn, newPath[:len(newPath)-1]); err != nil {
- return err
- }
- }
- if err := underlying.Write(storage.AddOp, newPath, value); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func (db *store) Commit(ctx context.Context, txn storage.Transaction) error {
- underlying, err := db.underlying(txn)
- if err != nil {
- return err
- }
- if underlying.write {
- db.rmu.Lock()
- event := underlying.Commit()
- db.runOnCommitTriggers(ctx, txn, event)
- // Mark the transaction stale after executing triggers, so they can
- // perform store operations if needed.
- underlying.stale = true
- db.rmu.Unlock()
- db.wmu.Unlock()
- } else {
- db.rmu.RUnlock()
- }
- return nil
-}
-
-func (db *store) Abort(_ context.Context, txn storage.Transaction) {
- underlying, err := db.underlying(txn)
- if err != nil {
- panic(err)
- }
- underlying.stale = true
- if underlying.write {
- db.wmu.Unlock()
- } else {
- db.rmu.RUnlock()
- }
-}
-
-func (db *store) ListPolicies(_ context.Context, txn storage.Transaction) ([]string, error) {
- underlying, err := db.underlying(txn)
- if err != nil {
- return nil, err
- }
- return underlying.ListPolicies(), nil
-}
-
-func (db *store) GetPolicy(_ context.Context, txn storage.Transaction, id string) ([]byte, error) {
- underlying, err := db.underlying(txn)
- if err != nil {
- return nil, err
- }
- return underlying.GetPolicy(id)
-}
-
-func (db *store) UpsertPolicy(_ context.Context, txn storage.Transaction, id string, bs []byte) error {
- underlying, err := db.underlying(txn)
- if err != nil {
- return err
- }
- return underlying.UpsertPolicy(id, bs)
-}
-
-func (db *store) DeletePolicy(_ context.Context, txn storage.Transaction, id string) error {
- underlying, err := db.underlying(txn)
- if err != nil {
- return err
- }
- if _, err := underlying.GetPolicy(id); err != nil {
- return err
- }
- return underlying.DeletePolicy(id)
-}
-
-func (db *store) Register(_ context.Context, txn storage.Transaction, config storage.TriggerConfig) (storage.TriggerHandle, error) {
- underlying, err := db.underlying(txn)
- if err != nil {
- return nil, err
- }
- if !underlying.write {
- return nil, &storage.Error{
- Code: storage.InvalidTransactionErr,
- Message: "triggers must be registered with a write transaction",
- }
- }
- h := &handle{db}
- db.triggers[h] = config
- return h, nil
-}
-
-func (db *store) Read(_ context.Context, txn storage.Transaction, path storage.Path) (interface{}, error) {
- underlying, err := db.underlying(txn)
- if err != nil {
- return nil, err
- }
-
- v, err := underlying.Read(path)
- if err != nil {
- return nil, err
- }
-
- return v, nil
-}
-
-func (db *store) Write(_ context.Context, txn storage.Transaction, op storage.PatchOp, path storage.Path, value interface{}) error {
- underlying, err := db.underlying(txn)
- if err != nil {
- return err
- }
- val := util.Reference(value)
- if db.roundTripOnWrite {
- if err := util.RoundTrip(val); err != nil {
- return err
- }
- }
- return underlying.Write(op, path, *val)
-}
-
-func (h *handle) Unregister(_ context.Context, txn storage.Transaction) {
- underlying, err := h.db.underlying(txn)
- if err != nil {
- panic(err)
- }
- if !underlying.write {
- panic(&storage.Error{
- Code: storage.InvalidTransactionErr,
- Message: "triggers must be unregistered with a write transaction",
- })
- }
- delete(h.db.triggers, h)
-}
-
-func (db *store) runOnCommitTriggers(ctx context.Context, txn storage.Transaction, event storage.TriggerEvent) {
- if db.returnASTValuesOnRead && len(db.triggers) > 0 {
- // FIXME: Not very performant for large data.
-
- dataEvents := make([]storage.DataEvent, 0, len(event.Data))
-
- for _, dataEvent := range event.Data {
- if astData, ok := dataEvent.Data.(ast.Value); ok {
- jsn, err := ast.ValueToInterface(astData, illegalResolver{})
- if err != nil {
- panic(err)
- }
- dataEvents = append(dataEvents, storage.DataEvent{
- Path: dataEvent.Path,
- Data: jsn,
- Removed: dataEvent.Removed,
- })
- } else {
- dataEvents = append(dataEvents, dataEvent)
- }
- }
-
- event = storage.TriggerEvent{
- Policy: event.Policy,
- Data: dataEvents,
- Context: event.Context,
- }
- }
-
- for _, t := range db.triggers {
- t.OnCommit(ctx, txn, event)
- }
-}
-
-type illegalResolver struct{}
-
-func (illegalResolver) Resolve(ref ast.Ref) (interface{}, error) {
- return nil, fmt.Errorf("illegal value: %v", ref)
-}
-
-func (db *store) underlying(txn storage.Transaction) (*transaction, error) {
- underlying, ok := txn.(*transaction)
- if !ok {
- return nil, &storage.Error{
- Code: storage.InvalidTransactionErr,
- Message: fmt.Sprintf("unexpected transaction type %T", txn),
- }
- }
- if underlying.db != db {
- return nil, &storage.Error{
- Code: storage.InvalidTransactionErr,
- Message: "unknown transaction",
- }
- }
- if underlying.stale {
- return nil, &storage.Error{
- Code: storage.InvalidTransactionErr,
- Message: "stale transaction",
- }
- }
- return underlying, nil
-}
-
-const rootMustBeObjectMsg = "root must be object"
-const rootCannotBeRemovedMsg = "root cannot be removed"
-
-func invalidPatchError(f string, a ...interface{}) *storage.Error {
- return &storage.Error{
- Code: storage.InvalidPatchErr,
- Message: fmt.Sprintf(f, a...),
- }
-}
-
-func mktree(path []string, value interface{}) (map[string]interface{}, error) {
- if len(path) == 0 {
- // For 0 length path the value is the full tree.
- obj, ok := value.(map[string]interface{})
- if !ok {
- return nil, invalidPatchError(rootMustBeObjectMsg)
- }
- return obj, nil
- }
-
- dir := map[string]interface{}{}
- for i := len(path) - 1; i > 0; i-- {
- dir[path[i]] = value
- value = dir
- dir = map[string]interface{}{}
- }
- dir[path[0]] = value
-
- return dir, nil
-}
-
-func lookup(path storage.Path, data map[string]interface{}) (interface{}, bool) {
- if len(path) == 0 {
- return data, true
- }
- for i := 0; i < len(path)-1; i++ {
- value, ok := data[path[i]]
- if !ok {
- return nil, false
- }
- obj, ok := value.(map[string]interface{})
- if !ok {
- return nil, false
- }
- data = obj
- }
- value, ok := data[path[len(path)-1]]
- return value, ok
+ return v1.NewFromReaderWithOpts(r, opts...)
}
diff --git a/vendor/github.com/open-policy-agent/opa/storage/inmem/opts.go b/vendor/github.com/open-policy-agent/opa/storage/inmem/opts.go
index 2239fc73a3..43f03ef27b 100644
--- a/vendor/github.com/open-policy-agent/opa/storage/inmem/opts.go
+++ b/vendor/github.com/open-policy-agent/opa/storage/inmem/opts.go
@@ -1,7 +1,9 @@
package inmem
+import v1 "github.com/open-policy-agent/opa/v1/storage/inmem"
+
// An Opt modifies store at instantiation.
-type Opt func(*store)
+type Opt = v1.Opt
// OptRoundTripOnWrite sets whether incoming objects written to store are
// round-tripped through JSON to ensure they are serializable to JSON.
@@ -19,9 +21,7 @@ type Opt func(*store)
// and that mutations happening to the objects after they have been passed into
// Write() don't affect their logic.
func OptRoundTripOnWrite(enabled bool) Opt {
- return func(s *store) {
- s.roundTripOnWrite = enabled
- }
+ return v1.OptRoundTripOnWrite(enabled)
}
// OptReturnASTValuesOnRead sets whether data values added to the store should be
@@ -31,7 +31,5 @@ func OptRoundTripOnWrite(enabled bool) Opt {
// which may result in panics if the data is not valid. Callers should ensure that passed data
// can be serialized to AST values; otherwise, it's recommended to also enable OptRoundTripOnWrite.
func OptReturnASTValuesOnRead(enabled bool) Opt {
- return func(s *store) {
- s.returnASTValuesOnRead = enabled
- }
+ return v1.OptReturnASTValuesOnRead(enabled)
}
diff --git a/vendor/github.com/open-policy-agent/opa/storage/interface.go b/vendor/github.com/open-policy-agent/opa/storage/interface.go
index 6baca9a59f..a21b5575e9 100644
--- a/vendor/github.com/open-policy-agent/opa/storage/interface.go
+++ b/vendor/github.com/open-policy-agent/opa/storage/interface.go
@@ -5,243 +5,85 @@
package storage
import (
- "context"
-
- "github.com/open-policy-agent/opa/metrics"
+ v1 "github.com/open-policy-agent/opa/v1/storage"
)
// Transaction defines the interface that identifies a consistent snapshot over
// the policy engine's storage layer.
-type Transaction interface {
- ID() uint64
-}
+type Transaction = v1.Transaction
// Store defines the interface for the storage layer's backend.
-type Store interface {
- Trigger
- Policy
-
- // NewTransaction is called create a new transaction in the store.
- NewTransaction(context.Context, ...TransactionParams) (Transaction, error)
-
- // Read is called to fetch a document referred to by path.
- Read(context.Context, Transaction, Path) (interface{}, error)
-
- // Write is called to modify a document referred to by path.
- Write(context.Context, Transaction, PatchOp, Path, interface{}) error
-
- // Commit is called to finish the transaction. If Commit returns an error, the
- // transaction must be automatically aborted by the Store implementation.
- Commit(context.Context, Transaction) error
-
- // Truncate is called to make a copy of the underlying store, write documents in the new store
- // by creating multiple transactions in the new store as needed and finally swapping
- // over to the new storage instance. This method must be called within a transaction on the original store.
- Truncate(context.Context, Transaction, TransactionParams, Iterator) error
-
- // Abort is called to cancel the transaction.
- Abort(context.Context, Transaction)
-}
+type Store = v1.Store
// MakeDirer defines the interface a Store could realize to override the
// generic MakeDir functionality in storage.MakeDir
-type MakeDirer interface {
- MakeDir(context.Context, Transaction, Path) error
-}
-
-// TransactionParams describes a new transaction.
-type TransactionParams struct {
-
- // BasePaths indicates the top-level paths where write operations will be performed in this transaction.
- BasePaths []string
-
- // RootOverwrite is deprecated. Use BasePaths instead.
- RootOverwrite bool
+type MakeDirer = v1.MakeDirer
- // Write indicates if this transaction will perform any write operations.
- Write bool
+// NonEmptyer allows a store implemention to override NonEmpty())
+type NonEmptyer = v1.NonEmptyer
- // Context contains key/value pairs passed to triggers.
- Context *Context
-}
+// TransactionParams describes a new transaction.
+type TransactionParams = v1.TransactionParams
// Context is a simple container for key/value pairs.
-type Context struct {
- values map[interface{}]interface{}
-}
+type Context = v1.Context
// NewContext returns a new context object.
func NewContext() *Context {
- return &Context{
- values: map[interface{}]interface{}{},
- }
-}
-
-// Get returns the key value in the context.
-func (ctx *Context) Get(key interface{}) interface{} {
- if ctx == nil {
- return nil
- }
- return ctx.values[key]
-}
-
-// Put adds a key/value pair to the context.
-func (ctx *Context) Put(key, value interface{}) {
- ctx.values[key] = value
-}
-
-var metricsKey = struct{}{}
-
-// WithMetrics allows passing metrics via the Context.
-// It puts the metrics object in the ctx, and returns the same
-// ctx (not a copy) for convenience.
-func (ctx *Context) WithMetrics(m metrics.Metrics) *Context {
- ctx.values[metricsKey] = m
- return ctx
-}
-
-// Metrics() allows using a Context's metrics. Returns nil if metrics
-// were not attached to the Context.
-func (ctx *Context) Metrics() metrics.Metrics {
- if m, ok := ctx.values[metricsKey]; ok {
- if met, ok := m.(metrics.Metrics); ok {
- return met
- }
- }
- return nil
+ return v1.NewContext()
}
// WriteParams specifies the TransactionParams for a write transaction.
-var WriteParams = TransactionParams{
- Write: true,
-}
+var WriteParams = v1.WriteParams
// PatchOp is the enumeration of supposed modifications.
-type PatchOp int
+type PatchOp = v1.PatchOp
// Patch supports add, remove, and replace operations.
const (
- AddOp PatchOp = iota
- RemoveOp = iota
- ReplaceOp = iota
+ AddOp = v1.AddOp
+ RemoveOp = v1.RemoveOp
+ ReplaceOp = v1.ReplaceOp
)
// WritesNotSupported provides a default implementation of the write
// interface which may be used if the backend does not support writes.
-type WritesNotSupported struct{}
-
-func (WritesNotSupported) Write(context.Context, Transaction, PatchOp, Path, interface{}) error {
- return writesNotSupportedError()
-}
+type WritesNotSupported = v1.WritesNotSupported
// Policy defines the interface for policy module storage.
-type Policy interface {
- ListPolicies(context.Context, Transaction) ([]string, error)
- GetPolicy(context.Context, Transaction, string) ([]byte, error)
- UpsertPolicy(context.Context, Transaction, string, []byte) error
- DeletePolicy(context.Context, Transaction, string) error
-}
+type Policy = v1.Policy
// PolicyNotSupported provides a default implementation of the policy interface
// which may be used if the backend does not support policy storage.
-type PolicyNotSupported struct{}
-
-// ListPolicies always returns a PolicyNotSupportedErr.
-func (PolicyNotSupported) ListPolicies(context.Context, Transaction) ([]string, error) {
- return nil, policyNotSupportedError()
-}
-
-// GetPolicy always returns a PolicyNotSupportedErr.
-func (PolicyNotSupported) GetPolicy(context.Context, Transaction, string) ([]byte, error) {
- return nil, policyNotSupportedError()
-}
-
-// UpsertPolicy always returns a PolicyNotSupportedErr.
-func (PolicyNotSupported) UpsertPolicy(context.Context, Transaction, string, []byte) error {
- return policyNotSupportedError()
-}
-
-// DeletePolicy always returns a PolicyNotSupportedErr.
-func (PolicyNotSupported) DeletePolicy(context.Context, Transaction, string) error {
- return policyNotSupportedError()
-}
+type PolicyNotSupported = v1.PolicyNotSupported
// PolicyEvent describes a change to a policy.
-type PolicyEvent struct {
- ID string
- Data []byte
- Removed bool
-}
+type PolicyEvent = v1.PolicyEvent
// DataEvent describes a change to a base data document.
-type DataEvent struct {
- Path Path
- Data interface{}
- Removed bool
-}
+type DataEvent = v1.DataEvent
// TriggerEvent describes the changes that caused the trigger to be invoked.
-type TriggerEvent struct {
- Policy []PolicyEvent
- Data []DataEvent
- Context *Context
-}
-
-// IsZero returns true if the TriggerEvent indicates no changes occurred. This
-// function is primarily for test purposes.
-func (e TriggerEvent) IsZero() bool {
- return !e.PolicyChanged() && !e.DataChanged()
-}
-
-// PolicyChanged returns true if the trigger was caused by a policy change.
-func (e TriggerEvent) PolicyChanged() bool {
- return len(e.Policy) > 0
-}
-
-// DataChanged returns true if the trigger was caused by a data change.
-func (e TriggerEvent) DataChanged() bool {
- return len(e.Data) > 0
-}
+type TriggerEvent = v1.TriggerEvent
// TriggerConfig contains the trigger registration configuration.
-type TriggerConfig struct {
-
- // OnCommit is invoked when a transaction is successfully committed. The
- // callback is invoked with a handle to the write transaction that
- // successfully committed before other clients see the changes.
- OnCommit func(context.Context, Transaction, TriggerEvent)
-}
+type TriggerConfig = v1.TriggerConfig
// Trigger defines the interface that stores implement to register for change
// notifications when the store is changed.
-type Trigger interface {
- Register(context.Context, Transaction, TriggerConfig) (TriggerHandle, error)
-}
+type Trigger = v1.Trigger
// TriggersNotSupported provides default implementations of the Trigger
// interface which may be used if the backend does not support triggers.
-type TriggersNotSupported struct{}
-
-// Register always returns an error indicating triggers are not supported.
-func (TriggersNotSupported) Register(context.Context, Transaction, TriggerConfig) (TriggerHandle, error) {
- return nil, triggersNotSupportedError()
-}
+type TriggersNotSupported = v1.TriggersNotSupported
// TriggerHandle defines the interface that can be used to unregister triggers that have
// been registered on a Store.
-type TriggerHandle interface {
- Unregister(context.Context, Transaction)
-}
+type TriggerHandle = v1.TriggerHandle
// Iterator defines the interface that can be used to read files from a directory starting with
// files at the base of the directory, then sub-directories etc.
-type Iterator interface {
- Next() (*Update, error)
-}
+type Iterator = v1.Iterator
// Update contains information about a file
-type Update struct {
- Path Path
- Value []byte
- IsPolicy bool
-}
+type Update = v1.Update
diff --git a/vendor/github.com/open-policy-agent/opa/storage/internal/errors/errors.go b/vendor/github.com/open-policy-agent/opa/storage/internal/errors/errors.go
deleted file mode 100644
index 0bba74b907..0000000000
--- a/vendor/github.com/open-policy-agent/opa/storage/internal/errors/errors.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2021 The OPA Authors. All rights reserved.
-// Use of this source code is governed by an Apache2
-// license that can be found in the LICENSE file.
-
-// Package errors contains reusable error-related code for the storage layer.
-package errors
-
-import (
- "fmt"
-
- "github.com/open-policy-agent/opa/storage"
-)
-
-const ArrayIndexTypeMsg = "array index must be integer"
-const DoesNotExistMsg = "document does not exist"
-const OutOfRangeMsg = "array index out of range"
-
-func NewNotFoundError(path storage.Path) *storage.Error {
- return NewNotFoundErrorWithHint(path, DoesNotExistMsg)
-}
-
-func NewNotFoundErrorWithHint(path storage.Path, hint string) *storage.Error {
- return NewNotFoundErrorf("%v: %v", path.String(), hint)
-}
-
-func NewNotFoundErrorf(f string, a ...interface{}) *storage.Error {
- msg := fmt.Sprintf(f, a...)
- return &storage.Error{
- Code: storage.NotFoundErr,
- Message: msg,
- }
-}
-
-func NewWriteConflictError(p storage.Path) *storage.Error {
- return &storage.Error{
- Code: storage.WriteConflictErr,
- Message: p.String(),
- }
-}
diff --git a/vendor/github.com/open-policy-agent/opa/storage/path.go b/vendor/github.com/open-policy-agent/opa/storage/path.go
index 02ef4cab40..91d4f34f2b 100644
--- a/vendor/github.com/open-policy-agent/opa/storage/path.go
+++ b/vendor/github.com/open-policy-agent/opa/storage/path.go
@@ -5,150 +5,30 @@
package storage
import (
- "fmt"
- "net/url"
- "strconv"
- "strings"
-
"github.com/open-policy-agent/opa/ast"
+ v1 "github.com/open-policy-agent/opa/v1/storage"
)
// Path refers to a document in storage.
-type Path []string
+type Path = v1.Path
// ParsePath returns a new path for the given str.
func ParsePath(str string) (path Path, ok bool) {
- if len(str) == 0 {
- return nil, false
- }
- if str[0] != '/' {
- return nil, false
- }
- if len(str) == 1 {
- return Path{}, true
- }
- parts := strings.Split(str[1:], "/")
- return parts, true
+ return v1.ParsePath(str)
}
// ParsePathEscaped returns a new path for the given escaped str.
func ParsePathEscaped(str string) (path Path, ok bool) {
- path, ok = ParsePath(str)
- if !ok {
- return
- }
- for i := range path {
- segment, err := url.PathUnescape(path[i])
- if err == nil {
- path[i] = segment
- }
- }
- return
+ return v1.ParsePathEscaped(str)
}
// NewPathForRef returns a new path for the given ref.
func NewPathForRef(ref ast.Ref) (path Path, err error) {
-
- if len(ref) == 0 {
- return nil, fmt.Errorf("empty reference (indicates error in caller)")
- }
-
- if len(ref) == 1 {
- return Path{}, nil
- }
-
- path = make(Path, 0, len(ref)-1)
-
- for _, term := range ref[1:] {
- switch v := term.Value.(type) {
- case ast.String:
- path = append(path, string(v))
- case ast.Number:
- path = append(path, v.String())
- case ast.Boolean, ast.Null:
- return nil, &Error{
- Code: NotFoundErr,
- Message: fmt.Sprintf("%v: does not exist", ref),
- }
- case *ast.Array, ast.Object, ast.Set:
- return nil, fmt.Errorf("composites cannot be base document keys: %v", ref)
- default:
- return nil, fmt.Errorf("unresolved reference (indicates error in caller): %v", ref)
- }
- }
-
- return path, nil
-}
-
-// Compare performs lexigraphical comparison on p and other and returns -1 if p
-// is less than other, 0 if p is equal to other, or 1 if p is greater than
-// other.
-func (p Path) Compare(other Path) (cmp int) {
- min := len(p)
- if len(other) < min {
- min = len(other)
- }
- for i := 0; i < min; i++ {
- if cmp := strings.Compare(p[i], other[i]); cmp != 0 {
- return cmp
- }
- }
- if len(p) < len(other) {
- return -1
- }
- if len(p) == len(other) {
- return 0
- }
- return 1
-}
-
-// Equal returns true if p is the same as other.
-func (p Path) Equal(other Path) bool {
- return p.Compare(other) == 0
-}
-
-// HasPrefix returns true if p starts with other.
-func (p Path) HasPrefix(other Path) bool {
- if len(other) > len(p) {
- return false
- }
- for i := range other {
- if p[i] != other[i] {
- return false
- }
- }
- return true
-}
-
-// Ref returns a ref that represents p rooted at head.
-func (p Path) Ref(head *ast.Term) (ref ast.Ref) {
- ref = make(ast.Ref, len(p)+1)
- ref[0] = head
- for i := range p {
- idx, err := strconv.ParseInt(p[i], 10, 64)
- if err == nil {
- ref[i+1] = ast.UIntNumberTerm(uint64(idx))
- } else {
- ref[i+1] = ast.StringTerm(p[i])
- }
- }
- return ref
-}
-
-func (p Path) String() string {
- buf := make([]string, len(p))
- for i := range buf {
- buf[i] = url.PathEscape(p[i])
- }
- return "/" + strings.Join(buf, "/")
+ return v1.NewPathForRef(ref)
}
// MustParsePath returns a new Path for s. If s cannot be parsed, this function
// will panic. This is mostly for test purposes.
func MustParsePath(s string) Path {
- path, ok := ParsePath(s)
- if !ok {
- panic(s)
- }
- return path
+ return v1.MustParsePath(s)
}
diff --git a/vendor/github.com/open-policy-agent/opa/storage/storage.go b/vendor/github.com/open-policy-agent/opa/storage/storage.go
index 2f8a39c597..d1abc1046d 100644
--- a/vendor/github.com/open-policy-agent/opa/storage/storage.go
+++ b/vendor/github.com/open-policy-agent/opa/storage/storage.go
@@ -7,85 +7,34 @@ package storage
import (
"context"
- "github.com/open-policy-agent/opa/ast"
+ v1 "github.com/open-policy-agent/opa/v1/storage"
)
// NewTransactionOrDie is a helper function to create a new transaction. If the
// storage layer cannot create a new transaction, this function will panic. This
// function should only be used for tests.
func NewTransactionOrDie(ctx context.Context, store Store, params ...TransactionParams) Transaction {
- txn, err := store.NewTransaction(ctx, params...)
- if err != nil {
- panic(err)
- }
- return txn
+ return v1.NewTransactionOrDie(ctx, store, params...)
}
// ReadOne is a convenience function to read a single value from the provided Store. It
// will create a new Transaction to perform the read with, and clean up after itself
// should an error occur.
-func ReadOne(ctx context.Context, store Store, path Path) (interface{}, error) {
- txn, err := store.NewTransaction(ctx)
- if err != nil {
- return nil, err
- }
- defer store.Abort(ctx, txn)
-
- return store.Read(ctx, txn, path)
+func ReadOne(ctx context.Context, store Store, path Path) (any, error) {
+ return v1.ReadOne(ctx, store, path)
}
// WriteOne is a convenience function to write a single value to the provided Store. It
// will create a new Transaction to perform the write with, and clean up after itself
// should an error occur.
-func WriteOne(ctx context.Context, store Store, op PatchOp, path Path, value interface{}) error {
- txn, err := store.NewTransaction(ctx, WriteParams)
- if err != nil {
- return err
- }
-
- if err := store.Write(ctx, txn, op, path, value); err != nil {
- store.Abort(ctx, txn)
- return err
- }
-
- return store.Commit(ctx, txn)
+func WriteOne(ctx context.Context, store Store, op PatchOp, path Path, value any) error {
+ return v1.WriteOne(ctx, store, op, path, value)
}
// MakeDir inserts an empty object at path. If the parent path does not exist,
// MakeDir will create it recursively.
func MakeDir(ctx context.Context, store Store, txn Transaction, path Path) error {
-
- // Allow the Store implementation to deal with this in its own way.
- if md, ok := store.(MakeDirer); ok {
- return md.MakeDir(ctx, txn, path)
- }
-
- if len(path) == 0 {
- return nil
- }
-
- node, err := store.Read(ctx, txn, path)
- if err != nil {
- if !IsNotFound(err) {
- return err
- }
-
- if err := MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil {
- return err
- }
-
- return store.Write(ctx, txn, AddOp, path, map[string]interface{}{})
- }
-
- if _, ok := node.(map[string]interface{}); ok {
- return nil
- }
-
- if _, ok := node.(ast.Object); ok {
- return nil
- }
-
- return writeConflictError(path)
+ return v1.MakeDir(ctx, store, txn, path)
}
// Txn is a convenience function that executes f inside a new transaction
@@ -93,44 +42,12 @@ func MakeDir(ctx context.Context, store Store, txn Transaction, path Path) error
// aborted and the error is returned. Otherwise, the transaction is committed
// and the result of the commit is returned.
func Txn(ctx context.Context, store Store, params TransactionParams, f func(Transaction) error) error {
-
- txn, err := store.NewTransaction(ctx, params)
- if err != nil {
- return err
- }
-
- if err := f(txn); err != nil {
- store.Abort(ctx, txn)
- return err
- }
-
- return store.Commit(ctx, txn)
+ return v1.Txn(ctx, store, params, f)
}
// NonEmpty returns a function that tests if a path is non-empty. A
// path is non-empty if a Read on the path returns a value or a Read
// on any of the path prefixes returns a non-object value.
func NonEmpty(ctx context.Context, store Store, txn Transaction) func([]string) (bool, error) {
- return func(path []string) (bool, error) {
- if _, err := store.Read(ctx, txn, Path(path)); err == nil {
- return true, nil
- } else if !IsNotFound(err) {
- return false, err
- }
- for i := len(path) - 1; i > 0; i-- {
- val, err := store.Read(ctx, txn, Path(path[:i]))
- if err != nil && !IsNotFound(err) {
- return false, err
- } else if err == nil {
- if _, ok := val.(map[string]interface{}); ok {
- return false, nil
- }
- if _, ok := val.(ast.Object); ok {
- return false, nil
- }
- return true, nil
- }
- }
- return false, nil
- }
+ return v1.NonEmpty(ctx, store, txn)
}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/builtins.go b/vendor/github.com/open-policy-agent/opa/topdown/builtins.go
index cf694d4331..f28c6c795d 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/builtins.go
+++ b/vendor/github.com/open-policy-agent/opa/topdown/builtins.go
@@ -5,219 +5,63 @@
package topdown
import (
- "context"
- "encoding/binary"
- "fmt"
- "io"
- "math/rand"
-
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/metrics"
- "github.com/open-policy-agent/opa/topdown/builtins"
- "github.com/open-policy-agent/opa/topdown/cache"
- "github.com/open-policy-agent/opa/topdown/print"
- "github.com/open-policy-agent/opa/tracing"
+ v1 "github.com/open-policy-agent/opa/v1/topdown"
)
type (
// Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead.
- FunctionalBuiltin1 func(op1 ast.Value) (output ast.Value, err error)
+ FunctionalBuiltin1 = v1.FunctionalBuiltin1 //nolint:staticcheck // SA1019: Intentional use of deprecated type.
// Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead.
- FunctionalBuiltin2 func(op1, op2 ast.Value) (output ast.Value, err error)
+ FunctionalBuiltin2 = v1.FunctionalBuiltin2 //nolint:staticcheck // SA1019: Intentional use of deprecated type.
// Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead.
- FunctionalBuiltin3 func(op1, op2, op3 ast.Value) (output ast.Value, err error)
+ FunctionalBuiltin3 = v1.FunctionalBuiltin3 //nolint:staticcheck // SA1019: Intentional use of deprecated type.
// Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead.
- FunctionalBuiltin4 func(op1, op2, op3, op4 ast.Value) (output ast.Value, err error)
+ FunctionalBuiltin4 = v1.FunctionalBuiltin4 //nolint:staticcheck // SA1019: Intentional use of deprecated type.
// BuiltinContext contains context from the evaluator that may be used by
// built-in functions.
- BuiltinContext struct {
- Context context.Context // request context that was passed when query started
- Metrics metrics.Metrics // metrics registry for recording built-in specific metrics
- Seed io.Reader // randomization source
- Time *ast.Term // wall clock time
- Cancel Cancel // atomic value that signals evaluation to halt
- Runtime *ast.Term // runtime information on the OPA instance
- Cache builtins.Cache // built-in function state cache
- InterQueryBuiltinCache cache.InterQueryCache // cross-query built-in function state cache
- InterQueryBuiltinValueCache cache.InterQueryValueCache // cross-query built-in function state value cache. this cache is useful for scenarios where the entry size cannot be calculated
- NDBuiltinCache builtins.NDBCache // cache for non-deterministic built-in state
- Location *ast.Location // location of built-in call
- Tracers []Tracer // Deprecated: Use QueryTracers instead
- QueryTracers []QueryTracer // tracer objects for trace() built-in function
- TraceEnabled bool // indicates whether tracing is enabled for the evaluation
- QueryID uint64 // identifies query being evaluated
- ParentID uint64 // identifies parent of query being evaluated
- PrintHook print.Hook // provides callback function to use for printing
- DistributedTracingOpts tracing.Options // options to be used by distributed tracing.
- rand *rand.Rand // randomization source for non-security-sensitive operations
- Capabilities *ast.Capabilities
- }
+ BuiltinContext = v1.BuiltinContext
// BuiltinFunc defines an interface for implementing built-in functions.
// The built-in function is called with the plugged operands from the call
// (including the output operands.) The implementation should evaluate the
// operands and invoke the iterator for each successful/defined output
// value.
- BuiltinFunc func(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error
+ BuiltinFunc = v1.BuiltinFunc
)
-// Rand returns a random number generator based on the Seed for this built-in
-// context. The random number will be re-used across multiple calls to this
-// function. If a random number generator cannot be created, an error is
-// returned.
-func (bctx *BuiltinContext) Rand() (*rand.Rand, error) {
-
- if bctx.rand != nil {
- return bctx.rand, nil
- }
-
- seed, err := readInt64(bctx.Seed)
- if err != nil {
- return nil, err
- }
-
- bctx.rand = rand.New(rand.NewSource(seed))
- return bctx.rand, nil
-}
-
// RegisterBuiltinFunc adds a new built-in function to the evaluation engine.
func RegisterBuiltinFunc(name string, f BuiltinFunc) {
- builtinFunctions[name] = builtinErrorWrapper(name, f)
+ v1.RegisterBuiltinFunc(name, f)
}
// Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead.
func RegisterFunctionalBuiltin1(name string, fun FunctionalBuiltin1) {
- builtinFunctions[name] = functionalWrapper1(name, fun)
+ v1.RegisterFunctionalBuiltin1(name, fun)
}
// Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead.
func RegisterFunctionalBuiltin2(name string, fun FunctionalBuiltin2) {
- builtinFunctions[name] = functionalWrapper2(name, fun)
+ v1.RegisterFunctionalBuiltin2(name, fun)
}
// Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead.
func RegisterFunctionalBuiltin3(name string, fun FunctionalBuiltin3) {
- builtinFunctions[name] = functionalWrapper3(name, fun)
+ v1.RegisterFunctionalBuiltin3(name, fun)
}
// Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead.
func RegisterFunctionalBuiltin4(name string, fun FunctionalBuiltin4) {
- builtinFunctions[name] = functionalWrapper4(name, fun)
+ v1.RegisterFunctionalBuiltin4(name, fun)
}
// GetBuiltin returns a built-in function implementation, nil if no built-in found.
func GetBuiltin(name string) BuiltinFunc {
- return builtinFunctions[name]
+ return v1.GetBuiltin(name)
}
// Deprecated: The BuiltinEmpty type is no longer needed. Use nil return values instead.
-type BuiltinEmpty struct{}
-
-func (BuiltinEmpty) Error() string {
- return ""
-}
-
-var builtinFunctions = map[string]BuiltinFunc{}
-
-func builtinErrorWrapper(name string, fn BuiltinFunc) BuiltinFunc {
- return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
- err := fn(bctx, args, iter)
- if err == nil {
- return nil
- }
- return handleBuiltinErr(name, bctx.Location, err)
- }
-}
-
-func functionalWrapper1(name string, fn FunctionalBuiltin1) BuiltinFunc {
- return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
- result, err := fn(args[0].Value)
- if err == nil {
- return iter(ast.NewTerm(result))
- }
- return handleBuiltinErr(name, bctx.Location, err)
- }
-}
-
-func functionalWrapper2(name string, fn FunctionalBuiltin2) BuiltinFunc {
- return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
- result, err := fn(args[0].Value, args[1].Value)
- if err == nil {
- return iter(ast.NewTerm(result))
- }
- return handleBuiltinErr(name, bctx.Location, err)
- }
-}
-
-func functionalWrapper3(name string, fn FunctionalBuiltin3) BuiltinFunc {
- return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
- result, err := fn(args[0].Value, args[1].Value, args[2].Value)
- if err == nil {
- return iter(ast.NewTerm(result))
- }
- return handleBuiltinErr(name, bctx.Location, err)
- }
-}
-
-func functionalWrapper4(name string, fn FunctionalBuiltin4) BuiltinFunc {
- return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
- result, err := fn(args[0].Value, args[1].Value, args[2].Value, args[3].Value)
- if err == nil {
- return iter(ast.NewTerm(result))
- }
- if _, empty := err.(BuiltinEmpty); empty {
- return nil
- }
- return handleBuiltinErr(name, bctx.Location, err)
- }
-}
-
-func handleBuiltinErr(name string, loc *ast.Location, err error) error {
- switch err := err.(type) {
- case BuiltinEmpty:
- return nil
- case *Error, Halt:
- return err
- case builtins.ErrOperand:
- e := &Error{
- Code: TypeErr,
- Message: fmt.Sprintf("%v: %v", name, err.Error()),
- Location: loc,
- }
- return e.Wrap(err)
- default:
- e := &Error{
- Code: BuiltinErr,
- Message: fmt.Sprintf("%v: %v", name, err.Error()),
- Location: loc,
- }
- return e.Wrap(err)
- }
-}
-
-func readInt64(r io.Reader) (int64, error) {
- bs := make([]byte, 8)
- n, err := io.ReadFull(r, bs)
- if n != len(bs) || err != nil {
- return 0, err
- }
- return int64(binary.BigEndian.Uint64(bs)), nil
-}
-
-// Used to get older-style (ast.Term, error) tuples out of newer functions.
-func getResult(fn BuiltinFunc, operands ...*ast.Term) (*ast.Term, error) {
- var result *ast.Term
- extractionFn := func(r *ast.Term) error {
- result = r
- return nil
- }
- err := fn(BuiltinContext{}, operands, extractionFn)
- if err != nil {
- return nil, err
- }
- return result, nil
-}
+type BuiltinEmpty = v1.Builtin
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/cache.go b/vendor/github.com/open-policy-agent/opa/topdown/cache.go
index 265457e02f..bb39df03e0 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/cache.go
+++ b/vendor/github.com/open-policy-agent/opa/topdown/cache.go
@@ -5,348 +5,15 @@
package topdown
import (
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/util"
+ v1 "github.com/open-policy-agent/opa/v1/topdown"
)
// VirtualCache defines the interface for a cache that stores the results of
// evaluated virtual documents (rules).
// The cache is a stack of frames, where each frame is a mapping from references
// to values.
-type VirtualCache interface {
- // Push pushes a new, empty frame of value mappings onto the stack.
- Push()
-
- // Pop pops the top frame of value mappings from the stack, removing all associated entries.
- Pop()
-
- // Get returns the value associated with the given reference. The second return value
- // indicates whether the reference has a recorded 'undefined' result.
- Get(ref ast.Ref) (*ast.Term, bool)
-
- // Put associates the given reference with the given value. If the value is nil, the reference
- // is marked as having an 'undefined' result.
- Put(ref ast.Ref, value *ast.Term)
-
- // Keys returns the set of keys that have been cached for the active frame.
- Keys() []ast.Ref
-}
-
-type virtualCache struct {
- stack []*virtualCacheElem
-}
-
-type virtualCacheElem struct {
- value *ast.Term
- children *util.HashMap
- undefined bool
-}
+type VirtualCache = v1.VirtualCache
func NewVirtualCache() VirtualCache {
- cache := &virtualCache{}
- cache.Push()
- return cache
-}
-
-func (c *virtualCache) Push() {
- c.stack = append(c.stack, newVirtualCacheElem())
-}
-
-func (c *virtualCache) Pop() {
- c.stack = c.stack[:len(c.stack)-1]
-}
-
-// Returns the resolved value of the AST term and a flag indicating if the value
-// should be interpretted as undefined:
-//
-// nil, true indicates the ref is undefined
-// ast.Term, false indicates the ref is defined
-// nil, false indicates the ref has not been cached
-// ast.Term, true is impossible
-func (c *virtualCache) Get(ref ast.Ref) (*ast.Term, bool) {
- node := c.stack[len(c.stack)-1]
- for i := 0; i < len(ref); i++ {
- x, ok := node.children.Get(ref[i])
- if !ok {
- return nil, false
- }
- node = x.(*virtualCacheElem)
- }
- if node.undefined {
- return nil, true
- }
-
- return node.value, false
-}
-
-// If value is a nil pointer, set the 'undefined' flag on the cache element to
-// indicate that the Ref has resolved to undefined.
-func (c *virtualCache) Put(ref ast.Ref, value *ast.Term) {
- node := c.stack[len(c.stack)-1]
- for i := 0; i < len(ref); i++ {
- x, ok := node.children.Get(ref[i])
- if ok {
- node = x.(*virtualCacheElem)
- } else {
- next := newVirtualCacheElem()
- node.children.Put(ref[i], next)
- node = next
- }
- }
- if value != nil {
- node.value = value
- } else {
- node.undefined = true
- }
-}
-
-func (c *virtualCache) Keys() []ast.Ref {
- node := c.stack[len(c.stack)-1]
- return keysRecursive(nil, node)
-}
-
-func keysRecursive(root ast.Ref, node *virtualCacheElem) []ast.Ref {
- var keys []ast.Ref
- node.children.Iter(func(k, v util.T) bool {
- ref := root.Append(k.(*ast.Term))
- if v.(*virtualCacheElem).value != nil {
- keys = append(keys, ref)
- }
- if v.(*virtualCacheElem).children.Len() > 0 {
- keys = append(keys, keysRecursive(ref, v.(*virtualCacheElem))...)
- }
- return false
- })
- return keys
-}
-
-func newVirtualCacheElem() *virtualCacheElem {
- return &virtualCacheElem{children: newVirtualCacheHashMap()}
-}
-
-func newVirtualCacheHashMap() *util.HashMap {
- return util.NewHashMap(func(a, b util.T) bool {
- return a.(*ast.Term).Equal(b.(*ast.Term))
- }, func(x util.T) int {
- return x.(*ast.Term).Hash()
- })
-}
-
-// baseCache implements a trie structure to cache base documents read out of
-// storage. Values inserted into the cache may contain other values that were
-// previously inserted. In this case, the previous values are erased from the
-// structure.
-type baseCache struct {
- root *baseCacheElem
-}
-
-func newBaseCache() *baseCache {
- return &baseCache{
- root: newBaseCacheElem(),
- }
-}
-
-func (c *baseCache) Get(ref ast.Ref) ast.Value {
- node := c.root
- for i := 0; i < len(ref); i++ {
- node = node.children[ref[i].Value]
- if node == nil {
- return nil
- } else if node.value != nil {
- result, err := node.value.Find(ref[i+1:])
- if err != nil {
- return nil
- }
- return result
- }
- }
- return nil
-}
-
-func (c *baseCache) Put(ref ast.Ref, value ast.Value) {
- node := c.root
- for i := 0; i < len(ref); i++ {
- if child, ok := node.children[ref[i].Value]; ok {
- node = child
- } else {
- child := newBaseCacheElem()
- node.children[ref[i].Value] = child
- node = child
- }
- }
- node.set(value)
-}
-
-type baseCacheElem struct {
- value ast.Value
- children map[ast.Value]*baseCacheElem
-}
-
-func newBaseCacheElem() *baseCacheElem {
- return &baseCacheElem{
- children: map[ast.Value]*baseCacheElem{},
- }
-}
-
-func (e *baseCacheElem) set(value ast.Value) {
- e.value = value
- e.children = map[ast.Value]*baseCacheElem{}
-}
-
-type refStack struct {
- sl []refStackElem
-}
-
-type refStackElem struct {
- refs []ast.Ref
-}
-
-func newRefStack() *refStack {
- return &refStack{}
-}
-
-func (s *refStack) Push(refs []ast.Ref) {
- s.sl = append(s.sl, refStackElem{refs: refs})
-}
-
-func (s *refStack) Pop() {
- s.sl = s.sl[:len(s.sl)-1]
-}
-
-func (s *refStack) Prefixed(ref ast.Ref) bool {
- if s != nil {
- for i := len(s.sl) - 1; i >= 0; i-- {
- for j := range s.sl[i].refs {
- if ref.HasPrefix(s.sl[i].refs[j]) {
- return true
- }
- }
- }
- }
- return false
-}
-
-type comprehensionCache struct {
- stack []map[*ast.Term]*comprehensionCacheElem
-}
-
-type comprehensionCacheElem struct {
- value *ast.Term
- children *util.HashMap
-}
-
-func newComprehensionCache() *comprehensionCache {
- cache := &comprehensionCache{}
- cache.Push()
- return cache
-}
-
-func (c *comprehensionCache) Push() {
- c.stack = append(c.stack, map[*ast.Term]*comprehensionCacheElem{})
-}
-
-func (c *comprehensionCache) Pop() {
- c.stack = c.stack[:len(c.stack)-1]
-}
-
-func (c *comprehensionCache) Elem(t *ast.Term) (*comprehensionCacheElem, bool) {
- elem, ok := c.stack[len(c.stack)-1][t]
- return elem, ok
-}
-
-func (c *comprehensionCache) Set(t *ast.Term, elem *comprehensionCacheElem) {
- c.stack[len(c.stack)-1][t] = elem
-}
-
-func newComprehensionCacheElem() *comprehensionCacheElem {
- return &comprehensionCacheElem{children: newComprehensionCacheHashMap()}
-}
-
-func (c *comprehensionCacheElem) Get(key []*ast.Term) *ast.Term {
- node := c
- for i := 0; i < len(key); i++ {
- x, ok := node.children.Get(key[i])
- if !ok {
- return nil
- }
- node = x.(*comprehensionCacheElem)
- }
- return node.value
-}
-
-func (c *comprehensionCacheElem) Put(key []*ast.Term, value *ast.Term) {
- node := c
- for i := 0; i < len(key); i++ {
- x, ok := node.children.Get(key[i])
- if ok {
- node = x.(*comprehensionCacheElem)
- } else {
- next := newComprehensionCacheElem()
- node.children.Put(key[i], next)
- node = next
- }
- }
- node.value = value
-}
-
-func newComprehensionCacheHashMap() *util.HashMap {
- return util.NewHashMap(func(a, b util.T) bool {
- return a.(*ast.Term).Equal(b.(*ast.Term))
- }, func(x util.T) int {
- return x.(*ast.Term).Hash()
- })
-}
-
-type functionMocksStack struct {
- stack []*functionMocksElem
-}
-
-type functionMocksElem []frame
-
-type frame map[string]*ast.Term
-
-func newFunctionMocksStack() *functionMocksStack {
- stack := &functionMocksStack{}
- stack.Push()
- return stack
-}
-
-func newFunctionMocksElem() *functionMocksElem {
- return &functionMocksElem{}
-}
-
-func (s *functionMocksStack) Push() {
- s.stack = append(s.stack, newFunctionMocksElem())
-}
-
-func (s *functionMocksStack) Pop() {
- s.stack = s.stack[:len(s.stack)-1]
-}
-
-func (s *functionMocksStack) PopPairs() {
- current := s.stack[len(s.stack)-1]
- *current = (*current)[:len(*current)-1]
-}
-
-func (s *functionMocksStack) PutPairs(mocks [][2]*ast.Term) {
- el := frame{}
- for i := range mocks {
- el[mocks[i][0].Value.String()] = mocks[i][1]
- }
- s.Put(el)
-}
-
-func (s *functionMocksStack) Put(el frame) {
- current := s.stack[len(s.stack)-1]
- *current = append(*current, el)
-}
-
-func (s *functionMocksStack) Get(f ast.Ref) (*ast.Term, bool) {
- current := *s.stack[len(s.stack)-1]
- for i := len(current) - 1; i >= 0; i-- {
- if r, ok := current[i][f.String()]; ok {
- return r, true
- }
- }
- return nil, false
+ return v1.NewVirtualCache()
}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/cancel.go b/vendor/github.com/open-policy-agent/opa/topdown/cancel.go
index 534e0799a1..395a14a80d 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/cancel.go
+++ b/vendor/github.com/open-policy-agent/opa/topdown/cancel.go
@@ -5,29 +5,14 @@
package topdown
import (
- "sync/atomic"
+ v1 "github.com/open-policy-agent/opa/v1/topdown"
)
// Cancel defines the interface for cancelling topdown queries. Cancel
// operations are thread-safe and idempotent.
-type Cancel interface {
- Cancel()
- Cancelled() bool
-}
-
-type cancel struct {
- flag int32
-}
+type Cancel = v1.Cancel
// NewCancel returns a new Cancel object.
func NewCancel() Cancel {
- return &cancel{}
-}
-
-func (c *cancel) Cancel() {
- atomic.StoreInt32(&c.flag, 1)
-}
-
-func (c *cancel) Cancelled() bool {
- return atomic.LoadInt32(&c.flag) != 0
+ return v1.NewCancel()
}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/doc.go b/vendor/github.com/open-policy-agent/opa/topdown/doc.go
index 9aa7aa45c5..a303ef7886 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/doc.go
+++ b/vendor/github.com/open-policy-agent/opa/topdown/doc.go
@@ -7,4 +7,8 @@
// The topdown implementation is a modified version of the standard top-down
// evaluation algorithm used in Datalog. References and comprehensions are
// evaluated eagerly while all other terms are evaluated lazily.
+//
+// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended.
+// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead.
+// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information.
package topdown
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/errors.go b/vendor/github.com/open-policy-agent/opa/topdown/errors.go
index 918df6c853..47853ec6d1 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/errors.go
+++ b/vendor/github.com/open-policy-agent/opa/topdown/errors.go
@@ -5,145 +5,50 @@
package topdown
import (
- "errors"
- "fmt"
-
- "github.com/open-policy-agent/opa/ast"
+ v1 "github.com/open-policy-agent/opa/v1/topdown"
)
// Halt is a special error type that built-in function implementations return to indicate
// that policy evaluation should stop immediately.
-type Halt struct {
- Err error
-}
-
-func (h Halt) Error() string {
- return h.Err.Error()
-}
-
-func (h Halt) Unwrap() error { return h.Err }
+type Halt = v1.Halt
// Error is the error type returned by the Eval and Query functions when
// an evaluation error occurs.
-type Error struct {
- Code string `json:"code"`
- Message string `json:"message"`
- Location *ast.Location `json:"location,omitempty"`
- err error `json:"-"`
-}
+type Error = v1.Error
const (
// InternalErr represents an unknown evaluation error.
- InternalErr string = "eval_internal_error"
+ InternalErr = v1.InternalErr
// CancelErr indicates the evaluation process was cancelled.
- CancelErr string = "eval_cancel_error"
+ CancelErr = v1.CancelErr
// ConflictErr indicates a conflict was encountered during evaluation. For
// instance, a conflict occurs if a rule produces multiple, differing values
// for the same key in an object. Conflict errors indicate the policy does
// not account for the data loaded into the policy engine.
- ConflictErr string = "eval_conflict_error"
+ ConflictErr = v1.ConflictErr
// TypeErr indicates evaluation stopped because an expression was applied to
// a value of an inappropriate type.
- TypeErr string = "eval_type_error"
+ TypeErr = v1.TypeErr
// BuiltinErr indicates a built-in function received a semantically invalid
// input or encountered some kind of runtime error, e.g., connection
// timeout, connection refused, etc.
- BuiltinErr string = "eval_builtin_error"
+ BuiltinErr = v1.BuiltinErr
// WithMergeErr indicates that the real and replacement data could not be merged.
- WithMergeErr string = "eval_with_merge_error"
+ WithMergeErr = v1.WithMergeErr
)
// IsError returns true if the err is an Error.
func IsError(err error) bool {
- var e *Error
- return errors.As(err, &e)
+ return v1.IsError(err)
}
// IsCancel returns true if err was caused by cancellation.
func IsCancel(err error) bool {
- return errors.Is(err, &Error{Code: CancelErr})
-}
-
-// Is allows matching topdown errors using errors.Is (see IsCancel).
-func (e *Error) Is(target error) bool {
- var t *Error
- if errors.As(target, &t) {
- return (t.Code == "" || e.Code == t.Code) &&
- (t.Message == "" || e.Message == t.Message) &&
- (t.Location == nil || t.Location.Compare(e.Location) == 0)
- }
- return false
-}
-
-func (e *Error) Error() string {
- msg := fmt.Sprintf("%v: %v", e.Code, e.Message)
-
- if e.Location != nil {
- msg = e.Location.String() + ": " + msg
- }
-
- return msg
-}
-
-func (e *Error) Wrap(err error) *Error {
- e.err = err
- return e
-}
-
-func (e *Error) Unwrap() error {
- return e.err
-}
-
-func functionConflictErr(loc *ast.Location) error {
- return &Error{
- Code: ConflictErr,
- Location: loc,
- Message: "functions must not produce multiple outputs for same inputs",
- }
-}
-
-func completeDocConflictErr(loc *ast.Location) error {
- return &Error{
- Code: ConflictErr,
- Location: loc,
- Message: "complete rules must not produce multiple outputs",
- }
-}
-
-func objectDocKeyConflictErr(loc *ast.Location) error {
- return &Error{
- Code: ConflictErr,
- Location: loc,
- Message: "object keys must be unique",
- }
-}
-
-func unsupportedBuiltinErr(loc *ast.Location) error {
- return &Error{
- Code: InternalErr,
- Location: loc,
- Message: "unsupported built-in",
- }
-}
-
-func mergeConflictErr(loc *ast.Location) error {
- return &Error{
- Code: WithMergeErr,
- Location: loc,
- Message: "real and replacement data could not be merged",
- }
-}
-
-func internalErr(loc *ast.Location, msg string) error {
- return &Error{
- Code: InternalErr,
- Location: loc,
- Message: msg,
- }
+ return v1.IsCancel(err)
}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/graphql.go b/vendor/github.com/open-policy-agent/opa/topdown/graphql.go
index 8fb1b58a76..3729b14daa 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/graphql.go
+++ b/vendor/github.com/open-policy-agent/opa/topdown/graphql.go
@@ -9,15 +9,15 @@ import (
"fmt"
"strings"
- gqlast "github.com/open-policy-agent/opa/internal/gqlparser/ast"
- gqlparser "github.com/open-policy-agent/opa/internal/gqlparser/parser"
- gqlvalidator "github.com/open-policy-agent/opa/internal/gqlparser/validator"
+ gqlast "github.com/vektah/gqlparser/v2/ast"
+ gqlparser "github.com/vektah/gqlparser/v2/parser"
+ gqlvalidator "github.com/vektah/gqlparser/v2/validator"
// Side-effecting import. Triggers GraphQL library's validation rule init() functions.
- _ "github.com/open-policy-agent/opa/internal/gqlparser/validator/rules"
+ _ "github.com/vektah/gqlparser/v2/validator/rules"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
// Parses a GraphQL schema, and returns the GraphQL AST for the schema.
@@ -100,7 +100,7 @@ func convertSchema(schemaDoc *gqlast.SchemaDocument) (*gqlast.Schema, error) {
// Converts an ast.Object into a gqlast.QueryDocument object.
func objectToQueryDocument(value ast.Object) (*gqlast.QueryDocument, error) {
- // Convert ast.Term to interface{} for JSON encoding below.
+ // Convert ast.Term to any for JSON encoding below.
asJSON, err := ast.JSON(value)
if err != nil {
return nil, err
@@ -121,7 +121,7 @@ func objectToQueryDocument(value ast.Object) (*gqlast.QueryDocument, error) {
// Converts an ast.Object into a gqlast.SchemaDocument object.
func objectToSchemaDocument(value ast.Object) (*gqlast.SchemaDocument, error) {
- // Convert ast.Term to interface{} for JSON encoding below.
+ // Convert ast.Term to any for JSON encoding below.
asJSON, err := ast.JSON(value)
if err != nil {
return nil, err
@@ -160,7 +160,7 @@ func pruneIrrelevantGraphQLASTNodes(value ast.Value) ast.Value {
// Iterate over the array's elements, and do the following:
// - Drop any Nulls
// - Drop any any empty object/array value (after running the pruner)
- for i := 0; i < x.Len(); i++ {
+ for i := range x.Len() {
vTerm := x.Elem(i)
switch v := vTerm.Value.(type) {
case ast.Null:
@@ -295,7 +295,7 @@ func builtinGraphQLParseAndVerify(_ BuiltinContext, operands []*ast.Term, iter f
var err error
unverified := ast.ArrayTerm(
- ast.BooleanTerm(false),
+ ast.InternedTerm(false),
ast.NewTerm(ast.NewObject()),
ast.NewTerm(ast.NewObject()),
)
@@ -353,7 +353,7 @@ func builtinGraphQLParseAndVerify(_ BuiltinContext, operands []*ast.Term, iter f
// Construct return value.
verified := ast.ArrayTerm(
- ast.BooleanTerm(true),
+ ast.InternedTerm(true),
ast.NewTerm(queryResult),
ast.NewTerm(querySchema),
)
@@ -421,10 +421,10 @@ func builtinGraphQLIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*as
queryDoc, err = objectToQueryDocument(x)
default:
// Error if wrong type.
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
if err != nil {
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
switch x := operands[1].Value.(type) {
@@ -434,23 +434,23 @@ func builtinGraphQLIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*as
schemaDoc, err = objectToSchemaDocument(x)
default:
// Error if wrong type.
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
if err != nil {
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
// Validate the query against the schema, erroring if there's an issue.
schema, err := convertSchema(schemaDoc)
if err != nil {
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
if err := validateQuery(schema, queryDoc); err != nil {
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
// If we got this far, the GraphQL query passed validation.
- return iter(ast.BooleanTerm(true))
+ return iter(ast.InternedTerm(true))
}
func builtinGraphQLSchemaIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -464,15 +464,15 @@ func builtinGraphQLSchemaIsValid(_ BuiltinContext, operands []*ast.Term, iter fu
schemaDoc, err = objectToSchemaDocument(x)
default:
// Error if wrong type.
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
if err != nil {
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
// Validate the schema, this determines the result
_, err = convertSchema(schemaDoc)
- return iter(ast.BooleanTerm(err == nil))
+ return iter(ast.InternedTerm(err == nil))
}
func init() {
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/http.go b/vendor/github.com/open-policy-agent/opa/topdown/http.go
index 18bfd3c722..693ea4048c 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/http.go
+++ b/vendor/github.com/open-policy-agent/opa/topdown/http.go
@@ -5,1616 +5,13 @@
package topdown
import (
- "bytes"
- "context"
- "crypto/tls"
- "crypto/x509"
- "encoding/json"
- "fmt"
- "io"
- "math"
- "net"
- "net/http"
- "net/url"
- "os"
- "runtime"
- "strconv"
- "strings"
- "time"
-
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/internal/version"
- "github.com/open-policy-agent/opa/topdown/builtins"
- "github.com/open-policy-agent/opa/topdown/cache"
- "github.com/open-policy-agent/opa/tracing"
- "github.com/open-policy-agent/opa/util"
-)
-
-type cachingMode string
-
-const (
- defaultHTTPRequestTimeoutEnv = "HTTP_SEND_TIMEOUT"
- defaultCachingMode cachingMode = "serialized"
- cachingModeDeserialized cachingMode = "deserialized"
-)
-
-var defaultHTTPRequestTimeout = time.Second * 5
-
-var allowedKeyNames = [...]string{
- "method",
- "url",
- "body",
- "enable_redirect",
- "force_json_decode",
- "force_yaml_decode",
- "headers",
- "raw_body",
- "tls_use_system_certs",
- "tls_ca_cert",
- "tls_ca_cert_file",
- "tls_ca_cert_env_variable",
- "tls_client_cert",
- "tls_client_cert_file",
- "tls_client_cert_env_variable",
- "tls_client_key",
- "tls_client_key_file",
- "tls_client_key_env_variable",
- "tls_insecure_skip_verify",
- "tls_server_name",
- "timeout",
- "cache",
- "force_cache",
- "force_cache_duration_seconds",
- "raise_error",
- "caching_mode",
- "max_retry_attempts",
- "cache_ignored_headers",
-}
-
-// ref: https://www.rfc-editor.org/rfc/rfc7231#section-6.1
-var cacheableHTTPStatusCodes = [...]int{
- http.StatusOK,
- http.StatusNonAuthoritativeInfo,
- http.StatusNoContent,
- http.StatusPartialContent,
- http.StatusMultipleChoices,
- http.StatusMovedPermanently,
- http.StatusNotFound,
- http.StatusMethodNotAllowed,
- http.StatusGone,
- http.StatusRequestURITooLong,
- http.StatusNotImplemented,
-}
-
-var (
- allowedKeys = ast.NewSet()
- cacheableCodes = ast.NewSet()
- requiredKeys = ast.NewSet(ast.StringTerm("method"), ast.StringTerm("url"))
- httpSendLatencyMetricKey = "rego_builtin_" + strings.ReplaceAll(ast.HTTPSend.Name, ".", "_")
- httpSendInterQueryCacheHits = httpSendLatencyMetricKey + "_interquery_cache_hits"
+ v1 "github.com/open-policy-agent/opa/v1/topdown"
)
-type httpSendKey string
-
const (
- // httpSendBuiltinCacheKey is the key in the builtin context cache that
- // points to the http.send() specific cache resides at.
- httpSendBuiltinCacheKey httpSendKey = "HTTP_SEND_CACHE_KEY"
-
// HTTPSendInternalErr represents a runtime evaluation error.
- HTTPSendInternalErr string = "eval_http_send_internal_error"
+ HTTPSendInternalErr = v1.HTTPSendInternalErr
// HTTPSendNetworkErr represents a network error.
- HTTPSendNetworkErr string = "eval_http_send_network_error"
-
- // minRetryDelay is amount of time to backoff after the first failure.
- minRetryDelay = time.Millisecond * 100
-
- // maxRetryDelay is the upper bound of backoff delay.
- maxRetryDelay = time.Second * 60
+ HTTPSendNetworkErr = v1.HTTPSendNetworkErr
)
-
-func builtinHTTPSend(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
-
- obj, err := builtins.ObjectOperand(operands[0].Value, 1)
- if err != nil {
- return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err)
- }
-
- raiseError, err := getRaiseErrorValue(obj)
- if err != nil {
- return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err)
- }
-
- req, err := validateHTTPRequestOperand(operands[0], 1)
- if err != nil {
- if raiseError {
- return handleHTTPSendErr(bctx, err)
- }
-
- return iter(generateRaiseErrorResult(handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err)))
- }
-
- result, err := getHTTPResponse(bctx, req)
- if err != nil {
- if raiseError {
- return handleHTTPSendErr(bctx, err)
- }
-
- result = generateRaiseErrorResult(err)
- }
- return iter(result)
-}
-
-func generateRaiseErrorResult(err error) *ast.Term {
- obj := ast.NewObject()
- obj.Insert(ast.StringTerm("status_code"), ast.IntNumberTerm(0))
-
- errObj := ast.NewObject()
-
- switch err.(type) {
- case *url.Error:
- errObj.Insert(ast.StringTerm("code"), ast.StringTerm(HTTPSendNetworkErr))
- default:
- errObj.Insert(ast.StringTerm("code"), ast.StringTerm(HTTPSendInternalErr))
- }
-
- errObj.Insert(ast.StringTerm("message"), ast.StringTerm(err.Error()))
- obj.Insert(ast.StringTerm("error"), ast.NewTerm(errObj))
-
- return ast.NewTerm(obj)
-}
-
-func getHTTPResponse(bctx BuiltinContext, req ast.Object) (*ast.Term, error) {
-
- bctx.Metrics.Timer(httpSendLatencyMetricKey).Start()
- defer bctx.Metrics.Timer(httpSendLatencyMetricKey).Stop()
-
- key, err := getKeyFromRequest(req)
- if err != nil {
- return nil, err
- }
-
- reqExecutor, err := newHTTPRequestExecutor(bctx, req, key)
- if err != nil {
- return nil, err
- }
- // Check if cache already has a response for this query
- // set headers to exclude cache_ignored_headers
- resp, err := reqExecutor.CheckCache()
- if err != nil {
- return nil, err
- }
-
- if resp == nil {
- httpResp, err := reqExecutor.ExecuteHTTPRequest()
- if err != nil {
- reqExecutor.InsertErrorIntoCache(err)
- return nil, err
- }
- defer util.Close(httpResp)
- // Add result to intra/inter-query cache.
- resp, err = reqExecutor.InsertIntoCache(httpResp)
- if err != nil {
- return nil, err
- }
- }
-
- return ast.NewTerm(resp), nil
-}
-
-// getKeyFromRequest returns a key to be used for caching HTTP responses
-// deletes headers from request object mentioned in cache_ignored_headers
-func getKeyFromRequest(req ast.Object) (ast.Object, error) {
- // deep copy so changes to key do not reflect in the request object
- key := req.Copy()
- cacheIgnoredHeadersTerm := req.Get(ast.StringTerm("cache_ignored_headers"))
- allHeadersTerm := req.Get(ast.StringTerm("headers"))
- // skip because no headers to delete
- if cacheIgnoredHeadersTerm == nil || allHeadersTerm == nil {
- // need to explicitly set cache_ignored_headers to null
- // equivalent requests might have different sets of exclusion lists
- key.Insert(ast.StringTerm("cache_ignored_headers"), ast.NullTerm())
- return key, nil
- }
- var cacheIgnoredHeaders []string
- var allHeaders map[string]interface{}
- err := ast.As(cacheIgnoredHeadersTerm.Value, &cacheIgnoredHeaders)
- if err != nil {
- return nil, err
- }
- err = ast.As(allHeadersTerm.Value, &allHeaders)
- if err != nil {
- return nil, err
- }
- for _, header := range cacheIgnoredHeaders {
- delete(allHeaders, header)
- }
- val, err := ast.InterfaceToValue(allHeaders)
- if err != nil {
- return nil, err
- }
- key.Insert(ast.StringTerm("headers"), ast.NewTerm(val))
- // remove cache_ignored_headers key
- key.Insert(ast.StringTerm("cache_ignored_headers"), ast.NullTerm())
- return key, nil
-}
-
-func init() {
- createAllowedKeys()
- createCacheableHTTPStatusCodes()
- initDefaults()
- RegisterBuiltinFunc(ast.HTTPSend.Name, builtinHTTPSend)
-}
-
-func handleHTTPSendErr(bctx BuiltinContext, err error) error {
- // Return HTTP client timeout errors in a generic error message to avoid confusion about what happened.
- // Do not do this if the builtin context was cancelled and is what caused the request to stop.
- if urlErr, ok := err.(*url.Error); ok && urlErr.Timeout() && bctx.Context.Err() == nil {
- err = fmt.Errorf("%s %s: request timed out", urlErr.Op, urlErr.URL)
- }
- if err := bctx.Context.Err(); err != nil {
- return Halt{
- Err: &Error{
- Code: CancelErr,
- Message: fmt.Sprintf("http.send: timed out (%s)", err.Error()),
- },
- }
- }
- return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err)
-}
-
-func initDefaults() {
- timeoutDuration := os.Getenv(defaultHTTPRequestTimeoutEnv)
- if timeoutDuration != "" {
- var err error
- defaultHTTPRequestTimeout, err = time.ParseDuration(timeoutDuration)
- if err != nil {
- // If it is set to something not valid don't let the process continue in a state
- // that will almost definitely give unexpected results by having it set at 0
- // which means no timeout..
- // This environment variable isn't considered part of the public API.
- // TODO(patrick-east): Remove the environment variable
- panic(fmt.Sprintf("invalid value for HTTP_SEND_TIMEOUT: %s", err))
- }
- }
-}
-
-func validateHTTPRequestOperand(term *ast.Term, pos int) (ast.Object, error) {
-
- obj, err := builtins.ObjectOperand(term.Value, pos)
- if err != nil {
- return nil, err
- }
-
- requestKeys := ast.NewSet(obj.Keys()...)
-
- invalidKeys := requestKeys.Diff(allowedKeys)
- if invalidKeys.Len() != 0 {
- return nil, builtins.NewOperandErr(pos, "invalid request parameters(s): %v", invalidKeys)
- }
-
- missingKeys := requiredKeys.Diff(requestKeys)
- if missingKeys.Len() != 0 {
- return nil, builtins.NewOperandErr(pos, "missing required request parameters(s): %v", missingKeys)
- }
-
- return obj, nil
-
-}
-
-// canonicalizeHeaders returns a copy of the headers where the keys are in
-// canonical HTTP form.
-func canonicalizeHeaders(headers map[string]interface{}) map[string]interface{} {
- canonicalized := map[string]interface{}{}
-
- for k, v := range headers {
- canonicalized[http.CanonicalHeaderKey(k)] = v
- }
-
- return canonicalized
-}
-
-// useSocket examines the url for "unix://" and returns a *http.Transport with
-// a DialContext that opens a socket (specified in the http call).
-// The url is expected to contain socket=/path/to/socket (url encoded)
-// Ex. "unix://localhost/end/point?socket=%2Ftmp%2Fhttp.sock"
-func useSocket(rawURL string, tlsConfig *tls.Config) (bool, string, *http.Transport) {
- u, err := url.Parse(rawURL)
- if err != nil {
- return false, "", nil
- }
-
- if u.Scheme != "unix" || u.RawQuery == "" {
- return false, rawURL, nil
- }
-
- v, err := url.ParseQuery(u.RawQuery)
- if err != nil {
- return false, rawURL, nil
- }
-
- // Rewrite URL targeting the UNIX domain socket.
- u.Scheme = "http"
-
- // Extract the path to the socket.
- // Only retrieve the first value. Subsequent values are ignored and removed
- // to prevent HTTP parameter pollution.
- socket := v.Get("socket")
- v.Del("socket")
- u.RawQuery = v.Encode()
-
- tr := http.DefaultTransport.(*http.Transport).Clone()
- tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) {
- return http.DefaultTransport.(*http.Transport).DialContext(ctx, "unix", socket)
- }
- tr.TLSClientConfig = tlsConfig
- tr.DisableKeepAlives = true
-
- return true, u.String(), tr
-}
-
-func verifyHost(bctx BuiltinContext, host string) error {
- if bctx.Capabilities == nil || bctx.Capabilities.AllowNet == nil {
- return nil
- }
-
- for _, allowed := range bctx.Capabilities.AllowNet {
- if allowed == host {
- return nil
- }
- }
-
- return fmt.Errorf("unallowed host: %s", host)
-}
-
-func verifyURLHost(bctx BuiltinContext, unverifiedURL string) error {
- // Eager return to avoid unnecessary URL parsing
- if bctx.Capabilities == nil || bctx.Capabilities.AllowNet == nil {
- return nil
- }
-
- parsedURL, err := url.Parse(unverifiedURL)
- if err != nil {
- return err
- }
-
- host := strings.Split(parsedURL.Host, ":")[0]
-
- return verifyHost(bctx, host)
-}
-
-func createHTTPRequest(bctx BuiltinContext, obj ast.Object) (*http.Request, *http.Client, error) {
- var url string
- var method string
-
- // Additional CA certificates loading options.
- var tlsCaCert []byte
- var tlsCaCertEnvVar string
- var tlsCaCertFile string
-
- // Client TLS certificate and key options. Each input source
- // comes in a matched pair.
- var tlsClientCert []byte
- var tlsClientKey []byte
-
- var tlsClientCertEnvVar string
- var tlsClientKeyEnvVar string
-
- var tlsClientCertFile string
- var tlsClientKeyFile string
-
- var tlsServerName string
- var body *bytes.Buffer
- var rawBody *bytes.Buffer
- var enableRedirect bool
- var tlsUseSystemCerts *bool
- var tlsConfig tls.Config
- var customHeaders map[string]interface{}
- var tlsInsecureSkipVerify bool
- timeout := defaultHTTPRequestTimeout
-
- for _, val := range obj.Keys() {
- key, err := ast.JSON(val.Value)
- if err != nil {
- return nil, nil, err
- }
-
- key = key.(string)
-
- var strVal string
-
- if s, ok := obj.Get(val).Value.(ast.String); ok {
- strVal = strings.Trim(string(s), "\"")
- } else {
- // Most parameters are strings, so consolidate the type checking.
- switch key {
- case "method",
- "url",
- "raw_body",
- "tls_ca_cert",
- "tls_ca_cert_file",
- "tls_ca_cert_env_variable",
- "tls_client_cert",
- "tls_client_cert_file",
- "tls_client_cert_env_variable",
- "tls_client_key",
- "tls_client_key_file",
- "tls_client_key_env_variable",
- "tls_server_name":
- return nil, nil, fmt.Errorf("%q must be a string", key)
- }
- }
-
- switch key {
- case "method":
- method = strings.ToUpper(strVal)
- case "url":
- err := verifyURLHost(bctx, strVal)
- if err != nil {
- return nil, nil, err
- }
- url = strVal
- case "enable_redirect":
- enableRedirect, err = strconv.ParseBool(obj.Get(val).String())
- if err != nil {
- return nil, nil, err
- }
- case "body":
- bodyVal := obj.Get(val).Value
- bodyValInterface, err := ast.JSON(bodyVal)
- if err != nil {
- return nil, nil, err
- }
-
- bodyValBytes, err := json.Marshal(bodyValInterface)
- if err != nil {
- return nil, nil, err
- }
- body = bytes.NewBuffer(bodyValBytes)
- case "raw_body":
- rawBody = bytes.NewBufferString(strVal)
- case "tls_use_system_certs":
- tempTLSUseSystemCerts, err := strconv.ParseBool(obj.Get(val).String())
- if err != nil {
- return nil, nil, err
- }
- tlsUseSystemCerts = &tempTLSUseSystemCerts
- case "tls_ca_cert":
- tlsCaCert = []byte(strVal)
- case "tls_ca_cert_file":
- tlsCaCertFile = strVal
- case "tls_ca_cert_env_variable":
- tlsCaCertEnvVar = strVal
- case "tls_client_cert":
- tlsClientCert = []byte(strVal)
- case "tls_client_cert_file":
- tlsClientCertFile = strVal
- case "tls_client_cert_env_variable":
- tlsClientCertEnvVar = strVal
- case "tls_client_key":
- tlsClientKey = []byte(strVal)
- case "tls_client_key_file":
- tlsClientKeyFile = strVal
- case "tls_client_key_env_variable":
- tlsClientKeyEnvVar = strVal
- case "tls_server_name":
- tlsServerName = strVal
- case "headers":
- headersVal := obj.Get(val).Value
- headersValInterface, err := ast.JSON(headersVal)
- if err != nil {
- return nil, nil, err
- }
- var ok bool
- customHeaders, ok = headersValInterface.(map[string]interface{})
- if !ok {
- return nil, nil, fmt.Errorf("invalid type for headers key")
- }
- case "tls_insecure_skip_verify":
- tlsInsecureSkipVerify, err = strconv.ParseBool(obj.Get(val).String())
- if err != nil {
- return nil, nil, err
- }
- case "timeout":
- timeout, err = parseTimeout(obj.Get(val).Value)
- if err != nil {
- return nil, nil, err
- }
- case "cache", "caching_mode",
- "force_cache", "force_cache_duration_seconds",
- "force_json_decode", "force_yaml_decode",
- "raise_error", "max_retry_attempts", "cache_ignored_headers": // no-op
- default:
- return nil, nil, fmt.Errorf("invalid parameter %q", key)
- }
- }
-
- isTLS := false
- client := &http.Client{
- Timeout: timeout,
- CheckRedirect: func(*http.Request, []*http.Request) error {
- return http.ErrUseLastResponse
- },
- }
-
- if tlsInsecureSkipVerify {
- isTLS = true
- tlsConfig.InsecureSkipVerify = tlsInsecureSkipVerify
- }
-
- if len(tlsClientCert) > 0 && len(tlsClientKey) > 0 {
- cert, err := tls.X509KeyPair(tlsClientCert, tlsClientKey)
- if err != nil {
- return nil, nil, err
- }
-
- isTLS = true
- tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
- }
-
- if tlsClientCertFile != "" && tlsClientKeyFile != "" {
- cert, err := tls.LoadX509KeyPair(tlsClientCertFile, tlsClientKeyFile)
- if err != nil {
- return nil, nil, err
- }
-
- isTLS = true
- tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
- }
-
- if tlsClientCertEnvVar != "" && tlsClientKeyEnvVar != "" {
- cert, err := tls.X509KeyPair(
- []byte(os.Getenv(tlsClientCertEnvVar)),
- []byte(os.Getenv(tlsClientKeyEnvVar)))
- if err != nil {
- return nil, nil, fmt.Errorf("cannot extract public/private key pair from envvars %q, %q: %w",
- tlsClientCertEnvVar, tlsClientKeyEnvVar, err)
- }
-
- isTLS = true
- tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
- }
-
- // Use system certs if no CA cert is provided
- // or system certs flag is not set
- if len(tlsCaCert) == 0 && tlsCaCertFile == "" && tlsCaCertEnvVar == "" && tlsUseSystemCerts == nil {
- trueValue := true
- tlsUseSystemCerts = &trueValue
- }
-
- // Check the system certificates config first so that we
- // load additional certificated into the correct pool.
- if tlsUseSystemCerts != nil && *tlsUseSystemCerts && runtime.GOOS != "windows" {
- pool, err := x509.SystemCertPool()
- if err != nil {
- return nil, nil, err
- }
-
- isTLS = true
- tlsConfig.RootCAs = pool
- }
-
- if len(tlsCaCert) != 0 {
- tlsCaCert = bytes.Replace(tlsCaCert, []byte("\\n"), []byte("\n"), -1)
- pool, err := addCACertsFromBytes(tlsConfig.RootCAs, tlsCaCert)
- if err != nil {
- return nil, nil, err
- }
-
- isTLS = true
- tlsConfig.RootCAs = pool
- }
-
- if tlsCaCertFile != "" {
- pool, err := addCACertsFromFile(tlsConfig.RootCAs, tlsCaCertFile)
- if err != nil {
- return nil, nil, err
- }
-
- isTLS = true
- tlsConfig.RootCAs = pool
- }
-
- if tlsCaCertEnvVar != "" {
- pool, err := addCACertsFromEnv(tlsConfig.RootCAs, tlsCaCertEnvVar)
- if err != nil {
- return nil, nil, err
- }
-
- isTLS = true
- tlsConfig.RootCAs = pool
- }
-
- if isTLS {
- if ok, parsedURL, tr := useSocket(url, &tlsConfig); ok {
- client.Transport = tr
- url = parsedURL
- } else {
- tr := http.DefaultTransport.(*http.Transport).Clone()
- tr.TLSClientConfig = &tlsConfig
- tr.DisableKeepAlives = true
- client.Transport = tr
- }
- } else {
- if ok, parsedURL, tr := useSocket(url, nil); ok {
- client.Transport = tr
- url = parsedURL
- }
- }
-
- // check if redirects are enabled
- if enableRedirect {
- client.CheckRedirect = func(req *http.Request, _ []*http.Request) error {
- return verifyURLHost(bctx, req.URL.String())
- }
- }
-
- if rawBody != nil {
- body = rawBody
- } else if body == nil {
- body = bytes.NewBufferString("")
- }
-
- // create the http request, use the builtin context's context to ensure
- // the request is cancelled if evaluation is cancelled.
- req, err := http.NewRequest(method, url, body)
- if err != nil {
- return nil, nil, err
- }
-
- req = req.WithContext(bctx.Context)
-
- // Add custom headers
- if len(customHeaders) != 0 {
- customHeaders = canonicalizeHeaders(customHeaders)
-
- for k, v := range customHeaders {
- header, ok := v.(string)
- if !ok {
- return nil, nil, fmt.Errorf("invalid type for headers value %q", v)
- }
-
- req.Header.Add(k, header)
- }
-
- // Don't overwrite or append to one that was set in the custom headers
- if _, hasUA := customHeaders["User-Agent"]; !hasUA {
- req.Header.Add("User-Agent", version.UserAgent)
- }
-
- // If the caller specifies the Host header, use it for the HTTP
- // request host and the TLS server name.
- if host, hasHost := customHeaders["Host"]; hasHost {
- host := host.(string) // We already checked that it's a string.
- req.Host = host
-
- // Only default the ServerName if the caller has
- // specified the host. If we don't specify anything,
- // Go will default to the target hostname. This name
- // is not the same as the default that Go populates
- // `req.Host` with, which is why we don't just set
- // this unconditionally.
- tlsConfig.ServerName = host
- }
- }
-
- if tlsServerName != "" {
- tlsConfig.ServerName = tlsServerName
- }
-
- if len(bctx.DistributedTracingOpts) > 0 {
- client.Transport = tracing.NewTransport(client.Transport, bctx.DistributedTracingOpts)
- }
-
- return req, client, nil
-}
-
-func executeHTTPRequest(req *http.Request, client *http.Client, inputReqObj ast.Object) (*http.Response, error) {
- var err error
- var retry int
-
- retry, err = getNumberValFromReqObj(inputReqObj, ast.StringTerm("max_retry_attempts"))
- if err != nil {
- return nil, err
- }
-
- for i := 0; true; i++ {
-
- var resp *http.Response
- resp, err = client.Do(req)
- if err == nil {
- return resp, nil
- }
-
- // final attempt
- if i == retry {
- break
- }
-
- if err == context.Canceled {
- return nil, err
- }
-
- delay := util.DefaultBackoff(float64(minRetryDelay), float64(maxRetryDelay), i)
- timer, timerCancel := util.TimerWithCancel(delay)
- select {
- case <-timer.C:
- case <-req.Context().Done():
- timerCancel() // explicitly cancel the timer.
- return nil, context.Canceled
- }
- }
- return nil, err
-}
-
-func isContentType(header http.Header, typ ...string) bool {
- for _, t := range typ {
- if strings.Contains(header.Get("Content-Type"), t) {
- return true
- }
- }
- return false
-}
-
-type httpSendCacheEntry struct {
- response *ast.Value
- error error
-}
-
-// The httpSendCache is used for intra-query caching of http.send results.
-type httpSendCache struct {
- entries *util.HashMap
-}
-
-func newHTTPSendCache() *httpSendCache {
- return &httpSendCache{
- entries: util.NewHashMap(valueEq, valueHash),
- }
-}
-
-func valueHash(v util.T) int {
- return ast.StringTerm(v.(ast.Value).String()).Hash()
-}
-
-func valueEq(a, b util.T) bool {
- av := a.(ast.Value)
- bv := b.(ast.Value)
- return av.String() == bv.String()
-}
-
-func (cache *httpSendCache) get(k ast.Value) *httpSendCacheEntry {
- if v, ok := cache.entries.Get(k); ok {
- v := v.(httpSendCacheEntry)
- return &v
- }
- return nil
-}
-
-func (cache *httpSendCache) putResponse(k ast.Value, v *ast.Value) {
- cache.entries.Put(k, httpSendCacheEntry{response: v})
-}
-
-func (cache *httpSendCache) putError(k ast.Value, v error) {
- cache.entries.Put(k, httpSendCacheEntry{error: v})
-}
-
-// In the BuiltinContext cache we only store a single entry that points to
-// our ValueMap which is the "real" http.send() cache.
-func getHTTPSendCache(bctx BuiltinContext) *httpSendCache {
- raw, ok := bctx.Cache.Get(httpSendBuiltinCacheKey)
- if !ok {
- // Initialize if it isn't there
- c := newHTTPSendCache()
- bctx.Cache.Put(httpSendBuiltinCacheKey, c)
- return c
- }
-
- c, ok := raw.(*httpSendCache)
- if !ok {
- return nil
- }
- return c
-}
-
-// checkHTTPSendCache checks for the given key's value in the cache
-func checkHTTPSendCache(bctx BuiltinContext, key ast.Object) (ast.Value, error) {
- requestCache := getHTTPSendCache(bctx)
- if requestCache == nil {
- return nil, nil
- }
-
- v := requestCache.get(key)
- if v != nil {
- if v.error != nil {
- return nil, v.error
- }
- if v.response != nil {
- return *v.response, nil
- }
- // This should never happen
- }
-
- return nil, nil
-}
-
-func insertIntoHTTPSendCache(bctx BuiltinContext, key ast.Object, value ast.Value) {
- requestCache := getHTTPSendCache(bctx)
- if requestCache == nil {
- // Should never happen.. if it does just skip caching the value
- // FIXME: return error instead, to prevent inconsistencies?
- return
- }
- requestCache.putResponse(key, &value)
-}
-
-func insertErrorIntoHTTPSendCache(bctx BuiltinContext, key ast.Object, err error) {
- requestCache := getHTTPSendCache(bctx)
- if requestCache == nil {
- // Should never happen.. if it does just skip caching the value
- // FIXME: return error instead, to prevent inconsistencies?
- return
- }
- requestCache.putError(key, err)
-}
-
-// checkHTTPSendInterQueryCache checks for the given key's value in the inter-query cache
-func (c *interQueryCache) checkHTTPSendInterQueryCache() (ast.Value, error) {
- requestCache := c.bctx.InterQueryBuiltinCache
-
- cachedValue, found := requestCache.Get(c.key)
- if !found {
- return nil, nil
- }
-
- value, cerr := requestCache.Clone(cachedValue)
- if cerr != nil {
- return nil, handleHTTPSendErr(c.bctx, cerr)
- }
-
- c.bctx.Metrics.Counter(httpSendInterQueryCacheHits).Incr()
- var cachedRespData *interQueryCacheData
-
- switch v := value.(type) {
- case *interQueryCacheValue:
- var err error
- cachedRespData, err = v.copyCacheData()
- if err != nil {
- return nil, err
- }
- case *interQueryCacheData:
- cachedRespData = v
- default:
- return nil, nil
- }
-
- if getCurrentTime(c.bctx).Before(cachedRespData.ExpiresAt) {
- return cachedRespData.formatToAST(c.forceJSONDecode, c.forceYAMLDecode)
- }
-
- var err error
- c.httpReq, c.httpClient, err = createHTTPRequest(c.bctx, c.key)
- if err != nil {
- return nil, handleHTTPSendErr(c.bctx, err)
- }
-
- headers := parseResponseHeaders(cachedRespData.Headers)
-
- // check with the server if the stale response is still up-to-date.
- // If server returns a new response (ie. status_code=200), update the cache with the new response
- // If server returns an unmodified response (ie. status_code=304), update the headers for the existing response
- result, modified, err := revalidateCachedResponse(c.httpReq, c.httpClient, c.key, headers)
- requestCache.Delete(c.key)
- if err != nil || result == nil {
- return nil, err
- }
-
- defer result.Body.Close()
-
- if !modified {
- // update the headers in the cached response with their corresponding values from the 304 (Not Modified) response
- for headerName, values := range result.Header {
- cachedRespData.Headers.Del(headerName)
- for _, v := range values {
- cachedRespData.Headers.Add(headerName, v)
- }
- }
-
- if forceCaching(c.forceCacheParams) {
- createdAt := getCurrentTime(c.bctx)
- cachedRespData.ExpiresAt = createdAt.Add(time.Second * time.Duration(c.forceCacheParams.forceCacheDurationSeconds))
- } else {
- expiresAt, err := expiryFromHeaders(result.Header)
- if err != nil {
- return nil, err
- }
- cachedRespData.ExpiresAt = expiresAt
- }
-
- cachingMode, err := getCachingMode(c.key)
- if err != nil {
- return nil, err
- }
-
- var pcv cache.InterQueryCacheValue
-
- if cachingMode == defaultCachingMode {
- pcv, err = cachedRespData.toCacheValue()
- if err != nil {
- return nil, err
- }
- } else {
- pcv = cachedRespData
- }
-
- c.bctx.InterQueryBuiltinCache.InsertWithExpiry(c.key, pcv, cachedRespData.ExpiresAt)
-
- return cachedRespData.formatToAST(c.forceJSONDecode, c.forceYAMLDecode)
- }
-
- newValue, respBody, err := formatHTTPResponseToAST(result, c.forceJSONDecode, c.forceYAMLDecode)
- if err != nil {
- return nil, err
- }
-
- if err := insertIntoHTTPSendInterQueryCache(c.bctx, c.key, result, respBody, c.forceCacheParams); err != nil {
- return nil, err
- }
-
- return newValue, nil
-}
-
-// insertIntoHTTPSendInterQueryCache inserts given key and value in the inter-query cache
-func insertIntoHTTPSendInterQueryCache(bctx BuiltinContext, key ast.Value, resp *http.Response, respBody []byte, cacheParams *forceCacheParams) error {
- if resp == nil || (!forceCaching(cacheParams) && !canStore(resp.Header)) || !cacheableCodes.Contains(ast.IntNumberTerm(resp.StatusCode)) {
- return nil
- }
-
- requestCache := bctx.InterQueryBuiltinCache
-
- obj, ok := key.(ast.Object)
- if !ok {
- return fmt.Errorf("interface conversion error")
- }
-
- cachingMode, err := getCachingMode(obj)
- if err != nil {
- return err
- }
-
- var pcv cache.InterQueryCacheValue
- var pcvData *interQueryCacheData
- if cachingMode == defaultCachingMode {
- pcv, pcvData, err = newInterQueryCacheValue(bctx, resp, respBody, cacheParams)
- } else {
- pcvData, err = newInterQueryCacheData(bctx, resp, respBody, cacheParams)
- pcv = pcvData
- }
-
- if err != nil {
- return err
- }
-
- requestCache.InsertWithExpiry(key, pcv, pcvData.ExpiresAt)
- return nil
-}
-
-func createAllowedKeys() {
- for _, element := range allowedKeyNames {
- allowedKeys.Add(ast.StringTerm(element))
- }
-}
-
-func createCacheableHTTPStatusCodes() {
- for _, element := range cacheableHTTPStatusCodes {
- cacheableCodes.Add(ast.IntNumberTerm(element))
- }
-}
-
-func parseTimeout(timeoutVal ast.Value) (time.Duration, error) {
- var timeout time.Duration
- switch t := timeoutVal.(type) {
- case ast.Number:
- timeoutInt, ok := t.Int64()
- if !ok {
- return timeout, fmt.Errorf("invalid timeout number value %v, must be int64", timeoutVal)
- }
- return time.Duration(timeoutInt), nil
- case ast.String:
- // Support strings without a unit, treat them the same as just a number value (ns)
- var err error
- timeoutInt, err := strconv.ParseInt(string(t), 10, 64)
- if err == nil {
- return time.Duration(timeoutInt), nil
- }
-
- // Try parsing it as a duration (requires a supported units suffix)
- timeout, err = time.ParseDuration(string(t))
- if err != nil {
- return timeout, fmt.Errorf("invalid timeout value %v: %s", timeoutVal, err)
- }
- return timeout, nil
- default:
- return timeout, builtins.NewOperandErr(1, "'timeout' must be one of {string, number} but got %s", ast.TypeName(t))
- }
-}
-
-func getBoolValFromReqObj(req ast.Object, key *ast.Term) (bool, error) {
- var b ast.Boolean
- var ok bool
- if v := req.Get(key); v != nil {
- if b, ok = v.Value.(ast.Boolean); !ok {
- return false, fmt.Errorf("invalid value for %v field", key.String())
- }
- }
- return bool(b), nil
-}
-
-func getNumberValFromReqObj(req ast.Object, key *ast.Term) (int, error) {
- term := req.Get(key)
- if term == nil {
- return 0, nil
- }
-
- if t, ok := term.Value.(ast.Number); ok {
- num, ok := t.Int()
- if !ok || num < 0 {
- return 0, fmt.Errorf("invalid value %v for field %v", t.String(), key.String())
- }
- return num, nil
- }
-
- return 0, fmt.Errorf("invalid value %v for field %v", term.String(), key.String())
-}
-
-func getCachingMode(req ast.Object) (cachingMode, error) {
- key := ast.StringTerm("caching_mode")
- var s ast.String
- var ok bool
- if v := req.Get(key); v != nil {
- if s, ok = v.Value.(ast.String); !ok {
- return "", fmt.Errorf("invalid value for %v field", key.String())
- }
-
- switch cachingMode(s) {
- case defaultCachingMode, cachingModeDeserialized:
- return cachingMode(s), nil
- default:
- return "", fmt.Errorf("invalid value specified for %v field: %v", key.String(), string(s))
- }
- }
- return defaultCachingMode, nil
-}
-
-type interQueryCacheValue struct {
- Data []byte
-}
-
-func newInterQueryCacheValue(bctx BuiltinContext, resp *http.Response, respBody []byte, cacheParams *forceCacheParams) (*interQueryCacheValue, *interQueryCacheData, error) {
- data, err := newInterQueryCacheData(bctx, resp, respBody, cacheParams)
- if err != nil {
- return nil, nil, err
- }
-
- b, err := json.Marshal(data)
- if err != nil {
- return nil, nil, err
- }
- return &interQueryCacheValue{Data: b}, data, nil
-}
-
-func (cb interQueryCacheValue) Clone() (cache.InterQueryCacheValue, error) {
- dup := make([]byte, len(cb.Data))
- copy(dup, cb.Data)
- return &interQueryCacheValue{Data: dup}, nil
-}
-
-func (cb interQueryCacheValue) SizeInBytes() int64 {
- return int64(len(cb.Data))
-}
-
-func (cb *interQueryCacheValue) copyCacheData() (*interQueryCacheData, error) {
- var res interQueryCacheData
- err := util.UnmarshalJSON(cb.Data, &res)
- if err != nil {
- return nil, err
- }
- return &res, nil
-}
-
-type interQueryCacheData struct {
- RespBody []byte
- Status string
- StatusCode int
- Headers http.Header
- ExpiresAt time.Time
-}
-
-func forceCaching(cacheParams *forceCacheParams) bool {
- return cacheParams != nil && cacheParams.forceCacheDurationSeconds > 0
-}
-
-func expiryFromHeaders(headers http.Header) (time.Time, error) {
- var expiresAt time.Time
- maxAge, err := parseMaxAgeCacheDirective(parseCacheControlHeader(headers))
- if err != nil {
- return time.Time{}, err
- }
- if maxAge != -1 {
- createdAt, err := getResponseHeaderDate(headers)
- if err != nil {
- return time.Time{}, err
- }
- expiresAt = createdAt.Add(time.Second * time.Duration(maxAge))
- } else {
- expiresAt = getResponseHeaderExpires(headers)
- }
- return expiresAt, nil
-}
-
-func newInterQueryCacheData(bctx BuiltinContext, resp *http.Response, respBody []byte, cacheParams *forceCacheParams) (*interQueryCacheData, error) {
- var expiresAt time.Time
-
- if forceCaching(cacheParams) {
- createdAt := getCurrentTime(bctx)
- expiresAt = createdAt.Add(time.Second * time.Duration(cacheParams.forceCacheDurationSeconds))
- } else {
- var err error
- expiresAt, err = expiryFromHeaders(resp.Header)
- if err != nil {
- return nil, err
- }
- }
-
- cv := interQueryCacheData{
- ExpiresAt: expiresAt,
- RespBody: respBody,
- Status: resp.Status,
- StatusCode: resp.StatusCode,
- Headers: resp.Header}
-
- return &cv, nil
-}
-
-func (c *interQueryCacheData) formatToAST(forceJSONDecode, forceYAMLDecode bool) (ast.Value, error) {
- return prepareASTResult(c.Headers, forceJSONDecode, forceYAMLDecode, c.RespBody, c.Status, c.StatusCode)
-}
-
-func (c *interQueryCacheData) toCacheValue() (*interQueryCacheValue, error) {
- b, err := json.Marshal(c)
- if err != nil {
- return nil, err
- }
- return &interQueryCacheValue{Data: b}, nil
-}
-
-func (c *interQueryCacheData) SizeInBytes() int64 {
- return 0
-}
-
-func (c *interQueryCacheData) Clone() (cache.InterQueryCacheValue, error) {
- dup := make([]byte, len(c.RespBody))
- copy(dup, c.RespBody)
-
- return &interQueryCacheData{
- ExpiresAt: c.ExpiresAt,
- RespBody: dup,
- Status: c.Status,
- StatusCode: c.StatusCode,
- Headers: c.Headers.Clone()}, nil
-}
-
-type responseHeaders struct {
- etag string // identifier for a specific version of the response
- lastModified string // date and time response was last modified as per origin server
-}
-
-// deltaSeconds specifies a non-negative integer, representing
-// time in seconds: http://tools.ietf.org/html/rfc7234#section-1.2.1
-type deltaSeconds int32
-
-func parseResponseHeaders(headers http.Header) *responseHeaders {
- result := responseHeaders{}
-
- result.etag = headers.Get("etag")
-
- result.lastModified = headers.Get("last-modified")
-
- return &result
-}
-
-func revalidateCachedResponse(req *http.Request, client *http.Client, inputReqObj ast.Object, headers *responseHeaders) (*http.Response, bool, error) {
- etag := headers.etag
- lastModified := headers.lastModified
-
- if etag == "" && lastModified == "" {
- return nil, false, nil
- }
-
- cloneReq := req.Clone(req.Context())
-
- if etag != "" {
- cloneReq.Header.Set("if-none-match", etag)
- }
-
- if lastModified != "" {
- cloneReq.Header.Set("if-modified-since", lastModified)
- }
-
- response, err := executeHTTPRequest(cloneReq, client, inputReqObj)
- if err != nil {
- return nil, false, err
- }
-
- switch response.StatusCode {
- case http.StatusOK:
- return response, true, nil
-
- case http.StatusNotModified:
- return response, false, nil
- }
- util.Close(response)
- return nil, false, nil
-}
-
-func canStore(headers http.Header) bool {
- ccHeaders := parseCacheControlHeader(headers)
-
- // Check "no-store" cache directive
- // The "no-store" response directive indicates that a cache MUST NOT
- // store any part of either the immediate request or response.
- if _, ok := ccHeaders["no-store"]; ok {
- return false
- }
- return true
-}
-
-func getCurrentTime(bctx BuiltinContext) time.Time {
- var current time.Time
-
- value, err := ast.JSON(bctx.Time.Value)
- if err != nil {
- return current
- }
-
- valueNum, ok := value.(json.Number)
- if !ok {
- return current
- }
-
- valueNumInt, err := valueNum.Int64()
- if err != nil {
- return current
- }
-
- current = time.Unix(0, valueNumInt).UTC()
- return current
-}
-
-func parseCacheControlHeader(headers http.Header) map[string]string {
- ccDirectives := map[string]string{}
- ccHeader := headers.Get("cache-control")
-
- for _, part := range strings.Split(ccHeader, ",") {
- part = strings.Trim(part, " ")
- if part == "" {
- continue
- }
- if strings.ContainsRune(part, '=') {
- items := strings.Split(part, "=")
- if len(items) != 2 {
- continue
- }
- ccDirectives[strings.Trim(items[0], " ")] = strings.Trim(items[1], ",")
- } else {
- ccDirectives[part] = ""
- }
- }
-
- return ccDirectives
-}
-
-func getResponseHeaderDate(headers http.Header) (date time.Time, err error) {
- dateHeader := headers.Get("date")
- if dateHeader == "" {
- err = fmt.Errorf("no date header")
- return
- }
- return http.ParseTime(dateHeader)
-}
-
-func getResponseHeaderExpires(headers http.Header) time.Time {
- expiresHeader := headers.Get("expires")
- if expiresHeader == "" {
- return time.Time{}
- }
-
- date, err := http.ParseTime(expiresHeader)
- if err != nil {
- // servers can set `Expires: 0` which is an invalid date to indicate expired content
- return time.Time{}
- }
-
- return date
-}
-
-// parseMaxAgeCacheDirective parses the max-age directive expressed in delta-seconds as per
-// https://tools.ietf.org/html/rfc7234#section-1.2.1
-func parseMaxAgeCacheDirective(cc map[string]string) (deltaSeconds, error) {
- maxAge, ok := cc["max-age"]
- if !ok {
- return deltaSeconds(-1), nil
- }
-
- val, err := strconv.ParseUint(maxAge, 10, 32)
- if err != nil {
- if numError, ok := err.(*strconv.NumError); ok {
- if numError.Err == strconv.ErrRange {
- return deltaSeconds(math.MaxInt32), nil
- }
- }
- return deltaSeconds(-1), err
- }
-
- if val > math.MaxInt32 {
- return deltaSeconds(math.MaxInt32), nil
- }
- return deltaSeconds(val), nil
-}
-
-func formatHTTPResponseToAST(resp *http.Response, forceJSONDecode, forceYAMLDecode bool) (ast.Value, []byte, error) {
-
- resultRawBody, err := io.ReadAll(resp.Body)
- if err != nil {
- return nil, nil, err
- }
-
- resultObj, err := prepareASTResult(resp.Header, forceJSONDecode, forceYAMLDecode, resultRawBody, resp.Status, resp.StatusCode)
- if err != nil {
- return nil, nil, err
- }
-
- return resultObj, resultRawBody, nil
-}
-
-func prepareASTResult(headers http.Header, forceJSONDecode, forceYAMLDecode bool, body []byte, status string, statusCode int) (ast.Value, error) {
- var resultBody interface{}
-
- // If the response body cannot be JSON/YAML decoded,
- // an error will not be returned. Instead, the "body" field
- // in the result will be null.
- switch {
- case forceJSONDecode || isContentType(headers, "application/json"):
- _ = util.UnmarshalJSON(body, &resultBody)
- case forceYAMLDecode || isContentType(headers, "application/yaml", "application/x-yaml"):
- _ = util.Unmarshal(body, &resultBody)
- }
-
- result := make(map[string]interface{})
- result["status"] = status
- result["status_code"] = statusCode
- result["body"] = resultBody
- result["raw_body"] = string(body)
- result["headers"] = getResponseHeaders(headers)
-
- resultObj, err := ast.InterfaceToValue(result)
- if err != nil {
- return nil, err
- }
-
- return resultObj, nil
-}
-
-func getResponseHeaders(headers http.Header) map[string]interface{} {
- respHeaders := map[string]interface{}{}
- for headerName, values := range headers {
- var respValues []interface{}
- for _, v := range values {
- respValues = append(respValues, v)
- }
- respHeaders[strings.ToLower(headerName)] = respValues
- }
- return respHeaders
-}
-
-// httpRequestExecutor defines an interface for the http send cache
-type httpRequestExecutor interface {
- CheckCache() (ast.Value, error)
- InsertIntoCache(value *http.Response) (ast.Value, error)
- InsertErrorIntoCache(err error)
- ExecuteHTTPRequest() (*http.Response, error)
-}
-
-// newHTTPRequestExecutor returns a new HTTP request executor that wraps either an inter-query or
-// intra-query cache implementation
-func newHTTPRequestExecutor(bctx BuiltinContext, req ast.Object, key ast.Object) (httpRequestExecutor, error) {
- useInterQueryCache, forceCacheParams, err := useInterQueryCache(req)
- if err != nil {
- return nil, handleHTTPSendErr(bctx, err)
- }
-
- if useInterQueryCache && bctx.InterQueryBuiltinCache != nil {
- return newInterQueryCache(bctx, req, key, forceCacheParams)
- }
- return newIntraQueryCache(bctx, req, key)
-}
-
-type interQueryCache struct {
- bctx BuiltinContext
- req ast.Object
- key ast.Object
- httpReq *http.Request
- httpClient *http.Client
- forceJSONDecode bool
- forceYAMLDecode bool
- forceCacheParams *forceCacheParams
-}
-
-func newInterQueryCache(bctx BuiltinContext, req ast.Object, key ast.Object, forceCacheParams *forceCacheParams) (*interQueryCache, error) {
- return &interQueryCache{bctx: bctx, req: req, key: key, forceCacheParams: forceCacheParams}, nil
-}
-
-// CheckCache checks the cache for the value of the key set on this object
-func (c *interQueryCache) CheckCache() (ast.Value, error) {
- var err error
-
- // Checking the intra-query cache first ensures consistency of errors and HTTP responses within a query.
- resp, err := checkHTTPSendCache(c.bctx, c.key)
- if err != nil {
- return nil, err
- }
- if resp != nil {
- return resp, nil
- }
-
- c.forceJSONDecode, err = getBoolValFromReqObj(c.key, ast.StringTerm("force_json_decode"))
- if err != nil {
- return nil, handleHTTPSendErr(c.bctx, err)
- }
- c.forceYAMLDecode, err = getBoolValFromReqObj(c.key, ast.StringTerm("force_yaml_decode"))
- if err != nil {
- return nil, handleHTTPSendErr(c.bctx, err)
- }
-
- resp, err = c.checkHTTPSendInterQueryCache()
- // Always insert the result of the inter-query cache into the intra-query cache, to maintain consistency within the same query.
- if err != nil {
- insertErrorIntoHTTPSendCache(c.bctx, c.key, err)
- }
- if resp != nil {
- insertIntoHTTPSendCache(c.bctx, c.key, resp)
- }
- return resp, err
-}
-
-// InsertIntoCache inserts the key set on this object into the cache with the given value
-func (c *interQueryCache) InsertIntoCache(value *http.Response) (ast.Value, error) {
- result, respBody, err := formatHTTPResponseToAST(value, c.forceJSONDecode, c.forceYAMLDecode)
- if err != nil {
- return nil, handleHTTPSendErr(c.bctx, err)
- }
-
- // Always insert into the intra-query cache, to maintain consistency within the same query.
- insertIntoHTTPSendCache(c.bctx, c.key, result)
-
- // We ignore errors when populating the inter-query cache, because we've already populated the intra-cache,
- // and query consistency is our primary concern.
- _ = insertIntoHTTPSendInterQueryCache(c.bctx, c.key, value, respBody, c.forceCacheParams)
- return result, nil
-}
-
-func (c *interQueryCache) InsertErrorIntoCache(err error) {
- insertErrorIntoHTTPSendCache(c.bctx, c.key, err)
-}
-
-// ExecuteHTTPRequest executes a HTTP request
-func (c *interQueryCache) ExecuteHTTPRequest() (*http.Response, error) {
- var err error
- c.httpReq, c.httpClient, err = createHTTPRequest(c.bctx, c.req)
- if err != nil {
- return nil, handleHTTPSendErr(c.bctx, err)
- }
-
- return executeHTTPRequest(c.httpReq, c.httpClient, c.req)
-}
-
-type intraQueryCache struct {
- bctx BuiltinContext
- req ast.Object
- key ast.Object
-}
-
-func newIntraQueryCache(bctx BuiltinContext, req ast.Object, key ast.Object) (*intraQueryCache, error) {
- return &intraQueryCache{bctx: bctx, req: req, key: key}, nil
-}
-
-// CheckCache checks the cache for the value of the key set on this object
-func (c *intraQueryCache) CheckCache() (ast.Value, error) {
- return checkHTTPSendCache(c.bctx, c.key)
-}
-
-// InsertIntoCache inserts the key set on this object into the cache with the given value
-func (c *intraQueryCache) InsertIntoCache(value *http.Response) (ast.Value, error) {
- forceJSONDecode, err := getBoolValFromReqObj(c.key, ast.StringTerm("force_json_decode"))
- if err != nil {
- return nil, handleHTTPSendErr(c.bctx, err)
- }
- forceYAMLDecode, err := getBoolValFromReqObj(c.key, ast.StringTerm("force_yaml_decode"))
- if err != nil {
- return nil, handleHTTPSendErr(c.bctx, err)
- }
-
- result, _, err := formatHTTPResponseToAST(value, forceJSONDecode, forceYAMLDecode)
- if err != nil {
- return nil, handleHTTPSendErr(c.bctx, err)
- }
-
- if cacheableCodes.Contains(ast.IntNumberTerm(value.StatusCode)) {
- insertIntoHTTPSendCache(c.bctx, c.key, result)
- }
-
- return result, nil
-}
-
-func (c *intraQueryCache) InsertErrorIntoCache(err error) {
- insertErrorIntoHTTPSendCache(c.bctx, c.key, err)
-}
-
-// ExecuteHTTPRequest executes a HTTP request
-func (c *intraQueryCache) ExecuteHTTPRequest() (*http.Response, error) {
- httpReq, httpClient, err := createHTTPRequest(c.bctx, c.req)
- if err != nil {
- return nil, handleHTTPSendErr(c.bctx, err)
- }
- return executeHTTPRequest(httpReq, httpClient, c.req)
-}
-
-func useInterQueryCache(req ast.Object) (bool, *forceCacheParams, error) {
- value, err := getBoolValFromReqObj(req, ast.StringTerm("cache"))
- if err != nil {
- return false, nil, err
- }
-
- valueForceCache, err := getBoolValFromReqObj(req, ast.StringTerm("force_cache"))
- if err != nil {
- return false, nil, err
- }
-
- if valueForceCache {
- forceCacheParams, err := newForceCacheParams(req)
- return true, forceCacheParams, err
- }
-
- return value, nil, nil
-}
-
-type forceCacheParams struct {
- forceCacheDurationSeconds int32
-}
-
-func newForceCacheParams(req ast.Object) (*forceCacheParams, error) {
- term := req.Get(ast.StringTerm("force_cache_duration_seconds"))
- if term == nil {
- return nil, fmt.Errorf("'force_cache' set but 'force_cache_duration_seconds' parameter is missing")
- }
-
- forceCacheDurationSeconds := term.String()
-
- value, err := strconv.ParseInt(forceCacheDurationSeconds, 10, 32)
- if err != nil {
- return nil, err
- }
-
- return &forceCacheParams{forceCacheDurationSeconds: int32(value)}, nil
-}
-
-func getRaiseErrorValue(req ast.Object) (bool, error) {
- result := ast.Boolean(true)
- var ok bool
- if v := req.Get(ast.StringTerm("raise_error")); v != nil {
- if result, ok = v.Value.(ast.Boolean); !ok {
- return false, fmt.Errorf("invalid value for raise_error field")
- }
- }
- return bool(result), nil
-}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/instrumentation.go b/vendor/github.com/open-policy-agent/opa/topdown/instrumentation.go
index 6eacc338ef..845f8da612 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/instrumentation.go
+++ b/vendor/github.com/open-policy-agent/opa/topdown/instrumentation.go
@@ -4,60 +4,18 @@
package topdown
-import "github.com/open-policy-agent/opa/metrics"
-
-const (
- evalOpPlug = "eval_op_plug"
- evalOpResolve = "eval_op_resolve"
- evalOpRuleIndex = "eval_op_rule_index"
- evalOpBuiltinCall = "eval_op_builtin_call"
- evalOpVirtualCacheHit = "eval_op_virtual_cache_hit"
- evalOpVirtualCacheMiss = "eval_op_virtual_cache_miss"
- evalOpBaseCacheHit = "eval_op_base_cache_hit"
- evalOpBaseCacheMiss = "eval_op_base_cache_miss"
- evalOpComprehensionCacheSkip = "eval_op_comprehension_cache_skip"
- evalOpComprehensionCacheBuild = "eval_op_comprehension_cache_build"
- evalOpComprehensionCacheHit = "eval_op_comprehension_cache_hit"
- evalOpComprehensionCacheMiss = "eval_op_comprehension_cache_miss"
- partialOpSaveUnify = "partial_op_save_unify"
- partialOpSaveSetContains = "partial_op_save_set_contains"
- partialOpSaveSetContainsRec = "partial_op_save_set_contains_rec"
- partialOpCopyPropagation = "partial_op_copy_propagation"
+import (
+ "github.com/open-policy-agent/opa/v1/metrics"
+ v1 "github.com/open-policy-agent/opa/v1/topdown"
)
// Instrumentation implements helper functions to instrument query evaluation
// to diagnose performance issues. Instrumentation may be expensive in some
// cases, so it is disabled by default.
-type Instrumentation struct {
- m metrics.Metrics
-}
+type Instrumentation = v1.Instrumentation
// NewInstrumentation returns a new Instrumentation object. Performance
// diagnostics recorded on this Instrumentation object will stored in m.
func NewInstrumentation(m metrics.Metrics) *Instrumentation {
- return &Instrumentation{
- m: m,
- }
-}
-
-func (instr *Instrumentation) startTimer(name string) {
- if instr == nil {
- return
- }
- instr.m.Timer(name).Start()
-}
-
-func (instr *Instrumentation) stopTimer(name string) {
- if instr == nil {
- return
- }
- delta := instr.m.Timer(name).Stop()
- instr.m.Histogram(name).Update(delta)
-}
-
-func (instr *Instrumentation) counterIncr(name string) {
- if instr == nil {
- return
- }
- instr.m.Counter(name).Incr()
+ return v1.NewInstrumentation(m)
}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/print.go b/vendor/github.com/open-policy-agent/opa/topdown/print.go
index 765b344b3a..5eacd180d9 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/print.go
+++ b/vendor/github.com/open-policy-agent/opa/topdown/print.go
@@ -5,82 +5,12 @@
package topdown
import (
- "fmt"
"io"
- "strings"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
"github.com/open-policy-agent/opa/topdown/print"
+ v1 "github.com/open-policy-agent/opa/v1/topdown"
)
func NewPrintHook(w io.Writer) print.Hook {
- return printHook{w: w}
-}
-
-type printHook struct {
- w io.Writer
-}
-
-func (h printHook) Print(_ print.Context, msg string) error {
- _, err := fmt.Fprintln(h.w, msg)
- return err
-}
-
-func builtinPrint(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
-
- if bctx.PrintHook == nil {
- return iter(nil)
- }
-
- arr, err := builtins.ArrayOperand(operands[0].Value, 1)
- if err != nil {
- return err
- }
-
- buf := make([]string, arr.Len())
-
- err = builtinPrintCrossProductOperands(bctx, buf, arr, 0, func(buf []string) error {
- pctx := print.Context{
- Context: bctx.Context,
- Location: bctx.Location,
- }
- return bctx.PrintHook.Print(pctx, strings.Join(buf, " "))
- })
- if err != nil {
- return err
- }
-
- return iter(nil)
-}
-
-func builtinPrintCrossProductOperands(bctx BuiltinContext, buf []string, operands *ast.Array, i int, f func([]string) error) error {
-
- if i >= operands.Len() {
- return f(buf)
- }
-
- xs, ok := operands.Elem(i).Value.(ast.Set)
- if !ok {
- return Halt{Err: internalErr(bctx.Location, fmt.Sprintf("illegal argument type: %v", ast.TypeName(operands.Elem(i).Value)))}
- }
-
- if xs.Len() == 0 {
- buf[i] = ""
- return builtinPrintCrossProductOperands(bctx, buf, operands, i+1, f)
- }
-
- return xs.Iter(func(x *ast.Term) error {
- switch v := x.Value.(type) {
- case ast.String:
- buf[i] = string(v)
- default:
- buf[i] = v.String()
- }
- return builtinPrintCrossProductOperands(bctx, buf, operands, i+1, f)
- })
-}
-
-func init() {
- RegisterBuiltinFunc(ast.InternalPrint.Name, builtinPrint)
+ return v1.NewPrintHook(w)
}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/print/doc.go b/vendor/github.com/open-policy-agent/opa/topdown/print/doc.go
new file mode 100644
index 0000000000..c2ee0eca7f
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/print/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended.
+// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead.
+// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information.
+package print
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/print/print.go b/vendor/github.com/open-policy-agent/opa/topdown/print/print.go
index 0fb6abdca8..66ffbb176f 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/print/print.go
+++ b/vendor/github.com/open-policy-agent/opa/topdown/print/print.go
@@ -1,21 +1,14 @@
package print
import (
- "context"
-
- "github.com/open-policy-agent/opa/ast"
+ v1 "github.com/open-policy-agent/opa/v1/topdown/print"
)
// Context provides the Hook implementation context about the print() call.
-type Context struct {
- Context context.Context // request context passed when query executed
- Location *ast.Location // location of print call
-}
+type Context = v1.Context
// Hook defines the interface that callers can implement to receive print
// statement outputs. If the hook returns an error, it will be surfaced if
// strict builtin error checking is enabled (otherwise, it will not halt
// execution.)
-type Hook interface {
- Print(Context, string) error
-}
+type Hook = v1.Hook
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/query.go b/vendor/github.com/open-policy-agent/opa/topdown/query.go
index 8406cfdd87..d24060991f 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/query.go
+++ b/vendor/github.com/open-policy-agent/opa/topdown/query.go
@@ -1,599 +1,24 @@
package topdown
import (
- "context"
- "crypto/rand"
- "io"
- "sort"
- "time"
-
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/metrics"
- "github.com/open-policy-agent/opa/resolver"
- "github.com/open-policy-agent/opa/storage"
- "github.com/open-policy-agent/opa/topdown/builtins"
- "github.com/open-policy-agent/opa/topdown/cache"
- "github.com/open-policy-agent/opa/topdown/copypropagation"
- "github.com/open-policy-agent/opa/topdown/print"
- "github.com/open-policy-agent/opa/tracing"
+ "github.com/open-policy-agent/opa/v1/ast"
+ v1 "github.com/open-policy-agent/opa/v1/topdown"
)
// QueryResultSet represents a collection of results returned by a query.
-type QueryResultSet []QueryResult
+type QueryResultSet = v1.QueryResultSet
// QueryResult represents a single result returned by a query. The result
// contains bindings for all variables that appear in the query.
-type QueryResult map[ast.Var]*ast.Term
+type QueryResult = v1.QueryResult
// Query provides a configurable interface for performing query evaluation.
-type Query struct {
- seed io.Reader
- time time.Time
- cancel Cancel
- query ast.Body
- queryCompiler ast.QueryCompiler
- compiler *ast.Compiler
- store storage.Store
- txn storage.Transaction
- input *ast.Term
- external *resolverTrie
- tracers []QueryTracer
- plugTraceVars bool
- unknowns []*ast.Term
- partialNamespace string
- skipSaveNamespace bool
- metrics metrics.Metrics
- instr *Instrumentation
- disableInlining []ast.Ref
- shallowInlining bool
- genvarprefix string
- runtime *ast.Term
- builtins map[string]*Builtin
- indexing bool
- earlyExit bool
- interQueryBuiltinCache cache.InterQueryCache
- interQueryBuiltinValueCache cache.InterQueryValueCache
- ndBuiltinCache builtins.NDBCache
- strictBuiltinErrors bool
- builtinErrorList *[]Error
- strictObjects bool
- printHook print.Hook
- tracingOpts tracing.Options
- virtualCache VirtualCache
-}
+type Query = v1.Query
// Builtin represents a built-in function that queries can call.
-type Builtin struct {
- Decl *ast.Builtin
- Func BuiltinFunc
-}
+type Builtin = v1.Builtin
// NewQuery returns a new Query object that can be run.
func NewQuery(query ast.Body) *Query {
- return &Query{
- query: query,
- genvarprefix: ast.WildcardPrefix,
- indexing: true,
- earlyExit: true,
- external: newResolverTrie(),
- }
-}
-
-// WithQueryCompiler sets the queryCompiler used for the query.
-func (q *Query) WithQueryCompiler(queryCompiler ast.QueryCompiler) *Query {
- q.queryCompiler = queryCompiler
- return q
-}
-
-// WithCompiler sets the compiler to use for the query.
-func (q *Query) WithCompiler(compiler *ast.Compiler) *Query {
- q.compiler = compiler
- return q
-}
-
-// WithStore sets the store to use for the query.
-func (q *Query) WithStore(store storage.Store) *Query {
- q.store = store
- return q
-}
-
-// WithTransaction sets the transaction to use for the query. All queries
-// should be performed over a consistent snapshot of the storage layer.
-func (q *Query) WithTransaction(txn storage.Transaction) *Query {
- q.txn = txn
- return q
-}
-
-// WithCancel sets the cancellation object to use for the query. Set this if
-// you need to abort queries based on a deadline. This is optional.
-func (q *Query) WithCancel(cancel Cancel) *Query {
- q.cancel = cancel
- return q
-}
-
-// WithInput sets the input object to use for the query. References rooted at
-// input will be evaluated against this value. This is optional.
-func (q *Query) WithInput(input *ast.Term) *Query {
- q.input = input
- return q
-}
-
-// WithTracer adds a query tracer to use during evaluation. This is optional.
-// Deprecated: Use WithQueryTracer instead.
-func (q *Query) WithTracer(tracer Tracer) *Query {
- qt, ok := tracer.(QueryTracer)
- if !ok {
- qt = WrapLegacyTracer(tracer)
- }
- return q.WithQueryTracer(qt)
-}
-
-// WithQueryTracer adds a query tracer to use during evaluation. This is optional.
-// Disabled QueryTracers will be ignored.
-func (q *Query) WithQueryTracer(tracer QueryTracer) *Query {
- if !tracer.Enabled() {
- return q
- }
-
- q.tracers = append(q.tracers, tracer)
-
- // If *any* of the tracers require local variable metadata we need to
- // enabled plugging local trace variables.
- conf := tracer.Config()
- if conf.PlugLocalVars {
- q.plugTraceVars = true
- }
-
- return q
-}
-
-// WithMetrics sets the metrics collection to add evaluation metrics to. This
-// is optional.
-func (q *Query) WithMetrics(m metrics.Metrics) *Query {
- q.metrics = m
- return q
-}
-
-// WithInstrumentation sets the instrumentation configuration to enable on the
-// evaluation process. By default, instrumentation is turned off.
-func (q *Query) WithInstrumentation(instr *Instrumentation) *Query {
- q.instr = instr
- return q
-}
-
-// WithUnknowns sets the initial set of variables or references to treat as
-// unknown during query evaluation. This is required for partial evaluation.
-func (q *Query) WithUnknowns(terms []*ast.Term) *Query {
- q.unknowns = terms
- return q
-}
-
-// WithPartialNamespace sets the namespace to use for supporting rules
-// generated as part of the partial evaluation process. The ns value must be a
-// valid package path component.
-func (q *Query) WithPartialNamespace(ns string) *Query {
- q.partialNamespace = ns
- return q
-}
-
-// WithSkipPartialNamespace disables namespacing of saved support rules that are generated
-// from the original policy (rules which are completely synthetic are still namespaced.)
-func (q *Query) WithSkipPartialNamespace(yes bool) *Query {
- q.skipSaveNamespace = yes
- return q
-}
-
-// WithDisableInlining adds a set of paths to the query that should be excluded from
-// inlining. Inlining during partial evaluation can be expensive in some cases
-// (e.g., when a cross-product is computed.) Disabling inlining avoids expensive
-// computation at the cost of generating support rules.
-func (q *Query) WithDisableInlining(paths []ast.Ref) *Query {
- q.disableInlining = paths
- return q
-}
-
-// WithShallowInlining disables aggressive inlining performed during partial evaluation.
-// When shallow inlining is enabled rules that depend (transitively) on unknowns are not inlined.
-// Only rules/values that are completely known will be inlined.
-func (q *Query) WithShallowInlining(yes bool) *Query {
- q.shallowInlining = yes
- return q
-}
-
-// WithRuntime sets the runtime data to execute the query with. The runtime data
-// can be returned by the `opa.runtime` built-in function.
-func (q *Query) WithRuntime(runtime *ast.Term) *Query {
- q.runtime = runtime
- return q
-}
-
-// WithBuiltins adds a set of built-in functions that can be called by the
-// query.
-func (q *Query) WithBuiltins(builtins map[string]*Builtin) *Query {
- q.builtins = builtins
- return q
-}
-
-// WithIndexing will enable or disable using rule indexing for the evaluation
-// of the query. The default is enabled.
-func (q *Query) WithIndexing(enabled bool) *Query {
- q.indexing = enabled
- return q
-}
-
-// WithEarlyExit will enable or disable using 'early exit' for the evaluation
-// of the query. The default is enabled.
-func (q *Query) WithEarlyExit(enabled bool) *Query {
- q.earlyExit = enabled
- return q
-}
-
-// WithSeed sets a reader that will seed randomization required by built-in functions.
-// If a seed is not provided crypto/rand.Reader is used.
-func (q *Query) WithSeed(r io.Reader) *Query {
- q.seed = r
- return q
-}
-
-// WithTime sets the time that will be returned by the time.now_ns() built-in function.
-func (q *Query) WithTime(x time.Time) *Query {
- q.time = x
- return q
-}
-
-// WithInterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize.
-func (q *Query) WithInterQueryBuiltinCache(c cache.InterQueryCache) *Query {
- q.interQueryBuiltinCache = c
- return q
-}
-
-// WithInterQueryBuiltinValueCache sets the inter-query value cache that built-in functions can utilize.
-func (q *Query) WithInterQueryBuiltinValueCache(c cache.InterQueryValueCache) *Query {
- q.interQueryBuiltinValueCache = c
- return q
-}
-
-// WithNDBuiltinCache sets the non-deterministic builtin cache.
-func (q *Query) WithNDBuiltinCache(c builtins.NDBCache) *Query {
- q.ndBuiltinCache = c
- return q
-}
-
-// WithStrictBuiltinErrors tells the evaluator to treat all built-in function errors as fatal errors.
-func (q *Query) WithStrictBuiltinErrors(yes bool) *Query {
- q.strictBuiltinErrors = yes
- return q
-}
-
-// WithBuiltinErrorList supplies a pointer to an Error slice to store built-in function errors
-// encountered during evaluation. This error slice can be inspected after evaluation to determine
-// which built-in function errors occurred.
-func (q *Query) WithBuiltinErrorList(list *[]Error) *Query {
- q.builtinErrorList = list
- return q
-}
-
-// WithResolver configures an external resolver to use for the given ref.
-func (q *Query) WithResolver(ref ast.Ref, r resolver.Resolver) *Query {
- q.external.Put(ref, r)
- return q
-}
-
-func (q *Query) WithPrintHook(h print.Hook) *Query {
- q.printHook = h
- return q
-}
-
-// WithDistributedTracingOpts sets the options to be used by distributed tracing.
-func (q *Query) WithDistributedTracingOpts(tr tracing.Options) *Query {
- q.tracingOpts = tr
- return q
-}
-
-// WithStrictObjects tells the evaluator to avoid the "lazy object" optimization
-// applied when reading objects from the store. It will result in higher memory
-// usage and should only be used temporarily while adjusting code that breaks
-// because of the optimization.
-func (q *Query) WithStrictObjects(yes bool) *Query {
- q.strictObjects = yes
- return q
-}
-
-// WithVirtualCache sets the VirtualCache to use during evaluation. This is
-// optional, and if not set, the default cache is used.
-func (q *Query) WithVirtualCache(vc VirtualCache) *Query {
- q.virtualCache = vc
- return q
-}
-
-// PartialRun executes partial evaluation on the query with respect to unknown
-// values. Partial evaluation attempts to evaluate as much of the query as
-// possible without requiring values for the unknowns set on the query. The
-// result of partial evaluation is a new set of queries that can be evaluated
-// once the unknown value is known. In addition to new queries, partial
-// evaluation may produce additional support modules that should be used in
-// conjunction with the partially evaluated queries.
-func (q *Query) PartialRun(ctx context.Context) (partials []ast.Body, support []*ast.Module, err error) {
- if q.partialNamespace == "" {
- q.partialNamespace = "partial" // lazily initialize partial namespace
- }
- if q.seed == nil {
- q.seed = rand.Reader
- }
- if !q.time.IsZero() {
- q.time = time.Now()
- }
- if q.metrics == nil {
- q.metrics = metrics.New()
- }
-
- f := &queryIDFactory{}
- b := newBindings(0, q.instr)
-
- var vc VirtualCache
- if q.virtualCache != nil {
- vc = q.virtualCache
- } else {
- vc = NewVirtualCache()
- }
-
- e := &eval{
- ctx: ctx,
- metrics: q.metrics,
- seed: q.seed,
- time: ast.NumberTerm(int64ToJSONNumber(q.time.UnixNano())),
- cancel: q.cancel,
- query: q.query,
- queryCompiler: q.queryCompiler,
- queryIDFact: f,
- queryID: f.Next(),
- bindings: b,
- compiler: q.compiler,
- store: q.store,
- baseCache: newBaseCache(),
- targetStack: newRefStack(),
- txn: q.txn,
- input: q.input,
- external: q.external,
- tracers: q.tracers,
- traceEnabled: len(q.tracers) > 0,
- plugTraceVars: q.plugTraceVars,
- instr: q.instr,
- builtins: q.builtins,
- builtinCache: builtins.Cache{},
- functionMocks: newFunctionMocksStack(),
- interQueryBuiltinCache: q.interQueryBuiltinCache,
- interQueryBuiltinValueCache: q.interQueryBuiltinValueCache,
- ndBuiltinCache: q.ndBuiltinCache,
- virtualCache: vc,
- comprehensionCache: newComprehensionCache(),
- saveSet: newSaveSet(q.unknowns, b, q.instr),
- saveStack: newSaveStack(),
- saveSupport: newSaveSupport(),
- saveNamespace: ast.StringTerm(q.partialNamespace),
- skipSaveNamespace: q.skipSaveNamespace,
- inliningControl: &inliningControl{
- shallow: q.shallowInlining,
- },
- genvarprefix: q.genvarprefix,
- runtime: q.runtime,
- indexing: q.indexing,
- earlyExit: q.earlyExit,
- builtinErrors: &builtinErrors{},
- printHook: q.printHook,
- strictObjects: q.strictObjects,
- }
-
- if len(q.disableInlining) > 0 {
- e.inliningControl.PushDisable(q.disableInlining, false)
- }
-
- e.caller = e
- q.metrics.Timer(metrics.RegoPartialEval).Start()
- defer q.metrics.Timer(metrics.RegoPartialEval).Stop()
-
- livevars := ast.NewVarSet()
- for _, t := range q.unknowns {
- switch v := t.Value.(type) {
- case ast.Var:
- livevars.Add(v)
- case ast.Ref:
- livevars.Add(v[0].Value.(ast.Var))
- }
- }
-
- ast.WalkVars(q.query, func(x ast.Var) bool {
- if !x.IsGenerated() {
- livevars.Add(x)
- }
- return false
- })
-
- p := copypropagation.New(livevars).WithCompiler(q.compiler)
-
- err = e.Run(func(e *eval) error {
-
- // Build output from saved expressions.
- body := ast.NewBody()
-
- for _, elem := range e.saveStack.Stack[len(e.saveStack.Stack)-1] {
- body.Append(elem.Plug(e.bindings))
- }
-
- // Include bindings as exprs so that when caller evals the result, they
- // can obtain values for the vars in their query.
- bindingExprs := []*ast.Expr{}
- _ = e.bindings.Iter(e.bindings, func(a, b *ast.Term) error {
- bindingExprs = append(bindingExprs, ast.Equality.Expr(a, b))
- return nil
- }) // cannot return error
-
- // Sort binding expressions so that results are deterministic.
- sort.Slice(bindingExprs, func(i, j int) bool {
- return bindingExprs[i].Compare(bindingExprs[j]) < 0
- })
-
- for i := range bindingExprs {
- body.Append(bindingExprs[i])
- }
-
- // Skip this rule body if it fails to type-check.
- // Type-checking failure means the rule body will never succeed.
- if !e.compiler.PassesTypeCheck(body) {
- return nil
- }
-
- if !q.shallowInlining {
- body = applyCopyPropagation(p, e.instr, body)
- }
-
- partials = append(partials, body)
- return nil
- })
-
- support = e.saveSupport.List()
-
- if len(e.builtinErrors.errs) > 0 {
- if q.strictBuiltinErrors {
- err = e.builtinErrors.errs[0]
- } else if q.builtinErrorList != nil {
- // If a builtinErrorList has been supplied, we must use pointer indirection
- // to append to it. builtinErrorList is a slice pointer so that errors can be
- // appended to it without returning a new slice and changing the interface
- // of PartialRun.
- for _, err := range e.builtinErrors.errs {
- if tdError, ok := err.(*Error); ok {
- *(q.builtinErrorList) = append(*(q.builtinErrorList), *tdError)
- } else {
- *(q.builtinErrorList) = append(*(q.builtinErrorList), Error{
- Code: BuiltinErr,
- Message: err.Error(),
- })
- }
- }
- }
- }
-
- for i := range support {
- sort.Slice(support[i].Rules, func(j, k int) bool {
- return support[i].Rules[j].Compare(support[i].Rules[k]) < 0
- })
- }
-
- return partials, support, err
-}
-
-// Run is a wrapper around Iter that accumulates query results and returns them
-// in one shot.
-func (q *Query) Run(ctx context.Context) (QueryResultSet, error) {
- qrs := QueryResultSet{}
- return qrs, q.Iter(ctx, func(qr QueryResult) error {
- qrs = append(qrs, qr)
- return nil
- })
-}
-
-// Iter executes the query and invokes the iter function with query results
-// produced by evaluating the query.
-func (q *Query) Iter(ctx context.Context, iter func(QueryResult) error) error {
- // Query evaluation must not be allowed if the compiler has errors and is in an undefined, possibly inconsistent state
- if q.compiler != nil && len(q.compiler.Errors) > 0 {
- return &Error{
- Code: InternalErr,
- Message: "compiler has errors",
- }
- }
-
- if q.seed == nil {
- q.seed = rand.Reader
- }
- if q.time.IsZero() {
- q.time = time.Now()
- }
- if q.metrics == nil {
- q.metrics = metrics.New()
- }
-
- f := &queryIDFactory{}
-
- var vc VirtualCache
- if q.virtualCache != nil {
- vc = q.virtualCache
- } else {
- vc = NewVirtualCache()
- }
-
- e := &eval{
- ctx: ctx,
- metrics: q.metrics,
- seed: q.seed,
- time: ast.NumberTerm(int64ToJSONNumber(q.time.UnixNano())),
- cancel: q.cancel,
- query: q.query,
- queryCompiler: q.queryCompiler,
- queryIDFact: f,
- queryID: f.Next(),
- bindings: newBindings(0, q.instr),
- compiler: q.compiler,
- store: q.store,
- baseCache: newBaseCache(),
- targetStack: newRefStack(),
- txn: q.txn,
- input: q.input,
- external: q.external,
- tracers: q.tracers,
- traceEnabled: len(q.tracers) > 0,
- plugTraceVars: q.plugTraceVars,
- instr: q.instr,
- builtins: q.builtins,
- builtinCache: builtins.Cache{},
- functionMocks: newFunctionMocksStack(),
- interQueryBuiltinCache: q.interQueryBuiltinCache,
- interQueryBuiltinValueCache: q.interQueryBuiltinValueCache,
- ndBuiltinCache: q.ndBuiltinCache,
- virtualCache: vc,
- comprehensionCache: newComprehensionCache(),
- genvarprefix: q.genvarprefix,
- runtime: q.runtime,
- indexing: q.indexing,
- earlyExit: q.earlyExit,
- builtinErrors: &builtinErrors{},
- printHook: q.printHook,
- tracingOpts: q.tracingOpts,
- strictObjects: q.strictObjects,
- }
- e.caller = e
- q.metrics.Timer(metrics.RegoQueryEval).Start()
- err := e.Run(func(e *eval) error {
- qr := QueryResult{}
- _ = e.bindings.Iter(nil, func(k, v *ast.Term) error {
- qr[k.Value.(ast.Var)] = v
- return nil
- }) // cannot return error
- return iter(qr)
- })
-
- if len(e.builtinErrors.errs) > 0 {
- if q.strictBuiltinErrors {
- err = e.builtinErrors.errs[0]
- } else if q.builtinErrorList != nil {
- // If a builtinErrorList has been supplied, we must use pointer indirection
- // to append to it. builtinErrorList is a slice pointer so that errors can be
- // appended to it without returning a new slice and changing the interface
- // of Iter.
- for _, err := range e.builtinErrors.errs {
- if tdError, ok := err.(*Error); ok {
- *(q.builtinErrorList) = append(*(q.builtinErrorList), *tdError)
- } else {
- *(q.builtinErrorList) = append(*(q.builtinErrorList), Error{
- Code: BuiltinErr,
- Message: err.Error(),
- })
- }
- }
- }
- }
-
- q.metrics.Timer(metrics.RegoQueryEval).Stop()
- return err
+ return v1.NewQuery(query)
}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/trace.go b/vendor/github.com/open-policy-agent/opa/topdown/trace.go
index 277c94b626..4d4cc295e2 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/trace.go
+++ b/vendor/github.com/open-policy-agent/opa/topdown/trace.go
@@ -5,898 +5,108 @@
package topdown
import (
- "bytes"
- "fmt"
"io"
- "slices"
- "strings"
- iStrs "github.com/open-policy-agent/opa/internal/strings"
-
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
-)
-
-const (
- minLocationWidth = 5 // len("query")
- maxIdealLocationWidth = 64
- columnPadding = 4
- maxExprVarWidth = 32
- maxPrettyExprVarWidth = 64
+ v1 "github.com/open-policy-agent/opa/v1/topdown"
)
// Op defines the types of tracing events.
-type Op string
+type Op = v1.Op
const (
// EnterOp is emitted when a new query is about to be evaluated.
- EnterOp Op = "Enter"
+ EnterOp = v1.EnterOp
// ExitOp is emitted when a query has evaluated to true.
- ExitOp Op = "Exit"
+ ExitOp = v1.ExitOp
// EvalOp is emitted when an expression is about to be evaluated.
- EvalOp Op = "Eval"
+ EvalOp = v1.EvalOp
// RedoOp is emitted when an expression, rule, or query is being re-evaluated.
- RedoOp Op = "Redo"
+ RedoOp = v1.RedoOp
// SaveOp is emitted when an expression is saved instead of evaluated
// during partial evaluation.
- SaveOp Op = "Save"
+ SaveOp = v1.SaveOp
// FailOp is emitted when an expression evaluates to false.
- FailOp Op = "Fail"
+ FailOp = v1.FailOp
// DuplicateOp is emitted when a query has produced a duplicate value. The search
// will stop at the point where the duplicate was emitted and backtrack.
- DuplicateOp Op = "Duplicate"
+ DuplicateOp = v1.DuplicateOp
// NoteOp is emitted when an expression invokes a tracing built-in function.
- NoteOp Op = "Note"
+ NoteOp = v1.NoteOp
// IndexOp is emitted during an expression evaluation to represent lookup
// matches.
- IndexOp Op = "Index"
+ IndexOp = v1.IndexOp
// WasmOp is emitted when resolving a ref using an external
// Resolver.
- WasmOp Op = "Wasm"
+ WasmOp = v1.WasmOp
// UnifyOp is emitted when two terms are unified. Node will be set to an
// equality expression with the two terms. This Node will not have location
// info.
- UnifyOp Op = "Unify"
- FailedAssertionOp Op = "FailedAssertion"
+ UnifyOp = v1.UnifyOp
+ FailedAssertionOp = v1.FailedAssertionOp
)
// VarMetadata provides some user facing information about
// a variable in some policy.
-type VarMetadata struct {
- Name ast.Var `json:"name"`
- Location *ast.Location `json:"location"`
-}
+type VarMetadata = v1.VarMetadata
// Event contains state associated with a tracing event.
-type Event struct {
- Op Op // Identifies type of event.
- Node ast.Node // Contains AST node relevant to the event.
- Location *ast.Location // The location of the Node this event relates to.
- QueryID uint64 // Identifies the query this event belongs to.
- ParentID uint64 // Identifies the parent query this event belongs to.
- Locals *ast.ValueMap // Contains local variable bindings from the query context. Nil if variables were not included in the trace event.
- LocalMetadata map[ast.Var]VarMetadata // Contains metadata for the local variable bindings. Nil if variables were not included in the trace event.
- Message string // Contains message for Note events.
- Ref *ast.Ref // Identifies the subject ref for the event. Only applies to Index and Wasm operations.
-
- input *ast.Term
- bindings *bindings
- localVirtualCacheSnapshot *ast.ValueMap
-}
-
-func (evt *Event) WithInput(input *ast.Term) *Event {
- evt.input = input
- return evt
-}
-
-// HasRule returns true if the Event contains an ast.Rule.
-func (evt *Event) HasRule() bool {
- _, ok := evt.Node.(*ast.Rule)
- return ok
-}
-
-// HasBody returns true if the Event contains an ast.Body.
-func (evt *Event) HasBody() bool {
- _, ok := evt.Node.(ast.Body)
- return ok
-}
-
-// HasExpr returns true if the Event contains an ast.Expr.
-func (evt *Event) HasExpr() bool {
- _, ok := evt.Node.(*ast.Expr)
- return ok
-}
-
-// Equal returns true if this event is equal to the other event.
-func (evt *Event) Equal(other *Event) bool {
- if evt.Op != other.Op {
- return false
- }
- if evt.QueryID != other.QueryID {
- return false
- }
- if evt.ParentID != other.ParentID {
- return false
- }
- if !evt.equalNodes(other) {
- return false
- }
- return evt.Locals.Equal(other.Locals)
-}
-
-func (evt *Event) String() string {
- return fmt.Sprintf("%v %v %v (qid=%v, pqid=%v)", evt.Op, evt.Node, evt.Locals, evt.QueryID, evt.ParentID)
-}
-
-// Input returns the input object as it was at the event.
-func (evt *Event) Input() *ast.Term {
- return evt.input
-}
-
-// Plug plugs event bindings into the provided ast.Term. Because bindings are mutable, this only makes sense to do when
-// the event is emitted rather than on recorded trace events as the bindings are going to be different by then.
-func (evt *Event) Plug(term *ast.Term) *ast.Term {
- return evt.bindings.Plug(term)
-}
-
-func (evt *Event) equalNodes(other *Event) bool {
- switch a := evt.Node.(type) {
- case ast.Body:
- if b, ok := other.Node.(ast.Body); ok {
- return a.Equal(b)
- }
- case *ast.Rule:
- if b, ok := other.Node.(*ast.Rule); ok {
- return a.Equal(b)
- }
- case *ast.Expr:
- if b, ok := other.Node.(*ast.Expr); ok {
- return a.Equal(b)
- }
- case nil:
- return other.Node == nil
- }
- return false
-}
+type Event = v1.Event
// Tracer defines the interface for tracing in the top-down evaluation engine.
// Deprecated: Use QueryTracer instead.
-type Tracer interface {
- Enabled() bool
- Trace(*Event)
-}
+type Tracer = v1.Tracer
// QueryTracer defines the interface for tracing in the top-down evaluation engine.
// The implementation can provide additional configuration to modify the tracing
// behavior for query evaluations.
-type QueryTracer interface {
- Enabled() bool
- TraceEvent(Event)
- Config() TraceConfig
-}
+type QueryTracer = v1.QueryTracer
// TraceConfig defines some common configuration for Tracer implementations
-type TraceConfig struct {
- PlugLocalVars bool // Indicate whether to plug local variable bindings before calling into the tracer.
-}
-
-// legacyTracer Implements the QueryTracer interface by wrapping an older Tracer instance.
-type legacyTracer struct {
- t Tracer
-}
-
-func (l *legacyTracer) Enabled() bool {
- return l.t.Enabled()
-}
-
-func (l *legacyTracer) Config() TraceConfig {
- return TraceConfig{
- PlugLocalVars: true, // For backwards compatibility old tracers will plug local variables
- }
-}
-
-func (l *legacyTracer) TraceEvent(evt Event) {
- l.t.Trace(&evt)
-}
+type TraceConfig = v1.TraceConfig
// WrapLegacyTracer will create a new QueryTracer which wraps an
// older Tracer instance.
func WrapLegacyTracer(tracer Tracer) QueryTracer {
- return &legacyTracer{t: tracer}
+ return v1.WrapLegacyTracer(tracer)
}
// BufferTracer implements the Tracer and QueryTracer interface by
// simply buffering all events received.
-type BufferTracer []*Event
+type BufferTracer = v1.BufferTracer
// NewBufferTracer returns a new BufferTracer.
func NewBufferTracer() *BufferTracer {
- return &BufferTracer{}
-}
-
-// Enabled always returns true if the BufferTracer is instantiated.
-func (b *BufferTracer) Enabled() bool {
- return b != nil
-}
-
-// Trace adds the event to the buffer.
-// Deprecated: Use TraceEvent instead.
-func (b *BufferTracer) Trace(evt *Event) {
- *b = append(*b, evt)
-}
-
-// TraceEvent adds the event to the buffer.
-func (b *BufferTracer) TraceEvent(evt Event) {
- *b = append(*b, &evt)
-}
-
-// Config returns the Tracers standard configuration
-func (b *BufferTracer) Config() TraceConfig {
- return TraceConfig{PlugLocalVars: true}
+ return v1.NewBufferTracer()
}
// PrettyTrace pretty prints the trace to the writer.
func PrettyTrace(w io.Writer, trace []*Event) {
- PrettyTraceWithOpts(w, trace, PrettyTraceOptions{})
+ v1.PrettyTrace(w, trace)
}
// PrettyTraceWithLocation prints the trace to the writer and includes location information
func PrettyTraceWithLocation(w io.Writer, trace []*Event) {
- PrettyTraceWithOpts(w, trace, PrettyTraceOptions{Locations: true})
-}
-
-type PrettyTraceOptions struct {
- Locations bool // Include location information
- ExprVariables bool // Include variables found in the expression
- LocalVariables bool // Include all local variables
-}
-
-type traceRow []string
-
-func (r *traceRow) add(s string) {
- *r = append(*r, s)
-}
-
-type traceTable struct {
- rows []traceRow
- maxWidths []int
+ v1.PrettyTraceWithLocation(w, trace)
}
-func (t *traceTable) add(row traceRow) {
- t.rows = append(t.rows, row)
- for i := range row {
- if i >= len(t.maxWidths) {
- t.maxWidths = append(t.maxWidths, len(row[i]))
- } else if len(row[i]) > t.maxWidths[i] {
- t.maxWidths[i] = len(row[i])
- }
- }
-}
-
-func (t *traceTable) write(w io.Writer, padding int) {
- for _, row := range t.rows {
- for i, cell := range row {
- width := t.maxWidths[i] + padding
- if i < len(row)-1 {
- _, _ = fmt.Fprintf(w, "%-*s ", width, cell)
- } else {
- _, _ = fmt.Fprintf(w, "%s", cell)
- }
- }
- _, _ = fmt.Fprintln(w)
- }
-}
+type PrettyTraceOptions = v1.PrettyTraceOptions
func PrettyTraceWithOpts(w io.Writer, trace []*Event, opts PrettyTraceOptions) {
- depths := depths{}
-
- // FIXME: Can we shorten each location as we process each trace event instead of beforehand?
- filePathAliases, _ := getShortenedFileNames(trace)
-
- table := traceTable{}
-
- for _, event := range trace {
- depth := depths.GetOrSet(event.QueryID, event.ParentID)
- row := traceRow{}
-
- if opts.Locations {
- location := formatLocation(event, filePathAliases)
- row.add(location)
- }
-
- row.add(formatEvent(event, depth))
-
- if opts.ExprVariables {
- vars := exprLocalVars(event)
- keys := sortedKeys(vars)
-
- buf := new(bytes.Buffer)
- buf.WriteString("{")
- for i, k := range keys {
- if i > 0 {
- buf.WriteString(", ")
- }
- _, _ = fmt.Fprintf(buf, "%v: %s", k, iStrs.Truncate(vars.Get(k).String(), maxExprVarWidth))
- }
- buf.WriteString("}")
- row.add(buf.String())
- }
-
- if opts.LocalVariables {
- if locals := event.Locals; locals != nil {
- keys := sortedKeys(locals)
-
- buf := new(bytes.Buffer)
- buf.WriteString("{")
- for i, k := range keys {
- if i > 0 {
- buf.WriteString(", ")
- }
- _, _ = fmt.Fprintf(buf, "%v: %s", k, iStrs.Truncate(locals.Get(k).String(), maxExprVarWidth))
- }
- buf.WriteString("}")
- row.add(buf.String())
- } else {
- row.add("{}")
- }
- }
-
- table.add(row)
- }
-
- table.write(w, columnPadding)
-}
-
-func sortedKeys(vm *ast.ValueMap) []ast.Value {
- keys := make([]ast.Value, 0, vm.Len())
- vm.Iter(func(k, _ ast.Value) bool {
- keys = append(keys, k)
- return false
- })
- slices.SortFunc(keys, func(a, b ast.Value) int {
- return strings.Compare(a.String(), b.String())
- })
- return keys
-}
-
-func exprLocalVars(e *Event) *ast.ValueMap {
- vars := ast.NewValueMap()
-
- findVars := func(term *ast.Term) bool {
- //if r, ok := term.Value.(ast.Ref); ok {
- // fmt.Printf("ref: %v\n", r)
- // //return true
- //}
- if name, ok := term.Value.(ast.Var); ok {
- if meta, ok := e.LocalMetadata[name]; ok {
- if val := e.Locals.Get(name); val != nil {
- vars.Put(meta.Name, val)
- }
- }
- }
- return false
- }
-
- if r, ok := e.Node.(*ast.Rule); ok {
- // We're only interested in vars in the head, not the body
- ast.WalkTerms(r.Head, findVars)
- return vars
- }
-
- // The local cache snapshot only contains a snapshot for those refs present in the event node,
- // so they can all be added to the vars map.
- e.localVirtualCacheSnapshot.Iter(func(k, v ast.Value) bool {
- vars.Put(k, v)
- return false
- })
-
- ast.WalkTerms(e.Node, findVars)
-
- return vars
-}
-
-func formatEvent(event *Event, depth int) string {
- padding := formatEventPadding(event, depth)
- if event.Op == NoteOp {
- return fmt.Sprintf("%v%v %q", padding, event.Op, event.Message)
- }
-
- var details interface{}
- if node, ok := event.Node.(*ast.Rule); ok {
- details = node.Path()
- } else if event.Ref != nil {
- details = event.Ref
- } else {
- details = rewrite(event).Node
- }
-
- template := "%v%v %v"
- opts := []interface{}{padding, event.Op, details}
-
- if event.Message != "" {
- template += " %v"
- opts = append(opts, event.Message)
- }
-
- return fmt.Sprintf(template, opts...)
+ v1.PrettyTraceWithOpts(w, trace, opts)
}
-func formatEventPadding(event *Event, depth int) string {
- spaces := formatEventSpaces(event, depth)
- if spaces > 1 {
- return strings.Repeat("| ", spaces-1)
- }
- return ""
-}
-
-func formatEventSpaces(event *Event, depth int) int {
- switch event.Op {
- case EnterOp:
- return depth
- case RedoOp:
- if _, ok := event.Node.(*ast.Expr); !ok {
- return depth
- }
- }
- return depth + 1
-}
-
-// getShortenedFileNames will return a map of file paths to shortened aliases
-// that were found in the trace. It also returns the longest location expected
-func getShortenedFileNames(trace []*Event) (map[string]string, int) {
- // Get a deduplicated list of all file paths
- // and the longest file path size
- fpAliases := map[string]string{}
- var canShorten []string
- longestLocation := 0
- for _, event := range trace {
- if event.Location != nil {
- if event.Location.File != "" {
- // length of ":"
- curLen := len(event.Location.File) + numDigits10(event.Location.Row) + 1
- if curLen > longestLocation {
- longestLocation = curLen
- }
-
- if _, ok := fpAliases[event.Location.File]; ok {
- continue
- }
-
- canShorten = append(canShorten, event.Location.File)
-
- // Default to just alias their full path
- fpAliases[event.Location.File] = event.Location.File
- } else {
- // length of ":"
- curLen := minLocationWidth + numDigits10(event.Location.Row) + 1
- if curLen > longestLocation {
- longestLocation = curLen
- }
- }
- }
- }
-
- if len(canShorten) > 0 && longestLocation > maxIdealLocationWidth {
- fpAliases, longestLocation = iStrs.TruncateFilePaths(maxIdealLocationWidth, longestLocation, canShorten...)
- }
-
- return fpAliases, longestLocation
-}
-
-func numDigits10(n int) int {
- if n < 10 {
- return 1
- }
- return numDigits10(n/10) + 1
-}
-
-func formatLocation(event *Event, fileAliases map[string]string) string {
-
- location := event.Location
- if location == nil {
- return ""
- }
-
- if location.File == "" {
- return fmt.Sprintf("query:%v", location.Row)
- }
-
- return fmt.Sprintf("%v:%v", fileAliases[location.File], location.Row)
-}
-
-// depths is a helper for computing the depth of an event. Events within the
-// same query all have the same depth. The depth of query is
-// depth(parent(query))+1.
-type depths map[uint64]int
-
-func (ds depths) GetOrSet(qid uint64, pqid uint64) int {
- depth := ds[qid]
- if depth == 0 {
- depth = ds[pqid]
- depth++
- ds[qid] = depth
- }
- return depth
-}
-
-func builtinTrace(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
-
- str, err := builtins.StringOperand(operands[0].Value, 1)
- if err != nil {
- return handleBuiltinErr(ast.Trace.Name, bctx.Location, err)
- }
-
- if !bctx.TraceEnabled {
- return iter(ast.BooleanTerm(true))
- }
-
- evt := Event{
- Op: NoteOp,
- Location: bctx.Location,
- QueryID: bctx.QueryID,
- ParentID: bctx.ParentID,
- Message: string(str),
- }
-
- for i := range bctx.QueryTracers {
- bctx.QueryTracers[i].TraceEvent(evt)
- }
-
- return iter(ast.BooleanTerm(true))
-}
-
-func rewrite(event *Event) *Event {
-
- cpy := *event
-
- var node ast.Node
-
- switch v := event.Node.(type) {
- case *ast.Expr:
- expr := v.Copy()
-
- // Hide generated local vars in 'key' position that have not been
- // rewritten.
- if ev, ok := v.Terms.(*ast.Every); ok {
- if kv, ok := ev.Key.Value.(ast.Var); ok {
- if rw, ok := cpy.LocalMetadata[kv]; !ok || rw.Name.IsGenerated() {
- expr.Terms.(*ast.Every).Key = nil
- }
- }
- }
- node = expr
- case ast.Body:
- node = v.Copy()
- case *ast.Rule:
- node = v.Copy()
- }
-
- _, _ = ast.TransformVars(node, func(v ast.Var) (ast.Value, error) {
- if meta, ok := cpy.LocalMetadata[v]; ok {
- return meta.Name, nil
- }
- return v, nil
- })
-
- cpy.Node = node
-
- return &cpy
-}
-
-type varInfo struct {
- VarMetadata
- val ast.Value
- exprLoc *ast.Location
- col int // 0-indexed column
-}
-
-func (v varInfo) Value() string {
- if v.val != nil {
- return v.val.String()
- }
- return "undefined"
-}
-
-func (v varInfo) Title() string {
- if v.exprLoc != nil && v.exprLoc.Text != nil {
- return string(v.exprLoc.Text)
- }
- return string(v.Name)
-}
-
-func padLocationText(loc *ast.Location) string {
- if loc == nil {
- return ""
- }
-
- text := string(loc.Text)
-
- if loc.Col == 0 {
- return text
- }
-
- buf := new(bytes.Buffer)
- j := 0
- for i := 1; i < loc.Col; i++ {
- if len(loc.Tabs) > 0 && j < len(loc.Tabs) && loc.Tabs[j] == i {
- buf.WriteString("\t")
- j++
- } else {
- buf.WriteString(" ")
- }
- }
-
- buf.WriteString(text)
- return buf.String()
-}
-
-type PrettyEventOpts struct {
- PrettyVars bool
-}
-
-func walkTestTerms(x interface{}, f func(*ast.Term) bool) {
- var vis *ast.GenericVisitor
- vis = ast.NewGenericVisitor(func(x interface{}) bool {
- switch x := x.(type) {
- case ast.Call:
- for _, t := range x[1:] {
- vis.Walk(t)
- }
- return true
- case *ast.Expr:
- if x.IsCall() {
- for _, o := range x.Operands() {
- vis.Walk(o)
- }
- for i := range x.With {
- vis.Walk(x.With[i])
- }
- return true
- }
- case *ast.Term:
- return f(x)
- case *ast.With:
- vis.Walk(x.Value)
- return true
- }
- return false
- })
- vis.Walk(x)
-}
+type PrettyEventOpts = v1.PrettyEventOpts
func PrettyEvent(w io.Writer, e *Event, opts PrettyEventOpts) error {
- if !opts.PrettyVars {
- _, _ = fmt.Fprintln(w, padLocationText(e.Location))
- return nil
- }
-
- buf := new(bytes.Buffer)
- exprVars := map[string]varInfo{}
-
- findVars := func(unknownAreUndefined bool) func(term *ast.Term) bool {
- return func(term *ast.Term) bool {
- if term.Location == nil {
- return false
- }
-
- switch v := term.Value.(type) {
- case *ast.ArrayComprehension, *ast.SetComprehension, *ast.ObjectComprehension:
- // we don't report on the internals of a comprehension, as it's already evaluated, and we won't have the local vars.
- return true
- case ast.Var:
- var info *varInfo
- if meta, ok := e.LocalMetadata[v]; ok {
- info = &varInfo{
- VarMetadata: meta,
- val: e.Locals.Get(v),
- exprLoc: term.Location,
- }
- } else if unknownAreUndefined {
- info = &varInfo{
- VarMetadata: VarMetadata{Name: v},
- exprLoc: term.Location,
- col: term.Location.Col,
- }
- }
-
- if info != nil {
- if v, exists := exprVars[info.Title()]; !exists || v.val == nil {
- if term.Location != nil {
- info.col = term.Location.Col
- }
- exprVars[info.Title()] = *info
- }
- }
- }
- return false
- }
- }
-
- expr, ok := e.Node.(*ast.Expr)
- if !ok || expr == nil {
- return nil
- }
-
- base := expr.BaseCogeneratedExpr()
- exprText := padLocationText(base.Location)
- buf.WriteString(exprText)
-
- e.localVirtualCacheSnapshot.Iter(func(k, v ast.Value) bool {
- var info *varInfo
- switch k := k.(type) {
- case ast.Ref:
- info = &varInfo{
- VarMetadata: VarMetadata{Name: ast.Var(k.String())},
- val: v,
- exprLoc: k[0].Location,
- col: k[0].Location.Col,
- }
- case *ast.ArrayComprehension:
- info = &varInfo{
- VarMetadata: VarMetadata{Name: ast.Var(k.String())},
- val: v,
- exprLoc: k.Term.Location,
- col: k.Term.Location.Col,
- }
- case *ast.SetComprehension:
- info = &varInfo{
- VarMetadata: VarMetadata{Name: ast.Var(k.String())},
- val: v,
- exprLoc: k.Term.Location,
- col: k.Term.Location.Col,
- }
- case *ast.ObjectComprehension:
- info = &varInfo{
- VarMetadata: VarMetadata{Name: ast.Var(k.String())},
- val: v,
- exprLoc: k.Key.Location,
- col: k.Key.Location.Col,
- }
- }
-
- if info != nil {
- exprVars[info.Title()] = *info
- }
-
- return false
- })
-
- // If the expression is negated, we can't confidently assert that vars with unknown values are 'undefined',
- // since the compiler might have opted out of the necessary rewrite.
- walkTestTerms(expr, findVars(!expr.Negated))
- coExprs := expr.CogeneratedExprs()
- for _, coExpr := range coExprs {
- // Only the current "co-expr" can have undefined vars, if we don't know the value for a var in any other co-expr,
- // it's unknown, not undefined. A var can be unknown if it hasn't been assigned a value yet, because the co-expr
- // hasn't been evaluated yet (the fail happened before it).
- walkTestTerms(coExpr, findVars(false))
- }
-
- printPrettyVars(buf, exprVars)
- _, _ = fmt.Fprint(w, buf.String())
- return nil
-}
-
-func printPrettyVars(w *bytes.Buffer, exprVars map[string]varInfo) {
- containsTabs := false
- varRows := make(map[int]interface{})
- for _, info := range exprVars {
- if len(info.exprLoc.Tabs) > 0 {
- containsTabs = true
- }
- varRows[info.exprLoc.Row] = nil
- }
-
- if containsTabs && len(varRows) > 1 {
- // We can't (currently) reliably point to var locations when they are on different rows that contain tabs.
- // So we'll just print them in alphabetical order instead.
- byName := make([]varInfo, 0, len(exprVars))
- for _, info := range exprVars {
- byName = append(byName, info)
- }
- slices.SortStableFunc(byName, func(a, b varInfo) int {
- return strings.Compare(a.Title(), b.Title())
- })
-
- w.WriteString("\n\nWhere:\n")
- for _, info := range byName {
- w.WriteString(fmt.Sprintf("\n%s: %s", info.Title(), iStrs.Truncate(info.Value(), maxPrettyExprVarWidth)))
- }
-
- return
- }
-
- byCol := make([]varInfo, 0, len(exprVars))
- for _, info := range exprVars {
- byCol = append(byCol, info)
- }
- slices.SortFunc(byCol, func(a, b varInfo) int {
- // sort first by column, then by reverse row (to present vars in the same order they appear in the expr)
- if a.col == b.col {
- if a.exprLoc.Row == b.exprLoc.Row {
- return strings.Compare(a.Title(), b.Title())
- }
- return b.exprLoc.Row - a.exprLoc.Row
- }
- return a.col - b.col
- })
-
- if len(byCol) == 0 {
- return
- }
-
- w.WriteString("\n")
- printArrows(w, byCol, -1)
- for i := len(byCol) - 1; i >= 0; i-- {
- w.WriteString("\n")
- printArrows(w, byCol, i)
- }
-}
-
-func printArrows(w *bytes.Buffer, l []varInfo, printValueAt int) {
- prevCol := 0
- var slice []varInfo
- if printValueAt >= 0 {
- slice = l[:printValueAt+1]
- } else {
- slice = l
- }
- isFirst := true
- for i, info := range slice {
-
- isLast := i >= len(slice)-1
- col := info.col
-
- if !isLast && col == l[i+1].col {
- // We're sharing the same column with another, subsequent var
- continue
- }
-
- spaces := col - 1
- if i > 0 && !isFirst {
- spaces = (col - prevCol) - 1
- }
-
- for j := 0; j < spaces; j++ {
- tab := false
- for _, t := range info.exprLoc.Tabs {
- if t == j+prevCol+1 {
- w.WriteString("\t")
- tab = true
- break
- }
- }
- if !tab {
- w.WriteString(" ")
- }
- }
-
- if isLast && printValueAt >= 0 {
- valueStr := iStrs.Truncate(info.Value(), maxPrettyExprVarWidth)
- if (i > 0 && col == l[i-1].col) || (i < len(l)-1 && col == l[i+1].col) {
- // There is another var on this column, so we need to include the name to differentiate them.
- w.WriteString(fmt.Sprintf("%s: %s", info.Title(), valueStr))
- } else {
- w.WriteString(valueStr)
- }
- } else {
- w.WriteString("|")
- }
- prevCol = col
- isFirst = false
- }
-}
-
-func init() {
- RegisterBuiltinFunc(ast.Trace.Name, builtinTrace)
+ return v1.PrettyEvent(w, e, opts)
}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/walk.go b/vendor/github.com/open-policy-agent/opa/topdown/walk.go
deleted file mode 100644
index 0f3b3544b5..0000000000
--- a/vendor/github.com/open-policy-agent/opa/topdown/walk.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2017 The OPA Authors. All rights reserved.
-// Use of this source code is governed by an Apache2
-// license that can be found in the LICENSE file.
-
-package topdown
-
-import (
- "github.com/open-policy-agent/opa/ast"
-)
-
-func evalWalk(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
- input := operands[0]
-
- if pathIsWildcard(operands) {
- // When the path assignment is a wildcard: walk(input, [_, value])
- // we may skip the path construction entirely, and simply return
- // same pointer in each iteration. This is a much more efficient
- // path when only the values are needed.
- return walkNoPath(input, iter)
- }
-
- filter := getOutputPath(operands)
- return walk(filter, nil, input, iter)
-}
-
-func walk(filter, path *ast.Array, input *ast.Term, iter func(*ast.Term) error) error {
-
- if filter == nil || filter.Len() == 0 {
- if path == nil {
- path = ast.NewArray()
- }
-
- if err := iter(ast.ArrayTerm(ast.NewTerm(path.Copy()), input)); err != nil {
- return err
- }
- }
-
- if filter != nil && filter.Len() > 0 {
- key := filter.Elem(0)
- filter = filter.Slice(1, -1)
- if key.IsGround() {
- if term := input.Get(key); term != nil {
- path = pathAppend(path, key)
- return walk(filter, path, term, iter)
- }
- return nil
- }
- }
-
- switch v := input.Value.(type) {
- case *ast.Array:
- for i := 0; i < v.Len(); i++ {
- path = pathAppend(path, ast.IntNumberTerm(i))
- if err := walk(filter, path, v.Elem(i), iter); err != nil {
- return err
- }
- path = path.Slice(0, path.Len()-1)
- }
- case ast.Object:
- return v.Iter(func(k, v *ast.Term) error {
- path = pathAppend(path, k)
- if err := walk(filter, path, v, iter); err != nil {
- return err
- }
- path = path.Slice(0, path.Len()-1)
- return nil
- })
- case ast.Set:
- return v.Iter(func(elem *ast.Term) error {
- path = pathAppend(path, elem)
- if err := walk(filter, path, elem, iter); err != nil {
- return err
- }
- path = path.Slice(0, path.Len()-1)
- return nil
- })
- }
-
- return nil
-}
-
-var emptyArr = ast.ArrayTerm()
-
-func walkNoPath(input *ast.Term, iter func(*ast.Term) error) error {
- if err := iter(ast.ArrayTerm(emptyArr, input)); err != nil {
- return err
- }
-
- switch v := input.Value.(type) {
- case ast.Object:
- return v.Iter(func(_, v *ast.Term) error {
- return walkNoPath(v, iter)
- })
- case *ast.Array:
- for i := 0; i < v.Len(); i++ {
- if err := walkNoPath(v.Elem(i), iter); err != nil {
- return err
- }
- }
- case ast.Set:
- return v.Iter(func(elem *ast.Term) error {
- return walkNoPath(elem, iter)
- })
- }
-
- return nil
-}
-
-func pathAppend(path *ast.Array, key *ast.Term) *ast.Array {
- if path == nil {
- return ast.NewArray(key)
- }
-
- return path.Append(key)
-}
-
-func getOutputPath(operands []*ast.Term) *ast.Array {
- if len(operands) == 2 {
- if arr, ok := operands[1].Value.(*ast.Array); ok && arr.Len() == 2 {
- if path, ok := arr.Elem(0).Value.(*ast.Array); ok {
- return path
- }
- }
- }
- return nil
-}
-
-func pathIsWildcard(operands []*ast.Term) bool {
- if len(operands) == 2 {
- if arr, ok := operands[1].Value.(*ast.Array); ok && arr.Len() == 2 {
- if v, ok := arr.Elem(0).Value.(ast.Var); ok {
- return v.IsWildcard()
- }
- }
- }
- return false
-}
-
-func init() {
- RegisterBuiltinFunc(ast.WalkBuiltin.Name, evalWalk)
-}
diff --git a/vendor/github.com/open-policy-agent/opa/util/backoff.go b/vendor/github.com/open-policy-agent/opa/util/backoff.go
deleted file mode 100644
index 6fbf63ef77..0000000000
--- a/vendor/github.com/open-policy-agent/opa/util/backoff.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2018 The OPA Authors. All rights reserved.
-// Use of this source code is governed by an Apache2
-// license that can be found in the LICENSE file.
-
-package util
-
-import (
- "math/rand"
- "time"
-)
-
-func init() {
- // NOTE(sr): We don't need good random numbers here; it's used for jittering
- // the backup timing a bit. But anyways, let's make it random enough; without
- // a call to rand.Seed() we'd get the same stream of numbers for each program
- // run. (Or not, if some other packages happens to seed the global randomness
- // source.)
- // Note(philipc): rand.Seed() was deprecated in Go 1.20, so we've switched to
- // using the recommended rand.New(rand.NewSource(seed)) style.
- rand.New(rand.NewSource(time.Now().UnixNano()))
-}
-
-// DefaultBackoff returns a delay with an exponential backoff based on the
-// number of retries.
-func DefaultBackoff(base, max float64, retries int) time.Duration {
- return Backoff(base, max, .2, 1.6, retries)
-}
-
-// Backoff returns a delay with an exponential backoff based on the number of
-// retries. Same algorithm used in gRPC.
-func Backoff(base, max, jitter, factor float64, retries int) time.Duration {
- if retries == 0 {
- return 0
- }
-
- backoff, max := base, max
- for backoff < max && retries > 0 {
- backoff *= factor
- retries--
- }
- if backoff > max {
- backoff = max
- }
-
- // Randomize backoff delays so that if a cluster of requests start at
- // the same time, they won't operate in lockstep.
- backoff *= 1 + jitter*(rand.Float64()*2-1)
- if backoff < 0 {
- return 0
- }
-
- return time.Duration(backoff)
-}
diff --git a/vendor/github.com/open-policy-agent/opa/util/hashmap.go b/vendor/github.com/open-policy-agent/opa/util/hashmap.go
deleted file mode 100644
index 8875a6323e..0000000000
--- a/vendor/github.com/open-policy-agent/opa/util/hashmap.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2016 The OPA Authors. All rights reserved.
-// Use of this source code is governed by an Apache2
-// license that can be found in the LICENSE file.
-
-package util
-
-import (
- "fmt"
- "strings"
-)
-
-// T is a concise way to refer to T.
-type T interface{}
-
-type hashEntry struct {
- k T
- v T
- next *hashEntry
-}
-
-// HashMap represents a key/value map.
-type HashMap struct {
- eq func(T, T) bool
- hash func(T) int
- table map[int]*hashEntry
- size int
-}
-
-// NewHashMap returns a new empty HashMap.
-func NewHashMap(eq func(T, T) bool, hash func(T) int) *HashMap {
- return &HashMap{
- eq: eq,
- hash: hash,
- table: make(map[int]*hashEntry),
- size: 0,
- }
-}
-
-// Copy returns a shallow copy of this HashMap.
-func (h *HashMap) Copy() *HashMap {
- cpy := NewHashMap(h.eq, h.hash)
- h.Iter(func(k, v T) bool {
- cpy.Put(k, v)
- return false
- })
- return cpy
-}
-
-// Equal returns true if this HashMap equals the other HashMap.
-// Two hash maps are equal if they contain the same key/value pairs.
-func (h *HashMap) Equal(other *HashMap) bool {
- if h.Len() != other.Len() {
- return false
- }
- return !h.Iter(func(k, v T) bool {
- ov, ok := other.Get(k)
- if !ok {
- return true
- }
- return !h.eq(v, ov)
- })
-}
-
-// Get returns the value for k.
-func (h *HashMap) Get(k T) (T, bool) {
- hash := h.hash(k)
- for entry := h.table[hash]; entry != nil; entry = entry.next {
- if h.eq(entry.k, k) {
- return entry.v, true
- }
- }
- return nil, false
-}
-
-// Delete removes the key k.
-func (h *HashMap) Delete(k T) {
- hash := h.hash(k)
- var prev *hashEntry
- for entry := h.table[hash]; entry != nil; entry = entry.next {
- if h.eq(entry.k, k) {
- if prev != nil {
- prev.next = entry.next
- } else {
- h.table[hash] = entry.next
- }
- h.size--
- return
- }
- prev = entry
- }
-}
-
-// Hash returns the hash code for this hash map.
-func (h *HashMap) Hash() int {
- var hash int
- h.Iter(func(k, v T) bool {
- hash += h.hash(k) + h.hash(v)
- return false
- })
- return hash
-}
-
-// Iter invokes the iter function for each element in the HashMap.
-// If the iter function returns true, iteration stops and the return value is true.
-// If the iter function never returns true, iteration proceeds through all elements
-// and the return value is false.
-func (h *HashMap) Iter(iter func(T, T) bool) bool {
- for _, entry := range h.table {
- for ; entry != nil; entry = entry.next {
- if iter(entry.k, entry.v) {
- return true
- }
- }
- }
- return false
-}
-
-// Len returns the current size of this HashMap.
-func (h *HashMap) Len() int {
- return h.size
-}
-
-// Put inserts a key/value pair into this HashMap. If the key is already present, the existing
-// value is overwritten.
-func (h *HashMap) Put(k T, v T) {
- hash := h.hash(k)
- head := h.table[hash]
- for entry := head; entry != nil; entry = entry.next {
- if h.eq(entry.k, k) {
- entry.v = v
- return
- }
- }
- h.table[hash] = &hashEntry{k: k, v: v, next: head}
- h.size++
-}
-
-func (h *HashMap) String() string {
- var buf []string
- h.Iter(func(k T, v T) bool {
- buf = append(buf, fmt.Sprintf("%v: %v", k, v))
- return false
- })
- return "{" + strings.Join(buf, ", ") + "}"
-}
-
-// Update returns a new HashMap with elements from the other HashMap put into this HashMap.
-// If the other HashMap contains elements with the same key as this HashMap, the value
-// from the other HashMap overwrites the value from this HashMap.
-func (h *HashMap) Update(other *HashMap) *HashMap {
- updated := h.Copy()
- other.Iter(func(k, v T) bool {
- updated.Put(k, v)
- return false
- })
- return updated
-}
diff --git a/vendor/github.com/open-policy-agent/opa/util/maps.go b/vendor/github.com/open-policy-agent/opa/util/maps.go
deleted file mode 100644
index d943b4d0a8..0000000000
--- a/vendor/github.com/open-policy-agent/opa/util/maps.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package util
-
-// Values returns a slice of values from any map. Copied from golang.org/x/exp/maps.
-func Values[M ~map[K]V, K comparable, V any](m M) []V {
- r := make([]V, 0, len(m))
- for _, v := range m {
- r = append(r, v)
- }
- return r
-}
diff --git a/vendor/github.com/open-policy-agent/opa/util/read_gzip_body.go b/vendor/github.com/open-policy-agent/opa/util/read_gzip_body.go
deleted file mode 100644
index 217638b363..0000000000
--- a/vendor/github.com/open-policy-agent/opa/util/read_gzip_body.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package util
-
-import (
- "bytes"
- "compress/gzip"
- "encoding/binary"
- "fmt"
- "io"
- "net/http"
- "strings"
- "sync"
-
- "github.com/open-policy-agent/opa/util/decoding"
-)
-
-var gzipReaderPool = sync.Pool{
- New: func() interface{} {
- reader := new(gzip.Reader)
- return reader
- },
-}
-
-// Note(philipc): Originally taken from server/server.go
-// The DecodingLimitHandler handles validating that the gzip payload is within the
-// allowed max size limit. Thus, in the event of a forged payload size trailer,
-// the worst that can happen is that we waste memory up to the allowed max gzip
-// payload size, but not an unbounded amount of memory, as was potentially
-// possible before.
-func ReadMaybeCompressedBody(r *http.Request) ([]byte, error) {
- var content *bytes.Buffer
- // Note(philipc): If the request body is of unknown length (such as what
- // happens when 'Transfer-Encoding: chunked' is set), we have to do an
- // incremental read of the body. In this case, we can't be too clever, we
- // just do the best we can with whatever is streamed over to us.
- // Fetch gzip payload size limit from request context.
- if maxLength, ok := decoding.GetServerDecodingMaxLen(r.Context()); ok {
- bs, err := io.ReadAll(io.LimitReader(r.Body, maxLength))
- if err != nil {
- return bs, err
- }
- content = bytes.NewBuffer(bs)
- } else {
- // Read content from the request body into a buffer of known size.
- content = bytes.NewBuffer(make([]byte, 0, r.ContentLength))
- if _, err := io.CopyN(content, r.Body, r.ContentLength); err != nil {
- return content.Bytes(), err
- }
- }
-
- // Decompress gzip content by reading from the buffer.
- if strings.Contains(r.Header.Get("Content-Encoding"), "gzip") {
- // Fetch gzip payload size limit from request context.
- gzipMaxLength, _ := decoding.GetServerDecodingGzipMaxLen(r.Context())
-
- // Note(philipc): The last 4 bytes of a well-formed gzip blob will
- // always be a little-endian uint32, representing the decompressed
- // content size, modulo 2^32. We validate that the size is safe,
- // earlier in DecodingLimitHandler.
- sizeTrailerField := binary.LittleEndian.Uint32(content.Bytes()[content.Len()-4:])
- if sizeTrailerField > uint32(gzipMaxLength) {
- return content.Bytes(), fmt.Errorf("gzip payload too large")
- }
- // Pull a gzip decompressor from the pool, and assign it to the current
- // buffer, using Reset(). Later, return it back to the pool for another
- // request to use.
- gzReader := gzipReaderPool.Get().(*gzip.Reader)
- if err := gzReader.Reset(content); err != nil {
- return nil, err
- }
- defer gzReader.Close()
- defer gzipReaderPool.Put(gzReader)
- decompressedContent := bytes.NewBuffer(make([]byte, 0, sizeTrailerField))
- if _, err := io.CopyN(decompressedContent, gzReader, int64(sizeTrailerField)); err != nil {
- return decompressedContent.Bytes(), err
- }
- return decompressedContent.Bytes(), nil
- }
-
- // Request was not compressed; return the content bytes.
- return content.Bytes(), nil
-}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go b/vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go
new file mode 100644
index 0000000000..36f854c618
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go
@@ -0,0 +1,1034 @@
+// Copyright 2022 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "slices"
+ "strings"
+
+ "github.com/open-policy-agent/opa/internal/deepcopy"
+ astJSON "github.com/open-policy-agent/opa/v1/ast/json"
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+const (
+ annotationScopePackage = "package"
+ annotationScopeRule = "rule"
+ annotationScopeDocument = "document"
+ annotationScopeSubpackages = "subpackages"
+)
+
+type (
+ // Annotations represents metadata attached to other AST nodes such as rules.
+ Annotations struct {
+ Scope string `json:"scope"`
+ Title string `json:"title,omitempty"`
+ Entrypoint bool `json:"entrypoint,omitempty"`
+ Description string `json:"description,omitempty"`
+ Organizations []string `json:"organizations,omitempty"`
+ RelatedResources []*RelatedResourceAnnotation `json:"related_resources,omitempty"`
+ Authors []*AuthorAnnotation `json:"authors,omitempty"`
+ Schemas []*SchemaAnnotation `json:"schemas,omitempty"`
+ Compile *CompileAnnotation `json:"compile,omitempty"`
+ Custom map[string]any `json:"custom,omitempty"`
+ Location *Location `json:"location,omitempty"`
+
+ comments []*Comment
+ node Node
+ }
+
+ // SchemaAnnotation contains a schema declaration for the document identified by the path.
+ SchemaAnnotation struct {
+ Path Ref `json:"path"`
+ Schema Ref `json:"schema,omitempty"`
+ Definition *any `json:"definition,omitempty"`
+ }
+
+ CompileAnnotation struct {
+ Unknowns []Ref `json:"unknowns,omitempty"`
+ MaskRule Ref `json:"mask_rule,omitempty"` // NOTE: This doesn't need to start with "data.package", it can be relative
+ }
+
+ AuthorAnnotation struct {
+ Name string `json:"name"`
+ Email string `json:"email,omitempty"`
+ }
+
+ RelatedResourceAnnotation struct {
+ Ref url.URL `json:"ref"`
+ Description string `json:"description,omitempty"`
+ }
+
+ AnnotationSet struct {
+ byRule map[*Rule][]*Annotations
+ byPackage map[int]*Annotations
+ byPath *annotationTreeNode
+ modules []*Module // Modules this set was constructed from
+ }
+
+ annotationTreeNode struct {
+ Value *Annotations
+ Children map[Value]*annotationTreeNode // we assume key elements are hashable (vars and strings only!)
+ }
+
+ AnnotationsRef struct {
+ Path Ref `json:"path"` // The path of the node the annotations are applied to
+ Annotations *Annotations `json:"annotations,omitempty"`
+ Location *Location `json:"location,omitempty"` // The location of the node the annotations are applied to
+
+ node Node // The node the annotations are applied to
+ }
+
+ AnnotationsRefSet []*AnnotationsRef
+
+ FlatAnnotationsRefSet AnnotationsRefSet
+)
+
+func (a *Annotations) String() string {
+ bs, _ := a.MarshalJSON()
+ return string(bs)
+}
+
+// Loc returns the location of this annotation.
+func (a *Annotations) Loc() *Location {
+ return a.Location
+}
+
+// SetLoc updates the location of this annotation.
+func (a *Annotations) SetLoc(l *Location) {
+ a.Location = l
+}
+
+// EndLoc returns the location of this annotation's last comment line.
+func (a *Annotations) EndLoc() *Location {
+ count := len(a.comments)
+ if count == 0 {
+ return a.Location
+ }
+ return a.comments[count-1].Location
+}
+
+// Compare returns an integer indicating if a is less than, equal to, or greater
+// than other.
+func (a *Annotations) Compare(other *Annotations) int {
+
+ if a == nil && other == nil {
+ return 0
+ }
+
+ if a == nil {
+ return -1
+ }
+
+ if other == nil {
+ return 1
+ }
+
+ if cmp := scopeCompare(a.Scope, other.Scope); cmp != 0 {
+ return cmp
+ }
+
+ if cmp := strings.Compare(a.Title, other.Title); cmp != 0 {
+ return cmp
+ }
+
+ if cmp := strings.Compare(a.Description, other.Description); cmp != 0 {
+ return cmp
+ }
+
+ if cmp := compareStringLists(a.Organizations, other.Organizations); cmp != 0 {
+ return cmp
+ }
+
+ if cmp := compareRelatedResources(a.RelatedResources, other.RelatedResources); cmp != 0 {
+ return cmp
+ }
+
+ if cmp := compareAuthors(a.Authors, other.Authors); cmp != 0 {
+ return cmp
+ }
+
+ if cmp := compareSchemas(a.Schemas, other.Schemas); cmp != 0 {
+ return cmp
+ }
+
+ if cmp := a.Compile.Compare(other.Compile); cmp != 0 {
+ return cmp
+ }
+
+ if a.Entrypoint != other.Entrypoint {
+ if a.Entrypoint {
+ return 1
+ }
+ return -1
+ }
+
+ if cmp := util.Compare(a.Custom, other.Custom); cmp != 0 {
+ return cmp
+ }
+
+ return 0
+}
+
+// GetTargetPath returns the path of the node these Annotations are applied to (the target)
+func (a *Annotations) GetTargetPath() Ref {
+ switch n := a.node.(type) {
+ case *Package:
+ return n.Path
+ case *Rule:
+ return n.Ref().GroundPrefix()
+ default:
+ return nil
+ }
+}
+
+func (a *Annotations) MarshalJSON() ([]byte, error) {
+ if a == nil {
+ return []byte(`{"scope":""}`), nil
+ }
+
+ data := map[string]any{
+ "scope": a.Scope,
+ }
+
+ if a.Title != "" {
+ data["title"] = a.Title
+ }
+
+ if a.Description != "" {
+ data["description"] = a.Description
+ }
+
+ if a.Entrypoint {
+ data["entrypoint"] = a.Entrypoint
+ }
+
+ if len(a.Organizations) > 0 {
+ data["organizations"] = a.Organizations
+ }
+
+ if len(a.RelatedResources) > 0 {
+ data["related_resources"] = a.RelatedResources
+ }
+
+ if len(a.Authors) > 0 {
+ data["authors"] = a.Authors
+ }
+
+ if len(a.Schemas) > 0 {
+ data["schemas"] = a.Schemas
+ }
+
+ if len(a.Custom) > 0 {
+ data["custom"] = a.Custom
+ }
+
+ if astJSON.GetOptions().MarshalOptions.IncludeLocation.Annotations {
+ if a.Location != nil {
+ data["location"] = a.Location
+ }
+ }
+
+ return json.Marshal(data)
+}
+
+func NewAnnotationsRef(a *Annotations) *AnnotationsRef {
+ var loc *Location
+ if a.node != nil {
+ loc = a.node.Loc()
+ }
+
+ return &AnnotationsRef{
+ Location: loc,
+ Path: a.GetTargetPath(),
+ Annotations: a,
+ node: a.node,
+ }
+}
+
+func (ar *AnnotationsRef) GetPackage() *Package {
+ switch n := ar.node.(type) {
+ case *Package:
+ return n
+ case *Rule:
+ return n.Module.Package
+ default:
+ return nil
+ }
+}
+
+func (ar *AnnotationsRef) GetRule() *Rule {
+ switch n := ar.node.(type) {
+ case *Rule:
+ return n
+ default:
+ return nil
+ }
+}
+
+func (ar *AnnotationsRef) MarshalJSON() ([]byte, error) {
+ data := map[string]any{
+ "path": ar.Path,
+ }
+
+ if ar.Annotations != nil {
+ data["annotations"] = ar.Annotations
+ }
+
+ if astJSON.GetOptions().MarshalOptions.IncludeLocation.AnnotationsRef {
+ if ar.Location != nil {
+ data["location"] = ar.Location
+ }
+
+ // The location set for the schema ref terms is wrong (always set to
+ // row 1) and not really useful anyway.. so strip it out before marshalling
+ for _, schema := range ar.Annotations.Schemas {
+ if schema.Path != nil {
+ for _, term := range schema.Path {
+ term.Location = nil
+ }
+ }
+ }
+ }
+
+ return json.Marshal(data)
+}
+
+func scopeCompare(s1, s2 string) int {
+ o1 := scopeOrder(s1)
+ o2 := scopeOrder(s2)
+
+ if o2 < o1 {
+ return 1
+ } else if o2 > o1 {
+ return -1
+ }
+
+ if s1 < s2 {
+ return -1
+ } else if s2 < s1 {
+ return 1
+ }
+
+ return 0
+}
+
+func scopeOrder(s string) int {
+ if s == annotationScopeRule {
+ return 1
+ }
+ return 0
+}
+
+func compareAuthors(a, b []*AuthorAnnotation) int {
+ if len(a) > len(b) {
+ return 1
+ } else if len(a) < len(b) {
+ return -1
+ }
+
+ for i := range a {
+ if cmp := a[i].Compare(b[i]); cmp != 0 {
+ return cmp
+ }
+ }
+
+ return 0
+}
+
+func compareRelatedResources(a, b []*RelatedResourceAnnotation) int {
+ if len(a) > len(b) {
+ return 1
+ } else if len(a) < len(b) {
+ return -1
+ }
+
+ for i := range a {
+ if cmp := a[i].Compare(b[i]); cmp != 0 {
+ return cmp
+ }
+ }
+
+ return 0
+}
+
+func compareSchemas(a, b []*SchemaAnnotation) int {
+ maxLen := min(len(b), len(a))
+
+ for i := range maxLen {
+ if cmp := a[i].Compare(b[i]); cmp != 0 {
+ return cmp
+ }
+ }
+
+ if len(a) > len(b) {
+ return 1
+ } else if len(a) < len(b) {
+ return -1
+ }
+
+ return 0
+}
+
+func compareStringLists(a, b []string) int {
+ if len(a) > len(b) {
+ return 1
+ } else if len(a) < len(b) {
+ return -1
+ }
+
+ for i := range a {
+ if cmp := strings.Compare(a[i], b[i]); cmp != 0 {
+ return cmp
+ }
+ }
+
+ return 0
+}
+
+// Copy returns a deep copy of s.
+func (a *Annotations) Copy(node Node) *Annotations {
+ cpy := *a
+
+ cpy.Organizations = make([]string, len(a.Organizations))
+ copy(cpy.Organizations, a.Organizations)
+
+ cpy.RelatedResources = make([]*RelatedResourceAnnotation, len(a.RelatedResources))
+ for i := range a.RelatedResources {
+ cpy.RelatedResources[i] = a.RelatedResources[i].Copy()
+ }
+
+ cpy.Authors = make([]*AuthorAnnotation, len(a.Authors))
+ for i := range a.Authors {
+ cpy.Authors[i] = a.Authors[i].Copy()
+ }
+
+ cpy.Schemas = make([]*SchemaAnnotation, len(a.Schemas))
+ for i := range a.Schemas {
+ cpy.Schemas[i] = a.Schemas[i].Copy()
+ }
+
+ cpy.Compile = a.Compile.Copy()
+
+ if a.Custom != nil {
+ cpy.Custom = deepcopy.Map(a.Custom)
+ }
+
+ cpy.node = node
+
+ return &cpy
+}
+
+// toObject constructs an AST Object from the annotation.
+func (a *Annotations) toObject() (*Object, *Error) {
+ obj := NewObject()
+
+ if a == nil {
+ return &obj, nil
+ }
+
+ if len(a.Scope) > 0 {
+ switch a.Scope {
+ case annotationScopeDocument:
+ obj.Insert(InternedTerm("scope"), InternedTerm("document"))
+ case annotationScopePackage:
+ obj.Insert(InternedTerm("scope"), InternedTerm("package"))
+ case annotationScopeRule:
+ obj.Insert(InternedTerm("scope"), InternedTerm("rule"))
+ case annotationScopeSubpackages:
+ obj.Insert(InternedTerm("scope"), InternedTerm("subpackages"))
+ default:
+ obj.Insert(InternedTerm("scope"), StringTerm(a.Scope))
+ }
+ }
+
+ if len(a.Title) > 0 {
+ obj.Insert(InternedTerm("title"), StringTerm(a.Title))
+ }
+
+ if a.Entrypoint {
+ obj.Insert(InternedTerm("entrypoint"), InternedTerm(true))
+ }
+
+ if len(a.Description) > 0 {
+ obj.Insert(InternedTerm("description"), StringTerm(a.Description))
+ }
+
+ if len(a.Organizations) > 0 {
+ orgs := make([]*Term, 0, len(a.Organizations))
+ for _, org := range a.Organizations {
+ orgs = append(orgs, StringTerm(org))
+ }
+ obj.Insert(InternedTerm("organizations"), ArrayTerm(orgs...))
+ }
+
+ if len(a.RelatedResources) > 0 {
+ rrs := make([]*Term, 0, len(a.RelatedResources))
+ for _, rr := range a.RelatedResources {
+ rrObj := NewObject(Item(InternedTerm("ref"), StringTerm(rr.Ref.String())))
+ if len(rr.Description) > 0 {
+ rrObj.Insert(InternedTerm("description"), StringTerm(rr.Description))
+ }
+ rrs = append(rrs, NewTerm(rrObj))
+ }
+ obj.Insert(InternedTerm("related_resources"), ArrayTerm(rrs...))
+ }
+
+ if len(a.Authors) > 0 {
+ as := make([]*Term, 0, len(a.Authors))
+ for _, author := range a.Authors {
+ aObj := NewObject()
+ if len(author.Name) > 0 {
+ aObj.Insert(InternedTerm("name"), StringTerm(author.Name))
+ }
+ if len(author.Email) > 0 {
+ aObj.Insert(InternedTerm("email"), StringTerm(author.Email))
+ }
+ as = append(as, NewTerm(aObj))
+ }
+ obj.Insert(InternedTerm("authors"), ArrayTerm(as...))
+ }
+
+ if len(a.Schemas) > 0 {
+ ss := make([]*Term, 0, len(a.Schemas))
+ for _, s := range a.Schemas {
+ sObj := NewObject()
+ if len(s.Path) > 0 {
+ sObj.Insert(InternedTerm("path"), NewTerm(s.Path.toArray()))
+ }
+ if len(s.Schema) > 0 {
+ sObj.Insert(InternedTerm("schema"), NewTerm(s.Schema.toArray()))
+ }
+ if s.Definition != nil {
+ def, err := InterfaceToValue(s.Definition)
+ if err != nil {
+ return nil, NewError(CompileErr, a.Location, "invalid definition in schema annotation: %s", err.Error())
+ }
+ sObj.Insert(InternedTerm("definition"), NewTerm(def))
+ }
+ ss = append(ss, NewTerm(sObj))
+ }
+ obj.Insert(InternedTerm("schemas"), ArrayTerm(ss...))
+ }
+
+ if len(a.Custom) > 0 {
+ c, err := InterfaceToValue(a.Custom)
+ if err != nil {
+ return nil, NewError(CompileErr, a.Location, "invalid custom annotation %s", err.Error())
+ }
+ obj.Insert(InternedTerm("custom"), NewTerm(c))
+ }
+
+ return &obj, nil
+}
+
+func attachRuleAnnotations(mod *Module) {
+ // make a copy of the annotations
+ cpy := make([]*Annotations, len(mod.Annotations))
+ for i, a := range mod.Annotations {
+ cpy[i] = a.Copy(a.node)
+ }
+
+ for _, rule := range mod.Rules {
+ var j int
+ var found bool
+ for i, a := range cpy {
+ if rule.Ref().GroundPrefix().Equal(a.GetTargetPath()) {
+ if a.Scope == annotationScopeDocument {
+ rule.Annotations = append(rule.Annotations, a)
+ } else if a.Scope == annotationScopeRule && rule.Loc().Row > a.Location.Row {
+ j = i
+ found = true
+ rule.Annotations = append(rule.Annotations, a)
+ }
+ }
+ }
+
+ if found && j < len(cpy) {
+ cpy = slices.Delete(cpy, j, j+1)
+ }
+ }
+}
+
+func attachAnnotationsNodes(mod *Module) Errors {
+ var errs Errors
+
+ // Find first non-annotation statement following each annotation and attach
+ // the annotation to that statement.
+ for _, a := range mod.Annotations {
+ for _, stmt := range mod.stmts {
+ _, ok := stmt.(*Annotations)
+ if !ok {
+ if stmt.Loc().Row > a.Location.Row {
+ a.node = stmt
+ break
+ }
+ }
+ }
+
+ if a.Scope == "" {
+ switch a.node.(type) {
+ case *Rule:
+ if a.Entrypoint {
+ a.Scope = annotationScopeDocument
+ } else {
+ a.Scope = annotationScopeRule
+ }
+ case *Package:
+ a.Scope = annotationScopePackage
+ case *Import:
+ // Note that this isn't a valid scope, but set here so that the
+ // validate function called below can print an error message with
+ // a context that makes sense ("invalid scope: 'import'" instead of
+ // "invalid scope: '')
+ a.Scope = "import"
+ }
+ }
+
+ if err := validateAnnotationScopeAttachment(a); err != nil {
+ errs = append(errs, err)
+ }
+
+ if err := validateAnnotationEntrypointAttachment(a); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ return errs
+}
+
+func validateAnnotationScopeAttachment(a *Annotations) *Error {
+
+ switch a.Scope {
+ case annotationScopeRule, annotationScopeDocument:
+ if _, ok := a.node.(*Rule); ok {
+ return nil
+ }
+ return newScopeAttachmentErr(a, "rule")
+ case annotationScopePackage, annotationScopeSubpackages:
+ if _, ok := a.node.(*Package); ok {
+ return nil
+ }
+ return newScopeAttachmentErr(a, "package")
+ }
+
+ return NewError(ParseErr, a.Loc(), "invalid annotation scope '%v'. Use one of '%s', '%s', '%s', or '%s'",
+ a.Scope, annotationScopeRule, annotationScopeDocument, annotationScopePackage, annotationScopeSubpackages)
+}
+
+func validateAnnotationEntrypointAttachment(a *Annotations) *Error {
+ if a.Entrypoint && !(a.Scope == annotationScopeDocument || a.Scope == annotationScopePackage) {
+ return NewError(
+ ParseErr, a.Loc(), "annotation entrypoint applied to non-document or package scope '%v'", a.Scope)
+ }
+ return nil
+}
+
+// Copy returns a deep copy of a.
+func (a *AuthorAnnotation) Copy() *AuthorAnnotation {
+ cpy := *a
+ return &cpy
+}
+
+// Compare returns an integer indicating if s is less than, equal to, or greater
+// than other.
+func (a *AuthorAnnotation) Compare(other *AuthorAnnotation) int {
+ if cmp := strings.Compare(a.Name, other.Name); cmp != 0 {
+ return cmp
+ }
+
+ if cmp := strings.Compare(a.Email, other.Email); cmp != 0 {
+ return cmp
+ }
+
+ return 0
+}
+
+func (a *AuthorAnnotation) String() string {
+ if len(a.Email) == 0 {
+ return a.Name
+ } else if len(a.Name) == 0 {
+ return fmt.Sprintf("<%s>", a.Email)
+ }
+ return fmt.Sprintf("%s <%s>", a.Name, a.Email)
+}
+
+// Copy returns a deep copy of rr.
+func (rr *RelatedResourceAnnotation) Copy() *RelatedResourceAnnotation {
+ cpy := *rr
+ return &cpy
+}
+
+// Compare returns an integer indicating if s is less than, equal to, or greater
+// than other.
+func (rr *RelatedResourceAnnotation) Compare(other *RelatedResourceAnnotation) int {
+ if cmp := strings.Compare(rr.Description, other.Description); cmp != 0 {
+ return cmp
+ }
+
+ if cmp := strings.Compare(rr.Ref.String(), other.Ref.String()); cmp != 0 {
+ return cmp
+ }
+
+ return 0
+}
+
+func (rr *RelatedResourceAnnotation) String() string {
+ bs, _ := json.Marshal(rr)
+ return string(bs)
+}
+
+func (rr *RelatedResourceAnnotation) MarshalJSON() ([]byte, error) {
+ d := map[string]any{
+ "ref": rr.Ref.String(),
+ }
+
+ if len(rr.Description) > 0 {
+ d["description"] = rr.Description
+ }
+
+ return json.Marshal(d)
+}
+
+// Copy returns a deep copy of s.
+func (s *SchemaAnnotation) Copy() *SchemaAnnotation {
+ cpy := *s
+ return &cpy
+}
+
+// Compare returns an integer indicating if s is less than, equal to, or greater
+// than other.
+func (s *SchemaAnnotation) Compare(other *SchemaAnnotation) int {
+ if cmp := s.Path.Compare(other.Path); cmp != 0 {
+ return cmp
+ }
+
+ if cmp := s.Schema.Compare(other.Schema); cmp != 0 {
+ return cmp
+ }
+
+ if s.Definition != nil && other.Definition == nil {
+ return -1
+ } else if s.Definition == nil && other.Definition != nil {
+ return 1
+ } else if s.Definition != nil && other.Definition != nil {
+ return util.Compare(*s.Definition, *other.Definition)
+ }
+
+ return 0
+}
+
+func (s *SchemaAnnotation) String() string {
+ bs, _ := json.Marshal(s)
+ return string(bs)
+}
+
+// Copy returns a deep copy of s.
+func (c *CompileAnnotation) Copy() *CompileAnnotation {
+ if c == nil {
+ return nil
+ }
+ cpy := *c
+ for i := range c.Unknowns {
+ cpy.Unknowns[i] = c.Unknowns[i].Copy()
+ }
+ return &cpy
+}
+
+// Compare returns an integer indicating if s is less than, equal to, or greater
+// than other.
+func (c *CompileAnnotation) Compare(other *CompileAnnotation) int {
+ switch {
+ case c == nil && other == nil:
+ return 0
+ case c != nil && other == nil:
+ return 1
+ case c == nil && other != nil:
+ return -1
+ }
+
+ if cmp := slices.CompareFunc(c.Unknowns, other.Unknowns,
+ func(x, y Ref) int {
+ return x.Compare(y)
+ }); cmp != 0 {
+ return cmp
+ }
+ return c.MaskRule.Compare(other.MaskRule)
+}
+
+func (c *CompileAnnotation) String() string {
+ bs, _ := json.Marshal(c)
+ return string(bs)
+}
+
+func newAnnotationSet() *AnnotationSet {
+ return &AnnotationSet{
+ byRule: map[*Rule][]*Annotations{},
+ byPackage: map[int]*Annotations{},
+ byPath: newAnnotationTree(),
+ }
+}
+
+func BuildAnnotationSet(modules []*Module) (*AnnotationSet, Errors) {
+ as := newAnnotationSet()
+ var errs Errors
+ for _, m := range modules {
+ for _, a := range m.Annotations {
+ if err := as.add(a); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ }
+ if len(errs) > 0 {
+ return nil, errs
+ }
+ as.modules = modules
+ return as, nil
+}
+
+// NOTE(philipc): During copy propagation, the underlying Nodes can be
+// stripped away from the annotations, leading to nil deref panics. We
+// silently ignore these cases for now, as a workaround.
+func (as *AnnotationSet) add(a *Annotations) *Error {
+ switch a.Scope {
+ case annotationScopeRule:
+ if rule, ok := a.node.(*Rule); ok {
+ as.byRule[rule] = append(as.byRule[rule], a)
+ }
+ case annotationScopePackage:
+ if pkg, ok := a.node.(*Package); ok {
+ hash := pkg.Path.Hash()
+ if exist, ok := as.byPackage[hash]; ok {
+ return errAnnotationRedeclared(a, exist.Location)
+ }
+ as.byPackage[hash] = a
+ }
+ case annotationScopeDocument:
+ if rule, ok := a.node.(*Rule); ok {
+ path := rule.Ref().GroundPrefix()
+ x := as.byPath.get(path)
+ if x != nil {
+ return errAnnotationRedeclared(a, x.Value.Location)
+ }
+ as.byPath.insert(path, a)
+ }
+ case annotationScopeSubpackages:
+ if pkg, ok := a.node.(*Package); ok {
+ x := as.byPath.get(pkg.Path)
+ if x != nil && x.Value != nil {
+ return errAnnotationRedeclared(a, x.Value.Location)
+ }
+ as.byPath.insert(pkg.Path, a)
+ }
+ }
+ return nil
+}
+
+func (as *AnnotationSet) GetRuleScope(r *Rule) []*Annotations {
+ if as == nil {
+ return nil
+ }
+ return as.byRule[r]
+}
+
+func (as *AnnotationSet) GetSubpackagesScope(path Ref) []*Annotations {
+ if as == nil {
+ return nil
+ }
+ return as.byPath.ancestors(path)
+}
+
+func (as *AnnotationSet) GetDocumentScope(path Ref) *Annotations {
+ if as == nil {
+ return nil
+ }
+ if node := as.byPath.get(path); node != nil {
+ return node.Value
+ }
+ return nil
+}
+
+func (as *AnnotationSet) GetPackageScope(pkg *Package) *Annotations {
+ if as == nil {
+ return nil
+ }
+ return as.byPackage[pkg.Path.Hash()]
+}
+
+// Flatten returns a flattened list view of this AnnotationSet.
+// The returned slice is sorted, first by the annotations' target path, then by their target location
+func (as *AnnotationSet) Flatten() FlatAnnotationsRefSet {
+ // This preallocation often won't be optimal, but it's superior to starting with a nil slice.
+ refs := make([]*AnnotationsRef, 0, len(as.byPath.Children)+len(as.byRule)+len(as.byPackage))
+
+ refs = as.byPath.flatten(refs)
+
+ for _, a := range as.byPackage {
+ refs = append(refs, NewAnnotationsRef(a))
+ }
+
+ for _, as := range as.byRule {
+ for _, a := range as {
+ refs = append(refs, NewAnnotationsRef(a))
+ }
+ }
+
+ // Sort by path, then annotation location, for stable output
+ slices.SortStableFunc(refs, (*AnnotationsRef).Compare)
+
+ return refs
+}
+
+// Chain returns the chain of annotations leading up to the given rule.
+// The returned slice is ordered as follows
+// 0. Entries for the given rule, ordered from the METADATA block declared immediately above the rule, to the block declared farthest away (always at least one entry)
+// 1. The 'document' scope entry, if any
+// 2. The 'package' scope entry, if any
+// 3. Entries for the 'subpackages' scope, if any; ordered from the closest package path to the fartest. E.g.: 'do.re.mi', 'do.re', 'do'
+// The returned slice is guaranteed to always contain at least one entry, corresponding to the given rule.
+func (as *AnnotationSet) Chain(rule *Rule) AnnotationsRefSet {
+ var refs []*AnnotationsRef
+
+ ruleAnnots := as.GetRuleScope(rule)
+
+ if len(ruleAnnots) >= 1 {
+ for _, a := range ruleAnnots {
+ refs = append(refs, NewAnnotationsRef(a))
+ }
+ } else {
+ // Make sure there is always a leading entry representing the passed rule, even if it has no annotations
+ refs = append(refs, &AnnotationsRef{
+ Location: rule.Location,
+ Path: rule.Ref().GroundPrefix(),
+ node: rule,
+ })
+ }
+
+ if len(refs) > 1 {
+ // Sort by annotation location; chain must start with annotations declared closest to rule, then going outward
+ slices.SortStableFunc(refs, func(a, b *AnnotationsRef) int {
+ return -a.Annotations.Location.Compare(b.Annotations.Location)
+ })
+ }
+
+ docAnnots := as.GetDocumentScope(rule.Ref().GroundPrefix())
+ if docAnnots != nil {
+ refs = append(refs, NewAnnotationsRef(docAnnots))
+ }
+
+ pkg := rule.Module.Package
+ pkgAnnots := as.GetPackageScope(pkg)
+ if pkgAnnots != nil {
+ refs = append(refs, NewAnnotationsRef(pkgAnnots))
+ }
+
+ subPkgAnnots := as.GetSubpackagesScope(pkg.Path)
+ // We need to reverse the order, as subPkgAnnots ordering will start at the root,
+ // whereas we want to end at the root.
+ for i := len(subPkgAnnots) - 1; i >= 0; i-- {
+ refs = append(refs, NewAnnotationsRef(subPkgAnnots[i]))
+ }
+
+ return refs
+}
+
+func (ars FlatAnnotationsRefSet) Insert(ar *AnnotationsRef) FlatAnnotationsRefSet {
+ result := make(FlatAnnotationsRefSet, 0, len(ars)+1)
+
+ // insertion sort, first by path, then location
+ for i, current := range ars {
+ if ar.Compare(current) < 0 {
+ result = append(result, ar)
+ result = append(result, ars[i:]...)
+ break
+ }
+ result = append(result, current)
+ }
+
+ if len(result) < len(ars)+1 {
+ result = append(result, ar)
+ }
+
+ return result
+}
+
+func newAnnotationTree() *annotationTreeNode {
+ return &annotationTreeNode{
+ Value: nil,
+ Children: map[Value]*annotationTreeNode{},
+ }
+}
+
+func (t *annotationTreeNode) insert(path Ref, value *Annotations) {
+ node := t
+ for _, k := range path {
+ child, ok := node.Children[k.Value]
+ if !ok {
+ child = newAnnotationTree()
+ node.Children[k.Value] = child
+ }
+ node = child
+ }
+ node.Value = value
+}
+
+func (t *annotationTreeNode) get(path Ref) *annotationTreeNode {
+ node := t
+ for _, k := range path {
+ if node == nil {
+ return nil
+ }
+ child, ok := node.Children[k.Value]
+ if !ok {
+ return nil
+ }
+ node = child
+ }
+ return node
+}
+
+// ancestors returns a slice of annotations in ascending order, starting with the root of ref; e.g.: 'root', 'root.foo', 'root.foo.bar'.
+func (t *annotationTreeNode) ancestors(path Ref) (result []*Annotations) {
+ node := t
+ for _, k := range path {
+ if node == nil {
+ return result
+ }
+ child, ok := node.Children[k.Value]
+ if !ok {
+ return result
+ }
+ if child.Value != nil {
+ result = append(result, child.Value)
+ }
+ node = child
+ }
+ return result
+}
+
+func (t *annotationTreeNode) flatten(refs []*AnnotationsRef) []*AnnotationsRef {
+ if a := t.Value; a != nil {
+ refs = append(refs, NewAnnotationsRef(a))
+ }
+ for _, c := range t.Children {
+ refs = c.flatten(refs)
+ }
+ return refs
+}
+
+func (ar *AnnotationsRef) Compare(other *AnnotationsRef) int {
+ if c := ar.Path.Compare(other.Path); c != 0 {
+ return c
+ }
+
+ if c := ar.Annotations.Location.Compare(other.Annotations.Location); c != 0 {
+ return c
+ }
+
+ return ar.Annotations.Compare(other.Annotations)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/builtins.go b/vendor/github.com/open-policy-agent/opa/v1/ast/builtins.go
new file mode 100644
index 0000000000..3d72aeab1f
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/builtins.go
@@ -0,0 +1,3637 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "strings"
+
+ "github.com/open-policy-agent/opa/v1/types"
+)
+
+// Builtins is the registry of built-in functions supported by OPA.
+// Call RegisterBuiltin to add a new built-in.
+var Builtins []*Builtin
+
+// RegisterBuiltin adds a new built-in function to the registry.
+// NOTE: The underlying map storing built-ins is **not** thread-safe,
+// and it's recommended to call this only during initialization, and never
+// later. Registering built-ins after that point is unsupported and will
+// likely lead to concurrent map read/write panics.
+func RegisterBuiltin(b *Builtin) {
+ Builtins = append(Builtins, b)
+ BuiltinMap[b.Name] = b
+ if len(b.Infix) > 0 {
+ BuiltinMap[b.Infix] = b
+
+ InternStringTerm(b.Infix)
+ }
+
+ InternStringTerm(b.Name)
+ if strings.Contains(b.Name, ".") {
+ InternStringTerm(strings.Split(b.Name, ".")...)
+ }
+}
+
+// DefaultBuiltins is the registry of built-in functions supported in OPA
+// by default. When adding a new built-in function to OPA, update this
+// list.
+var DefaultBuiltins = [...]*Builtin{
+ // Unification/equality ("=")
+ Equality,
+
+ // Assignment (":=")
+ Assign,
+
+ // Membership, infix "in": `x in xs`
+ Member,
+ MemberWithKey,
+
+ // Comparisons
+ GreaterThan,
+ GreaterThanEq,
+ LessThan,
+ LessThanEq,
+ NotEqual,
+ Equal,
+
+ // Arithmetic
+ Plus,
+ Minus,
+ Multiply,
+ Divide,
+ Ceil,
+ Floor,
+ Round,
+ Abs,
+ Rem,
+
+ // Bitwise Arithmetic
+ BitsOr,
+ BitsAnd,
+ BitsNegate,
+ BitsXOr,
+ BitsShiftLeft,
+ BitsShiftRight,
+
+ // Binary
+ And,
+ Or,
+
+ // Aggregates
+ Count,
+ Sum,
+ Product,
+ Max,
+ Min,
+ Any,
+ All,
+
+ // Arrays
+ ArrayConcat,
+ ArraySlice,
+ ArrayReverse,
+
+ // Conversions
+ ToNumber,
+
+ // Casts (DEPRECATED)
+ CastObject,
+ CastNull,
+ CastBoolean,
+ CastString,
+ CastSet,
+ CastArray,
+
+ // Regular Expressions
+ RegexIsValid,
+ RegexMatch,
+ RegexMatchDeprecated,
+ RegexSplit,
+ GlobsMatch,
+ RegexTemplateMatch,
+ RegexFind,
+ RegexFindAllStringSubmatch,
+ RegexReplace,
+
+ // Sets
+ SetDiff,
+ Intersection,
+ Union,
+
+ // Strings
+ AnyPrefixMatch,
+ AnySuffixMatch,
+ Concat,
+ FormatInt,
+ IndexOf,
+ IndexOfN,
+ Substring,
+ Lower,
+ Upper,
+ Contains,
+ StringCount,
+ StartsWith,
+ EndsWith,
+ Split,
+ Replace,
+ ReplaceN,
+ Trim,
+ TrimLeft,
+ TrimPrefix,
+ TrimRight,
+ TrimSuffix,
+ TrimSpace,
+ Sprintf,
+ StringReverse,
+ RenderTemplate,
+
+ // Numbers
+ NumbersRange,
+ NumbersRangeStep,
+ RandIntn,
+
+ // Encoding
+ JSONMarshal,
+ JSONMarshalWithOptions,
+ JSONUnmarshal,
+ JSONIsValid,
+ Base64Encode,
+ Base64Decode,
+ Base64IsValid,
+ Base64UrlEncode,
+ Base64UrlEncodeNoPad,
+ Base64UrlDecode,
+ URLQueryDecode,
+ URLQueryEncode,
+ URLQueryEncodeObject,
+ URLQueryDecodeObject,
+ YAMLMarshal,
+ YAMLUnmarshal,
+ YAMLIsValid,
+ HexEncode,
+ HexDecode,
+
+ // Object Manipulation
+ ObjectUnion,
+ ObjectUnionN,
+ ObjectRemove,
+ ObjectFilter,
+ ObjectGet,
+ ObjectKeys,
+ ObjectSubset,
+
+ // JSON Object Manipulation
+ JSONFilter,
+ JSONRemove,
+ JSONPatch,
+
+ // Tokens
+ JWTDecode,
+ JWTVerifyRS256,
+ JWTVerifyRS384,
+ JWTVerifyRS512,
+ JWTVerifyPS256,
+ JWTVerifyPS384,
+ JWTVerifyPS512,
+ JWTVerifyES256,
+ JWTVerifyES384,
+ JWTVerifyES512,
+ JWTVerifyEdDSA,
+ JWTVerifyHS256,
+ JWTVerifyHS384,
+ JWTVerifyHS512,
+ JWTDecodeVerify,
+ JWTEncodeSignRaw,
+ JWTEncodeSign,
+
+ // Time
+ NowNanos,
+ ParseNanos,
+ ParseRFC3339Nanos,
+ ParseDurationNanos,
+ Format,
+ Date,
+ Clock,
+ Weekday,
+ AddDate,
+ Diff,
+
+ // Crypto
+ CryptoX509ParseCertificates,
+ CryptoX509ParseAndVerifyCertificates,
+ CryptoX509ParseAndVerifyCertificatesWithOptions,
+ CryptoMd5,
+ CryptoSha1,
+ CryptoSha256,
+ CryptoX509ParseCertificateRequest,
+ CryptoX509ParseRSAPrivateKey,
+ CryptoX509ParseKeyPair,
+ CryptoParsePrivateKeys,
+ CryptoHmacMd5,
+ CryptoHmacSha1,
+ CryptoHmacSha256,
+ CryptoHmacSha512,
+ CryptoHmacEqual,
+
+ // Graphs
+ WalkBuiltin,
+ ReachableBuiltin,
+ ReachablePathsBuiltin,
+
+ // Sort
+ Sort,
+
+ // Types
+ IsNumber,
+ IsString,
+ IsBoolean,
+ IsArray,
+ IsSet,
+ IsObject,
+ IsNull,
+ TypeNameBuiltin,
+
+ // HTTP
+ HTTPSend,
+
+ // GraphQL
+ GraphQLParse,
+ GraphQLParseAndVerify,
+ GraphQLParseQuery,
+ GraphQLParseSchema,
+ GraphQLIsValid,
+ GraphQLSchemaIsValid,
+
+ // JSON Schema
+ JSONSchemaVerify,
+ JSONMatchSchema,
+
+ // Cloud Provider Helpers
+ ProvidersAWSSignReqObj,
+
+ // Rego
+ RegoParseModule,
+ RegoMetadataChain,
+ RegoMetadataRule,
+
+ // OPA
+ OPARuntime,
+
+ // Tracing
+ Trace,
+
+ // Networking
+ NetCIDROverlap,
+ NetCIDRIntersects,
+ NetCIDRContains,
+ NetCIDRContainsMatches,
+ NetCIDRExpand,
+ NetCIDRMerge,
+ NetLookupIPAddr,
+ NetCIDRIsValid,
+
+ // Glob
+ GlobMatch,
+ GlobQuoteMeta,
+
+ // Units
+ UnitsParse,
+ UnitsParseBytes,
+
+ // UUIDs
+ UUIDRFC4122,
+ UUIDParse,
+
+ // SemVers
+ SemVerIsValid,
+ SemVerCompare,
+
+ // Printing
+ Print,
+ InternalPrint,
+
+ // Testing
+ InternalTestCase,
+}
+
+// BuiltinMap provides a convenient mapping of built-in names to
+// built-in definitions.
+var BuiltinMap map[string]*Builtin
+
+// Deprecated: Builtins can now be directly annotated with the
+// Nondeterministic property, and when set to true, will be ignored
+// for partial evaluation.
+var IgnoreDuringPartialEval = []*Builtin{
+ RandIntn,
+ UUIDRFC4122,
+ JWTDecodeVerify,
+ JWTEncodeSignRaw,
+ JWTEncodeSign,
+ NowNanos,
+ HTTPSend,
+ OPARuntime,
+ NetLookupIPAddr,
+}
+
+/**
+ * Unification
+ */
+
+// Equality represents the "=" operator.
+var Equality = &Builtin{
+ Name: "eq",
+ Infix: "=",
+ Decl: types.NewFunction(
+ types.Args(types.A, types.A),
+ types.B,
+ ),
+ CanSkipBctx: true,
+}
+
+/**
+ * Assignment
+ */
+
+// Assign represents the assignment (":=") operator.
+var Assign = &Builtin{
+ Name: "assign",
+ Infix: ":=",
+ Decl: types.NewFunction(
+ types.Args(types.A, types.A),
+ types.B,
+ ),
+ CanSkipBctx: true,
+}
+
+// Member represents the `in` (infix) operator.
+var Member = &Builtin{
+ Name: "internal.member_2",
+ Infix: "in",
+ Decl: types.NewFunction(
+ types.Args(
+ types.A,
+ types.A,
+ ),
+ types.B,
+ ),
+ CanSkipBctx: true,
+}
+
+// MemberWithKey represents the `in` (infix) operator when used
+// with two terms on the lhs, i.e., `k, v in obj`.
+var MemberWithKey = &Builtin{
+ Name: "internal.member_3",
+ Infix: "in",
+ Decl: types.NewFunction(
+ types.Args(
+ types.A,
+ types.A,
+ types.A,
+ ),
+ types.B,
+ ),
+ CanSkipBctx: true,
+}
+
+/**
+ * Comparisons
+ */
+var comparison = category("comparison")
+
+var GreaterThan = &Builtin{
+ Name: "gt",
+ Infix: ">",
+ Categories: comparison,
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.A),
+ types.Named("y", types.A),
+ ),
+ types.Named("result", types.B).Description("true if `x` is greater than `y`; false otherwise"),
+ ),
+ CanSkipBctx: true,
+}
+
+var GreaterThanEq = &Builtin{
+ Name: "gte",
+ Infix: ">=",
+ Categories: comparison,
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.A),
+ types.Named("y", types.A),
+ ),
+ types.Named("result", types.B).Description("true if `x` is greater or equal to `y`; false otherwise"),
+ ),
+ CanSkipBctx: true,
+}
+
+// LessThan represents the "<" comparison operator.
+var LessThan = &Builtin{
+ Name: "lt",
+ Infix: "<",
+ Categories: comparison,
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.A),
+ types.Named("y", types.A),
+ ),
+ types.Named("result", types.B).Description("true if `x` is less than `y`; false otherwise"),
+ ),
+ CanSkipBctx: true,
+}
+
+var LessThanEq = &Builtin{
+ Name: "lte",
+ Infix: "<=",
+ Categories: comparison,
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.A),
+ types.Named("y", types.A),
+ ),
+ types.Named("result", types.B).Description("true if `x` is less than or equal to `y`; false otherwise"),
+ ),
+ CanSkipBctx: true,
+}
+
+var NotEqual = &Builtin{
+ Name: "neq",
+ Infix: "!=",
+ Categories: comparison,
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.A),
+ types.Named("y", types.A),
+ ),
+ types.Named("result", types.B).Description("true if `x` is not equal to `y`; false otherwise"),
+ ),
+ CanSkipBctx: true,
+}
+
+// Equal represents the "==" comparison operator.
+var Equal = &Builtin{
+ Name: "equal",
+ Infix: "==",
+ Categories: comparison,
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.A),
+ types.Named("y", types.A),
+ ),
+ types.Named("result", types.B).Description("true if `x` is equal to `y`; false otherwise"),
+ ),
+ CanSkipBctx: true,
+}
+
+/**
+ * Arithmetic
+ */
+var number = category("numbers")
+
+var Plus = &Builtin{
+ Name: "plus",
+ Infix: "+",
+ Description: "Plus adds two numbers together.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.N),
+ types.Named("y", types.N),
+ ),
+ types.Named("z", types.N).Description("the sum of `x` and `y`"),
+ ),
+ Categories: number,
+ CanSkipBctx: true,
+}
+
+var Minus = &Builtin{
+ Name: "minus",
+ Infix: "-",
+ Description: "Minus subtracts the second number from the first number or computes the difference between two sets.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.NewAny(types.N, types.SetOfAny)),
+ types.Named("y", types.NewAny(types.N, types.SetOfAny)),
+ ),
+ types.Named("z", types.NewAny(types.N, types.SetOfAny)).Description("the difference of `x` and `y`"),
+ ),
+ Categories: category("sets", "numbers"),
+ CanSkipBctx: true,
+}
+
+var Multiply = &Builtin{
+ Name: "mul",
+ Infix: "*",
+ Description: "Multiplies two numbers.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.N),
+ types.Named("y", types.N),
+ ),
+ types.Named("z", types.N).Description("the product of `x` and `y`"),
+ ),
+ Categories: number,
+ CanSkipBctx: true,
+}
+
+var Divide = &Builtin{
+ Name: "div",
+ Infix: "/",
+ Description: "Divides the first number by the second number.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.N).Description("the dividend"),
+ types.Named("y", types.N).Description("the divisor"),
+ ),
+ types.Named("z", types.N).Description("the result of `x` divided by `y`"),
+ ),
+ Categories: number,
+ CanSkipBctx: true,
+}
+
+var Round = &Builtin{
+ Name: "round",
+ Description: "Rounds the number to the nearest integer.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.N).Description("the number to round"),
+ ),
+ types.Named("y", types.N).Description("the result of rounding `x`"),
+ ),
+ Categories: number,
+ CanSkipBctx: true,
+}
+
+var Ceil = &Builtin{
+ Name: "ceil",
+ Description: "Rounds the number _up_ to the nearest integer.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.N).Description("the number to round"),
+ ),
+ types.Named("y", types.N).Description("the result of rounding `x` _up_"),
+ ),
+ Categories: number,
+ CanSkipBctx: true,
+}
+
+var Floor = &Builtin{
+ Name: "floor",
+ Description: "Rounds the number _down_ to the nearest integer.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.N).Description("the number to round"),
+ ),
+ types.Named("y", types.N).Description("the result of rounding `x` _down_"),
+ ),
+ Categories: number,
+ CanSkipBctx: true,
+}
+
+var Abs = &Builtin{
+ Name: "abs",
+ Description: "Returns the number without its sign.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.N).Description("the number to take the absolute value of"),
+ ),
+ types.Named("y", types.N).Description("the absolute value of `x`"),
+ ),
+ Categories: number,
+ CanSkipBctx: true,
+}
+
+var Rem = &Builtin{
+ Name: "rem",
+ Infix: "%",
+ Description: "Returns the remainder for of `x` divided by `y`, for `y != 0`.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.N),
+ types.Named("y", types.N),
+ ),
+ types.Named("z", types.N).Description("the remainder"),
+ ),
+ Categories: number,
+ CanSkipBctx: true,
+}
+
+/**
+ * Bitwise
+ */
+
+var BitsOr = &Builtin{
+ Name: "bits.or",
+ Description: "Returns the bitwise \"OR\" of two integers.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.N).Description("the first integer"),
+ types.Named("y", types.N).Description("the second integer"),
+ ),
+ types.Named("z", types.N).Description("the bitwise OR of `x` and `y`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var BitsAnd = &Builtin{
+ Name: "bits.and",
+ Description: "Returns the bitwise \"AND\" of two integers.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.N).Description("the first integer"),
+ types.Named("y", types.N).Description("the second integer"),
+ ),
+ types.Named("z", types.N).Description("the bitwise AND of `x` and `y`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var BitsNegate = &Builtin{
+ Name: "bits.negate",
+ Description: "Returns the bitwise negation (flip) of an integer.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.N).Description("the integer to negate"),
+ ),
+ types.Named("z", types.N).Description("the bitwise negation of `x`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var BitsXOr = &Builtin{
+ Name: "bits.xor",
+ Description: "Returns the bitwise \"XOR\" (exclusive-or) of two integers.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.N).Description("the first integer"),
+ types.Named("y", types.N).Description("the second integer"),
+ ),
+ types.Named("z", types.N).Description("the bitwise XOR of `x` and `y`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var BitsShiftLeft = &Builtin{
+ Name: "bits.lsh",
+ Description: "Returns a new integer with its bits shifted `s` bits to the left.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.N).Description("the integer to shift"),
+ types.Named("s", types.N).Description("the number of bits to shift"),
+ ),
+ types.Named("z", types.N).Description("the result of shifting `x` `s` bits to the left"),
+ ),
+ CanSkipBctx: true,
+}
+
+var BitsShiftRight = &Builtin{
+ Name: "bits.rsh",
+ Description: "Returns a new integer with its bits shifted `s` bits to the right.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.N).Description("the integer to shift"),
+ types.Named("s", types.N).Description("the number of bits to shift"),
+ ),
+ types.Named("z", types.N).Description("the result of shifting `x` `s` bits to the right"),
+ ),
+ CanSkipBctx: true,
+}
+
+/**
+ * Sets
+ */
+
+var sets = category("sets")
+
+var And = &Builtin{
+ Name: "and",
+ Infix: "&",
+ Description: "Returns the intersection of two sets.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.SetOfAny).Description("the first set"),
+ types.Named("y", types.SetOfAny).Description("the second set"),
+ ),
+ types.Named("z", types.SetOfAny).Description("the intersection of `x` and `y`"),
+ ),
+ Categories: sets,
+ CanSkipBctx: true,
+}
+
+// Or performs a union operation on sets.
+var Or = &Builtin{
+ Name: "or",
+ Infix: "|",
+ Description: "Returns the union of two sets.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.SetOfAny),
+ types.Named("y", types.SetOfAny),
+ ),
+ types.Named("z", types.SetOfAny).Description("the union of `x` and `y`"),
+ ),
+ Categories: sets,
+ CanSkipBctx: true,
+}
+
+var Intersection = &Builtin{
+ Name: "intersection",
+ Description: "Returns the intersection of the given input sets.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("xs", types.NewSet(types.SetOfAny)).Description("set of sets to intersect"),
+ ),
+ types.Named("y", types.SetOfAny).Description("the intersection of all `xs` sets"),
+ ),
+ Categories: sets,
+ CanSkipBctx: true,
+}
+
+var Union = &Builtin{
+ Name: "union",
+ Description: "Returns the union of the given input sets.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("xs", types.NewSet(types.SetOfAny)).Description("set of sets to merge"),
+ ),
+ types.Named("y", types.SetOfAny).Description("the union of all `xs` sets"),
+ ),
+ Categories: sets,
+ CanSkipBctx: true,
+}
+
+/**
+ * Aggregates
+ */
+
+var aggregates = category("aggregates")
+
+var Count = &Builtin{
+ Name: "count",
+ Description: "Count takes a collection or string and returns the number of elements (or characters) in it.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("collection", types.NewAny(
+ types.SetOfAny,
+ types.NewArray(nil, types.A),
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ types.S,
+ )).Description("the set/array/object/string to be counted"),
+ ),
+ types.Named("n", types.N).Description("the count of elements, key/val pairs, or characters, respectively."),
+ ),
+ Categories: aggregates,
+ CanSkipBctx: true,
+}
+
+var Sum = &Builtin{
+ Name: "sum",
+ Description: "Sums elements of an array or set of numbers.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("collection", types.NewAny(
+ types.SetOfNum,
+ types.NewArray(nil, types.N),
+ )).Description("the set or array of numbers to sum"),
+ ),
+ types.Named("n", types.N).Description("the sum of all elements"),
+ ),
+ Categories: aggregates,
+ CanSkipBctx: true,
+}
+
+var Product = &Builtin{
+ Name: "product",
+ Description: "Multiplies elements of an array or set of numbers",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("collection", types.NewAny(
+ types.SetOfNum,
+ types.NewArray(nil, types.N),
+ )).Description("the set or array of numbers to multiply"),
+ ),
+ types.Named("n", types.N).Description("the product of all elements"),
+ ),
+ Categories: aggregates,
+ CanSkipBctx: true,
+}
+
+var Max = &Builtin{
+ Name: "max",
+ Description: "Returns the maximum value in a collection.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("collection", types.NewAny(
+ types.SetOfAny,
+ types.NewArray(nil, types.A),
+ )).Description("the set or array to be searched"),
+ ),
+ types.Named("n", types.A).Description("the maximum of all elements"),
+ ),
+ Categories: aggregates,
+ CanSkipBctx: true,
+}
+
+var Min = &Builtin{
+ Name: "min",
+ Description: "Returns the minimum value in a collection.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("collection", types.NewAny(
+ types.SetOfAny,
+ types.NewArray(nil, types.A),
+ )).Description("the set or array to be searched"),
+ ),
+ types.Named("n", types.A).Description("the minimum of all elements"),
+ ),
+ Categories: aggregates,
+ CanSkipBctx: true,
+}
+
+/**
+ * Sorting
+ */
+
+var Sort = &Builtin{
+ Name: "sort",
+ Description: "Returns a sorted array.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("collection", types.NewAny(
+ types.NewArray(nil, types.A),
+ types.SetOfAny,
+ )).Description("the array or set to be sorted"),
+ ),
+ types.Named("n", types.NewArray(nil, types.A)).Description("the sorted array"),
+ ),
+ Categories: aggregates,
+ CanSkipBctx: true,
+}
+
+/**
+ * Arrays
+ */
+
+var ArrayConcat = &Builtin{
+ Name: "array.concat",
+ Description: "Concatenates two arrays.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.NewArray(nil, types.A)).Description("the first array"),
+ types.Named("y", types.NewArray(nil, types.A)).Description("the second array"),
+ ),
+ types.Named("z", types.NewArray(nil, types.A)).Description("the concatenation of `x` and `y`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var ArraySlice = &Builtin{
+ Name: "array.slice",
+ Description: "Returns a slice of a given array. If `start` is greater or equal than `stop`, `slice` is `[]`.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("arr", types.NewArray(nil, types.A)).Description("the array to be sliced"),
+ types.Named("start", types.N).Description("the start index of the returned slice; if less than zero, it's clamped to 0"),
+ types.Named("stop", types.N).Description("the stop index of the returned slice; if larger than `count(arr)`, it's clamped to `count(arr)`"),
+ ),
+ types.Named("slice", types.NewArray(nil, types.A)).Description("the subslice of `array`, from `start` to `end`, including `arr[start]`, but excluding `arr[end]`"),
+ ),
+ CanSkipBctx: true,
+} // NOTE(sr): this function really needs examples
+
+var ArrayReverse = &Builtin{
+ Name: "array.reverse",
+ Description: "Returns the reverse of a given array.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("arr", types.NewArray(nil, types.A)).Description("the array to be reversed"),
+ ),
+ types.Named("rev", types.NewArray(nil, types.A)).Description("an array containing the elements of `arr` in reverse order"),
+ ),
+ CanSkipBctx: true,
+}
+
+/**
+ * Conversions
+ */
+var conversions = category("conversions")
+
+var ToNumber = &Builtin{
+ Name: "to_number",
+ Description: "Converts a string, bool, or number value to a number: Strings are converted to numbers using `strconv.Atoi`, Boolean `false` is converted to 0 and `true` is converted to 1.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.NewAny(
+ types.N,
+ types.S,
+ types.B,
+ types.Nl,
+ )).Description("value to convert"),
+ ),
+ types.Named("num", types.N).Description("the numeric representation of `x`"),
+ ),
+ Categories: conversions,
+ CanSkipBctx: true,
+}
+
+/**
+ * Regular Expressions
+ */
+
+var RegexMatch = &Builtin{
+ Name: "regex.match",
+ Description: "Matches a string against a regular expression.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("pattern", types.S).Description("regular expression"),
+ types.Named("value", types.S).Description("value to match against `pattern`"),
+ ),
+ types.Named("result", types.B).Description("true if `value` matches `pattern`"),
+ ),
+}
+
+var RegexIsValid = &Builtin{
+ Name: "regex.is_valid",
+ Description: "Checks if a string is a valid regular expression: the detailed syntax for patterns is defined by https://github.com/google/re2/wiki/Syntax.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("pattern", types.S).Description("regular expression"),
+ ),
+ types.Named("result", types.B).Description("true if `pattern` is a valid regular expression"),
+ ),
+ CanSkipBctx: true,
+}
+
+var RegexFindAllStringSubmatch = &Builtin{
+ Name: "regex.find_all_string_submatch_n",
+ Description: "Returns all successive matches of the expression.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("pattern", types.S).Description("regular expression"),
+ types.Named("value", types.S).Description("string to match"),
+ types.Named("number", types.N).Description("number of matches to return; `-1` means all matches"),
+ ),
+ types.Named("output", types.NewArray(nil, types.NewArray(nil, types.S))).Description("array of all matches"),
+ ),
+ CanSkipBctx: false,
+}
+
+var RegexTemplateMatch = &Builtin{
+ Name: "regex.template_match",
+ Description: "Matches a string against a pattern, where there pattern may be glob-like",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("template", types.S).Description("template expression containing `0..n` regular expressions"),
+ types.Named("value", types.S).Description("string to match"),
+ types.Named("delimiter_start", types.S).Description("start delimiter of the regular expression in `template`"),
+ types.Named("delimiter_end", types.S).Description("end delimiter of the regular expression in `template`"),
+ ),
+ types.Named("result", types.B).Description("true if `value` matches the `template`"),
+ ),
+ CanSkipBctx: true,
+} // TODO(sr): example:`regex.template_match("urn:foo:{.*}", "urn:foo:bar:baz", "{", "}")`` returns ``true``.
+
+var RegexSplit = &Builtin{
+ Name: "regex.split",
+ Description: "Splits the input string by the occurrences of the given pattern.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("pattern", types.S).Description("regular expression"),
+ types.Named("value", types.S).Description("string to match"),
+ ),
+ types.Named("output", types.NewArray(nil, types.S)).Description("the parts obtained by splitting `value`"),
+ ),
+ CanSkipBctx: false,
+}
+
+// RegexFind takes two strings and a number, the pattern, the value and number of match values to
+// return, -1 means all match values.
+var RegexFind = &Builtin{
+ Name: "regex.find_n",
+ Description: "Returns the specified number of matches when matching the input against the pattern.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("pattern", types.S).Description("regular expression"),
+ types.Named("value", types.S).Description("string to match"),
+ types.Named("number", types.N).Description("number of matches to return, if `-1`, returns all matches"),
+ ),
+ types.Named("output", types.NewArray(nil, types.S)).Description("collected matches"),
+ ),
+ CanSkipBctx: false,
+}
+
+// GlobsMatch takes two strings regexp-style strings and evaluates to true if their
+// intersection matches a non-empty set of non-empty strings.
+// Examples:
+// - "a.a." and ".b.b" -> true.
+// - "[a-z]*" and [0-9]+" -> not true.
+var GlobsMatch = &Builtin{
+ Name: "regex.globs_match",
+ Description: `Checks if the intersection of two glob-style regular expressions matches a non-empty set of non-empty strings.
+The set of regex symbols is limited for this builtin: only ` + "`.`, `*`, `+`, `[`, `-`, `]` and `\\` are treated as special symbols.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("glob1", types.S).Description("first glob-style regular expression"),
+ types.Named("glob2", types.S).Description("second glob-style regular expression"),
+ ),
+ types.Named("result", types.B).Description("true if the intersection of `glob1` and `glob2` matches a non-empty set of non-empty strings"),
+ ),
+ CanSkipBctx: true,
+}
+
+/**
+ * Strings
+ */
+var stringsCat = category("strings")
+
+var AnyPrefixMatch = &Builtin{
+ Name: "strings.any_prefix_match",
+ Description: "Returns true if any of the search strings begins with any of the base strings.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("search", types.NewAny(
+ types.S,
+ types.SetOfStr,
+ types.NewArray(nil, types.S),
+ )).Description("search string(s)"),
+ types.Named("base", types.NewAny(
+ types.S,
+ types.SetOfStr,
+ types.NewArray(nil, types.S),
+ )).Description("base string(s)"),
+ ),
+ types.Named("result", types.B).Description("result of the prefix check"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var AnySuffixMatch = &Builtin{
+ Name: "strings.any_suffix_match",
+ Description: "Returns true if any of the search strings ends with any of the base strings.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("search", types.NewAny(
+ types.S,
+ types.SetOfStr,
+ types.NewArray(nil, types.S),
+ )).Description("search string(s)"),
+ types.Named("base", types.NewAny(
+ types.S,
+ types.SetOfStr,
+ types.NewArray(nil, types.S),
+ )).Description("base string(s)"),
+ ),
+ types.Named("result", types.B).Description("result of the suffix check"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var Concat = &Builtin{
+ Name: "concat",
+ Description: "Joins a set or array of strings with a delimiter.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("delimiter", types.S).Description("string to use as a delimiter"),
+ types.Named("collection", types.NewAny(
+ types.SetOfStr,
+ types.NewArray(nil, types.S),
+ )).Description("strings to join"),
+ ),
+ types.Named("output", types.S).Description("the joined string"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var FormatInt = &Builtin{
+ Name: "format_int",
+ Description: "Returns the string representation of the number in the given base after rounding it down to an integer value.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("number", types.N).Description("number to format"),
+ types.Named("base", types.N).Description("base of number representation to use"),
+ ),
+ types.Named("output", types.S).Description("formatted number"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var IndexOf = &Builtin{
+ Name: "indexof",
+ Description: "Returns the index of a substring contained inside a string.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("haystack", types.S).Description("string to search in"),
+ types.Named("needle", types.S).Description("substring to look for"),
+ ),
+ types.Named("output", types.N).Description("index of first occurrence, `-1` if not found"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var IndexOfN = &Builtin{
+ Name: "indexof_n",
+ Description: "Returns a list of all the indexes of a substring contained inside a string.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("haystack", types.S).Description("string to search in"),
+ types.Named("needle", types.S).Description("substring to look for"),
+ ),
+ types.Named("output", types.NewArray(nil, types.N)).Description("all indices at which `needle` occurs in `haystack`, may be empty"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var Substring = &Builtin{
+ Name: "substring",
+ Description: "Returns the portion of a string for a given `offset` and a `length`. If `length < 0`, `output` is the remainder of the string.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("value", types.S).Description("string to extract substring from"),
+ types.Named("offset", types.N).Description("offset, must be positive"),
+ types.Named("length", types.N).Description("length of the substring starting from `offset`"),
+ ),
+ types.Named("output", types.S).Description("substring of `value` from `offset`, of length `length`"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var Contains = &Builtin{
+ Name: "contains",
+ Description: "Returns `true` if the search string is included in the base string",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("haystack", types.S).Description("string to search in"),
+ types.Named("needle", types.S).Description("substring to look for"),
+ ),
+ types.Named("result", types.B).Description("result of the containment check"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var StringCount = &Builtin{
+ Name: "strings.count",
+ Description: "Returns the number of non-overlapping instances of a substring in a string.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("search", types.S).Description("string to search in"),
+ types.Named("substring", types.S).Description("substring to look for"),
+ ),
+ types.Named("output", types.N).Description("count of occurrences, `0` if not found"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var StartsWith = &Builtin{
+ Name: "startswith",
+ Description: "Returns true if the search string begins with the base string.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("search", types.S).Description("search string"),
+ types.Named("base", types.S).Description("base string"),
+ ),
+ types.Named("result", types.B).Description("result of the prefix check"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var EndsWith = &Builtin{
+ Name: "endswith",
+ Description: "Returns true if the search string ends with the base string.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("search", types.S).Description("search string"),
+ types.Named("base", types.S).Description("base string"),
+ ),
+ types.Named("result", types.B).Description("result of the suffix check"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var Lower = &Builtin{
+ Name: "lower",
+ Description: "Returns the input string but with all characters in lower-case.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("string that is converted to lower-case"),
+ ),
+ types.Named("y", types.S).Description("lower-case of x"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var Upper = &Builtin{
+ Name: "upper",
+ Description: "Returns the input string but with all characters in upper-case.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("string that is converted to upper-case"),
+ ),
+ types.Named("y", types.S).Description("upper-case of x"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var Split = &Builtin{
+ Name: "split",
+ Description: "Split returns an array containing elements of the input string split on a delimiter.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("string that is split"),
+ types.Named("delimiter", types.S).Description("delimiter used for splitting"),
+ ),
+ types.Named("ys", types.NewArray(nil, types.S)).Description("split parts"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var Replace = &Builtin{
+ Name: "replace",
+ Description: "Replace replaces all instances of a sub-string.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("string being processed"),
+ types.Named("old", types.S).Description("substring to replace"),
+ types.Named("new", types.S).Description("string to replace `old` with"),
+ ),
+ types.Named("y", types.S).Description("string with replaced substrings"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var ReplaceN = &Builtin{
+ Name: "strings.replace_n",
+ Description: `Replaces a string from a list of old, new string pairs.
+Replacements are performed in the order they appear in the target string, without overlapping matches.
+The old string comparisons are done in argument order.`,
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("patterns", types.NewObject(
+ nil,
+ types.NewDynamicProperty(
+ types.S,
+ types.S)),
+ ).Description("replacement pairs"),
+ types.Named("value", types.S).Description("string to replace substring matches in"),
+ ),
+ types.Named("output", types.S).Description("string with replaced substrings"),
+ ),
+ CanSkipBctx: true,
+}
+
+var RegexReplace = &Builtin{
+ Name: "regex.replace",
+ Description: `Find and replaces the text using the regular expression pattern.`,
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("s", types.S).Description("string being processed"),
+ types.Named("pattern", types.S).Description("regex pattern to be applied"),
+ types.Named("value", types.S).Description("regex value"),
+ ),
+ types.Named("output", types.S).Description("string with replaced substrings"),
+ ),
+ CanSkipBctx: false,
+}
+
+var Trim = &Builtin{
+ Name: "trim",
+ Description: "Returns `value` with all leading or trailing instances of the `cutset` characters removed.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("value", types.S).Description("string to trim"),
+ types.Named("cutset", types.S).Description("string of characters that are cut off"),
+ ),
+ types.Named("output", types.S).Description("string trimmed of `cutset` characters"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var TrimLeft = &Builtin{
+ Name: "trim_left",
+ Description: "Returns `value` with all leading instances of the `cutset` characters removed.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("value", types.S).Description("string to trim"),
+ types.Named("cutset", types.S).Description("string of characters that are cut off on the left"),
+ ),
+ types.Named("output", types.S).Description("string left-trimmed of `cutset` characters"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var TrimPrefix = &Builtin{
+ Name: "trim_prefix",
+ Description: "Returns `value` without the prefix. If `value` doesn't start with `prefix`, it is returned unchanged.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("value", types.S).Description("string to trim"),
+ types.Named("prefix", types.S).Description("prefix to cut off"),
+ ),
+ types.Named("output", types.S).Description("string with `prefix` cut off"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var TrimRight = &Builtin{
+ Name: "trim_right",
+ Description: "Returns `value` with all trailing instances of the `cutset` characters removed.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("value", types.S).Description("string to trim"),
+ types.Named("cutset", types.S).Description("string of characters that are cut off on the right"),
+ ),
+ types.Named("output", types.S).Description("string right-trimmed of `cutset` characters"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var TrimSuffix = &Builtin{
+ Name: "trim_suffix",
+ Description: "Returns `value` without the suffix. If `value` doesn't end with `suffix`, it is returned unchanged.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("value", types.S).Description("string to trim"),
+ types.Named("suffix", types.S).Description("suffix to cut off"),
+ ),
+ types.Named("output", types.S).Description("string with `suffix` cut off"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var TrimSpace = &Builtin{
+ Name: "trim_space",
+ Description: "Return the given string with all leading and trailing white space removed.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("value", types.S).Description("string to trim"),
+ ),
+ types.Named("output", types.S).Description("string leading and trailing white space cut off"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var Sprintf = &Builtin{
+ Name: "sprintf",
+ Description: "Returns the given string, formatted.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("format", types.S).Description("string with formatting verbs"),
+ types.Named("values", types.NewArray(nil, types.A)).Description("arguments to format into formatting verbs"),
+ ),
+ types.Named("output", types.S).Description("`format` formatted by the values in `values`"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var StringReverse = &Builtin{
+ Name: "strings.reverse",
+ Description: "Reverses a given string.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("string to reverse"),
+ ),
+ types.Named("y", types.S).Description("reversed string"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+var RenderTemplate = &Builtin{
+ Name: "strings.render_template",
+ Description: `Renders a templated string with given template variables injected. For a given templated string and key/value mapping, values will be injected into the template where they are referenced by key.
+ For examples of templating syntax, see https://pkg.go.dev/text/template`,
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("value", types.S).Description("a templated string"),
+ types.Named("vars", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("a mapping of template variable keys to values"),
+ ),
+ types.Named("result", types.S).Description("rendered template with template variables injected"),
+ ),
+ Categories: stringsCat,
+ CanSkipBctx: true,
+}
+
+/**
+ * Numbers
+ */
+
+// RandIntn returns a random number 0 - n
+// Marked non-deterministic because it relies on RNG internally.
+var RandIntn = &Builtin{
+ Name: "rand.intn",
+ Description: "Returns a random integer between `0` and `n` (`n` exclusive). If `n` is `0`, then `y` is always `0`. For any given argument pair (`str`, `n`), the output will be consistent throughout a query evaluation.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("str", types.S).Description("seed string for the random number"),
+ types.Named("n", types.N).Description("upper bound of the random number (exclusive)"),
+ ),
+ types.Named("y", types.N).Description("random integer in the range `[0, abs(n))`"),
+ ),
+ Categories: number,
+ Nondeterministic: true,
+ CanSkipBctx: false,
+}
+
+var NumbersRange = &Builtin{
+ Name: "numbers.range",
+ Description: "Returns an array of numbers in the given (inclusive) range. If `a==b`, then `range == [a]`; if `a > b`, then `range` is in descending order.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("a", types.N).Description("the start of the range"),
+ types.Named("b", types.N).Description("the end of the range (inclusive)"),
+ ),
+ types.Named("range", types.NewArray(nil, types.N)).Description("the range between `a` and `b`"),
+ ),
+ CanSkipBctx: false, // needed for context timeout check
+}
+
+var NumbersRangeStep = &Builtin{
+ Name: "numbers.range_step",
+ Description: `Returns an array of numbers in the given (inclusive) range incremented by a positive step.
+ If "a==b", then "range == [a]"; if "a > b", then "range" is in descending order.
+ If the provided "step" is less then 1, an error will be thrown.
+ If "b" is not in the range of the provided "step", "b" won't be included in the result.
+ `,
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("a", types.N).Description("the start of the range"),
+ types.Named("b", types.N).Description("the end of the range (inclusive)"),
+ types.Named("step", types.N).Description("the step between numbers in the range"),
+ ),
+ types.Named("range", types.NewArray(nil, types.N)).Description("the range between `a` and `b` in `step` increments"),
+ ),
+ CanSkipBctx: false, // needed for context timeout check
+}
+
+/**
+ * Units
+ */
+
+var UnitsParse = &Builtin{
+ Name: "units.parse",
+ Description: `Converts strings like "10G", "5K", "4M", "1500m", and the like into a number.
+This number can be a non-integer, such as 1.5, 0.22, etc. Scientific notation is supported,
+allowing values such as "1e-3K" (1) or "2.5e6M" (2.5 million M).
+
+Supports standard metric decimal and binary SI units (e.g., K, Ki, M, Mi, G, Gi, etc.) where
+m, K, M, G, T, P, and E are treated as decimal units and Ki, Mi, Gi, Ti, Pi, and Ei are treated as
+binary units.
+
+Note that 'm' and 'M' are case-sensitive to allow distinguishing between "milli" and "mega" units
+respectively. Other units are case-insensitive.`,
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("the unit to parse"),
+ ),
+ types.Named("y", types.N).Description("the parsed number"),
+ ),
+ CanSkipBctx: true,
+}
+
+var UnitsParseBytes = &Builtin{
+ Name: "units.parse_bytes",
+ Description: `Converts strings like "10GB", "5K", "4mb", or "1e6KB" into an integer number of bytes.
+
+Supports standard byte units (e.g., KB, KiB, etc.) where KB, MB, GB, and TB are treated as decimal
+units, and KiB, MiB, GiB, and TiB are treated as binary units. Scientific notation is supported,
+enabling values like "1.5e3MB" (1500MB) or "2e6GiB" (2 million GiB).
+
+The bytes symbol (b/B) in the unit is optional; omitting it will yield the same result (e.g., "Mi"
+and "MiB" are equivalent).`,
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("the byte unit to parse"),
+ ),
+ types.Named("y", types.N).Description("the parsed number"),
+ ),
+ CanSkipBctx: true,
+}
+
+//
+/**
+ * Type
+ */
+
+// UUIDRFC4122 returns a version 4 UUID string.
+// Marked non-deterministic because it relies on RNG internally.
+var UUIDRFC4122 = &Builtin{
+ Name: "uuid.rfc4122",
+ Description: "Returns a new UUIDv4.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("k", types.S).Description("seed string"),
+ ),
+ types.Named("output", types.S).Description("a version 4 UUID; for any given `k`, the output will be consistent throughout a query evaluation"),
+ ),
+ Nondeterministic: true,
+ CanSkipBctx: false,
+}
+
+var UUIDParse = &Builtin{
+ Name: "uuid.parse",
+ Description: "Parses the string value as an UUID and returns an object with the well-defined fields of the UUID if valid.",
+ Categories: nil,
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("uuid", types.S).Description("UUID string to parse"),
+ ),
+ types.Named("result", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("Properties of UUID if valid (version, variant, etc). Undefined otherwise."),
+ ),
+ Relation: false,
+ CanSkipBctx: true,
+}
+
+/**
+ * JSON
+ */
+
+var objectCat = category("object")
+
+var JSONFilter = &Builtin{
+ Name: "json.filter",
+ Description: "Filters the object. " +
+ "For example: `json.filter({\"a\": {\"b\": \"x\", \"c\": \"y\"}}, [\"a/b\"])` will result in `{\"a\": {\"b\": \"x\"}}`). " +
+ "Paths are not filtered in-order and are deduplicated before being evaluated.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("object", types.NewObject(
+ nil,
+ types.NewDynamicProperty(types.A, types.A),
+ )).Description("object to filter"),
+ types.Named("paths", types.NewAny(
+ types.NewArray(
+ nil,
+ types.NewAny(
+ types.S,
+ types.NewArray(
+ nil,
+ types.A,
+ ),
+ ),
+ ),
+ types.NewSet(
+ types.NewAny(
+ types.S,
+ types.NewArray(
+ nil,
+ types.A,
+ ),
+ ),
+ ),
+ )).Description("JSON string paths"),
+ ),
+ types.Named("filtered", types.A).Description("remaining data from `object` with only keys specified in `paths`"),
+ ),
+ Categories: objectCat,
+ CanSkipBctx: true,
+}
+
+var JSONRemove = &Builtin{
+ Name: "json.remove",
+ Description: "Removes paths from an object. " +
+ "For example: `json.remove({\"a\": {\"b\": \"x\", \"c\": \"y\"}}, [\"a/b\"])` will result in `{\"a\": {\"c\": \"y\"}}`. " +
+ "Paths are not removed in-order and are deduplicated before being evaluated.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("object", types.NewObject(
+ nil,
+ types.NewDynamicProperty(types.A, types.A),
+ )).Description("object to remove paths from"),
+ types.Named("paths", types.NewAny(
+ types.NewArray(
+ nil,
+ types.NewAny(
+ types.S,
+ types.NewArray(
+ nil,
+ types.A,
+ ),
+ ),
+ ),
+ types.NewSet(
+ types.NewAny(
+ types.S,
+ types.NewArray(
+ nil,
+ types.A,
+ ),
+ ),
+ ),
+ )).Description("JSON string paths"),
+ ),
+ types.Named("output", types.A).Description("result of removing all keys specified in `paths`"),
+ ),
+ Categories: objectCat,
+ CanSkipBctx: true,
+}
+
+var JSONPatch = &Builtin{
+ Name: "json.patch",
+ Description: "Patches an object according to RFC6902. " +
+ "For example: `json.patch({\"a\": {\"foo\": 1}}, [{\"op\": \"add\", \"path\": \"/a/bar\", \"value\": 2}])` results in `{\"a\": {\"foo\": 1, \"bar\": 2}`. " +
+ "The patches are applied atomically: if any of them fails, the result will be undefined. " +
+ "Additionally works on sets, where a value contained in the set is considered to be its path.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("object", types.A).Description("the object to patch"), // TODO(sr): types.A?
+ types.Named("patches", types.NewArray(
+ nil,
+ types.NewObject(
+ []*types.StaticProperty{
+ {Key: "op", Value: types.S},
+ {Key: "path", Value: types.A},
+ },
+ types.NewDynamicProperty(types.A, types.A),
+ ),
+ )).Description("the JSON patches to apply"),
+ ),
+ types.Named("output", types.A).Description("result obtained after consecutively applying all patch operations in `patches`"),
+ ),
+ Categories: objectCat,
+ CanSkipBctx: true,
+}
+
+var ObjectSubset = &Builtin{
+ Name: "object.subset",
+ Description: "Determines if an object `sub` is a subset of another object `super`." +
+ "Object `sub` is a subset of object `super` if and only if every key in `sub` is also in `super`, " +
+ "**and** for all keys which `sub` and `super` share, they have the same value. " +
+ "This function works with objects, sets, arrays and a set of array and set." +
+ "If both arguments are objects, then the operation is recursive, e.g. " +
+ "`{\"c\": {\"x\": {10, 15, 20}}` is a subset of `{\"a\": \"b\", \"c\": {\"x\": {10, 15, 20, 25}, \"y\": \"z\"}`. " +
+ "If both arguments are sets, then this function checks if every element of `sub` is a member of `super`, " +
+ "but does not attempt to recurse. If both arguments are arrays, " +
+ "then this function checks if `sub` appears contiguously in order within `super`, " +
+ "and also does not attempt to recurse. If `super` is array and `sub` is set, " +
+ "then this function checks if `super` contains every element of `sub` with no consideration of ordering, " +
+ "and also does not attempt to recurse.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("super", types.NewAny(types.NewObject(
+ nil,
+ types.NewDynamicProperty(types.A, types.A),
+ ), types.SetOfAny,
+ types.NewArray(nil, types.A),
+ )).Description("object to test if sub is a subset of"),
+ types.Named("sub", types.NewAny(types.NewObject(
+ nil,
+ types.NewDynamicProperty(types.A, types.A),
+ ), types.SetOfAny,
+ types.NewArray(nil, types.A),
+ )).Description("object to test if super is a superset of"),
+ ),
+ types.Named("result", types.A).Description("`true` if `sub` is a subset of `super`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var ObjectUnion = &Builtin{
+ Name: "object.union",
+ Description: "Creates a new object of the asymmetric union of two objects. " +
+ "For example: `object.union({\"a\": 1, \"b\": 2, \"c\": {\"d\": 3}}, {\"a\": 7, \"c\": {\"d\": 4, \"e\": 5}})` will result in `{\"a\": 7, \"b\": 2, \"c\": {\"d\": 4, \"e\": 5}}`.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("a", types.NewObject(
+ nil,
+ types.NewDynamicProperty(types.A, types.A),
+ )).Description("left-hand object"),
+ types.Named("b", types.NewObject(
+ nil,
+ types.NewDynamicProperty(types.A, types.A),
+ )).Description("right-hand object"),
+ ),
+ types.Named("output", types.A).Description("a new object which is the result of an asymmetric recursive union of two objects where conflicts are resolved by choosing the key from the right-hand object `b`"),
+ ), // TODO(sr): types.A? ^^^^^^^ (also below)
+ CanSkipBctx: true,
+}
+
+var ObjectUnionN = &Builtin{
+ Name: "object.union_n",
+ Description: "Creates a new object that is the asymmetric union of all objects merged from left to right. " +
+ "For example: `object.union_n([{\"a\": 1}, {\"b\": 2}, {\"a\": 3}])` will result in `{\"b\": 2, \"a\": 3}`.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("objects", types.NewArray(
+ nil,
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ )).Description("list of objects to merge"),
+ ),
+ types.Named("output", types.A).Description("asymmetric recursive union of all objects in `objects`, merged from left to right, where conflicts are resolved by choosing the key from the right-hand object"),
+ ),
+ CanSkipBctx: true,
+}
+
+var ObjectRemove = &Builtin{
+ Name: "object.remove",
+ Description: "Removes specified keys from an object.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("object", types.NewObject(
+ nil,
+ types.NewDynamicProperty(types.A, types.A),
+ )).Description("object to remove keys from"),
+ types.Named("keys", types.NewAny(
+ types.NewArray(nil, types.A),
+ types.SetOfAny,
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ )).Description("keys to remove from x"),
+ ),
+ types.Named("output", types.A).Description("result of removing the specified `keys` from `object`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var ObjectFilter = &Builtin{
+ Name: "object.filter",
+ Description: "Filters the object by keeping only specified keys. " +
+ "For example: `object.filter({\"a\": {\"b\": \"x\", \"c\": \"y\"}, \"d\": \"z\"}, [\"a\"])` will result in `{\"a\": {\"b\": \"x\", \"c\": \"y\"}}`).",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("object", types.NewObject(
+ nil,
+ types.NewDynamicProperty(types.A, types.A),
+ )).Description("object to filter keys"),
+ types.Named("keys", types.NewAny(
+ types.NewArray(nil, types.A),
+ types.SetOfAny,
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ )).Description("keys to keep in `object`"),
+ ),
+ types.Named("filtered", types.A).Description("remaining data from `object` with only keys specified in `keys`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var ObjectGet = &Builtin{
+ Name: "object.get",
+ Description: "Returns value of an object's key if present, otherwise a default. " +
+ "If the supplied `key` is an `array`, then `object.get` will search through a nested object or array using each key in turn. " +
+ "For example: `object.get({\"a\": [{ \"b\": true }]}, [\"a\", 0, \"b\"], false)` results in `true`.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("object", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("object to get `key` from"),
+ types.Named("key", types.A).Description("key to lookup in `object`"),
+ types.Named("default", types.A).Description("default to use when lookup fails"),
+ ),
+ types.Named("value", types.A).Description("`object[key]` if present, otherwise `default`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var ObjectKeys = &Builtin{
+ Name: "object.keys",
+ Description: "Returns a set of an object's keys. " +
+ "For example: `object.keys({\"a\": 1, \"b\": true, \"c\": \"d\")` results in `{\"a\", \"b\", \"c\"}`.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("object", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("object to get keys from"),
+ ),
+ types.Named("value", types.SetOfAny).Description("set of `object`'s keys"),
+ ),
+ CanSkipBctx: true,
+}
+
+/*
+ * Encoding
+ */
+var encoding = category("encoding")
+
+var JSONMarshal = &Builtin{
+ Name: "json.marshal",
+ Description: "Serializes the input term to JSON.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.A).Description("the term to serialize"),
+ ),
+ types.Named("y", types.S).Description("the JSON string representation of `x`"),
+ ),
+ Categories: encoding,
+ CanSkipBctx: true,
+}
+
+var JSONMarshalWithOptions = &Builtin{
+ Name: "json.marshal_with_options",
+ Description: "Serializes the input term JSON, with additional formatting options via the `opts` parameter. " +
+ "`opts` accepts keys `pretty` (enable multi-line/formatted JSON), `prefix` (string to prefix lines with, default empty string) and `indent` (string to indent with, default `\\t`).",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.A).Description("the term to serialize"),
+ types.Named("opts", types.NewObject(
+ []*types.StaticProperty{
+ types.NewStaticProperty("pretty", types.B),
+ types.NewStaticProperty("indent", types.S),
+ types.NewStaticProperty("prefix", types.S),
+ },
+ types.NewDynamicProperty(types.S, types.A),
+ )).Description("encoding options"),
+ ),
+ types.Named("y", types.S).Description("the JSON string representation of `x`, with configured prefix/indent string(s) as appropriate"),
+ ),
+ Categories: encoding,
+ CanSkipBctx: true,
+}
+
+var JSONUnmarshal = &Builtin{
+ Name: "json.unmarshal",
+ Description: "Deserializes the input string.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("a JSON string"),
+ ),
+ types.Named("y", types.A).Description("the term deserialized from `x`"),
+ ),
+ Categories: encoding,
+ CanSkipBctx: true,
+}
+
+var JSONIsValid = &Builtin{
+ Name: "json.is_valid",
+ Description: "Verifies the input string is a valid JSON document.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("a JSON string"),
+ ),
+ types.Named("result", types.B).Description("`true` if `x` is valid JSON, `false` otherwise"),
+ ),
+ Categories: encoding,
+ CanSkipBctx: true,
+}
+
+var Base64Encode = &Builtin{
+ Name: "base64.encode",
+ Description: "Serializes the input string into base64 encoding.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("string to encode"),
+ ),
+ types.Named("y", types.S).Description("base64 serialization of `x`"),
+ ),
+ Categories: encoding,
+ CanSkipBctx: true,
+}
+
+var Base64Decode = &Builtin{
+ Name: "base64.decode",
+ Description: "Deserializes the base64 encoded input string.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("string to decode"),
+ ),
+ types.Named("y", types.S).Description("base64 deserialization of `x`"),
+ ),
+ Categories: encoding,
+ CanSkipBctx: true,
+}
+
+var Base64IsValid = &Builtin{
+ Name: "base64.is_valid",
+ Description: "Verifies the input string is base64 encoded.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("string to check"),
+ ),
+ types.Named("result", types.B).Description("`true` if `x` is valid base64 encoded value, `false` otherwise"),
+ ),
+ Categories: encoding,
+ CanSkipBctx: true,
+}
+
+var Base64UrlEncode = &Builtin{
+ Name: "base64url.encode",
+ Description: "Serializes the input string into base64url encoding.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("string to encode"),
+ ),
+ types.Named("y", types.S).Description("base64url serialization of `x`"),
+ ),
+ Categories: encoding,
+ CanSkipBctx: true,
+}
+
+var Base64UrlEncodeNoPad = &Builtin{
+ Name: "base64url.encode_no_pad",
+ Description: "Serializes the input string into base64url encoding without padding.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("string to encode"),
+ ),
+ types.Named("y", types.S).Description("base64url serialization of `x`"),
+ ),
+ Categories: encoding,
+ CanSkipBctx: true,
+}
+
+var Base64UrlDecode = &Builtin{
+ Name: "base64url.decode",
+ Description: "Deserializes the base64url encoded input string.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("string to decode"),
+ ),
+ types.Named("y", types.S).Description("base64url deserialization of `x`"),
+ ),
+ Categories: encoding,
+ CanSkipBctx: true,
+}
+
+var URLQueryDecode = &Builtin{
+ Name: "urlquery.decode",
+ Description: "Decodes a URL-encoded input string.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("the URL-encoded string"),
+ ),
+ types.Named("y", types.S).Description("URL-encoding deserialization of `x`"),
+ ),
+ Categories: encoding,
+ CanSkipBctx: true,
+}
+
+var URLQueryEncode = &Builtin{
+ Name: "urlquery.encode",
+ Description: "Encodes the input string into a URL-encoded string.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("the string to encode"),
+ ),
+ types.Named("y", types.S).Description("URL-encoding serialization of `x`"),
+ ),
+ Categories: encoding,
+ CanSkipBctx: true,
+}
+
+var URLQueryEncodeObject = &Builtin{
+ Name: "urlquery.encode_object",
+ Description: "Encodes the given object into a URL encoded query string.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("object", types.NewObject(
+ nil,
+ types.NewDynamicProperty(
+ types.S,
+ types.NewAny(
+ types.S,
+ types.NewArray(nil, types.S),
+ types.SetOfStr,
+ ),
+ ),
+ ),
+ ).Description("the object to encode"),
+ ),
+ types.Named("y", types.S).Description("the URL-encoded serialization of `object`"),
+ ),
+ Categories: encoding,
+ CanSkipBctx: true,
+}
+
+var URLQueryDecodeObject = &Builtin{
+ Name: "urlquery.decode_object",
+ Description: "Decodes the given URL query string into an object.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("the query string"),
+ ),
+ types.Named("object", types.NewObject(nil, types.NewDynamicProperty(
+ types.S,
+ types.NewArray(nil, types.S)))).Description("the resulting object"),
+ ),
+ Categories: encoding,
+ CanSkipBctx: true,
+}
+
+var YAMLMarshal = &Builtin{
+ Name: "yaml.marshal",
+ Description: "Serializes the input term to YAML.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.A).Description("the term to serialize"),
+ ),
+ types.Named("y", types.S).Description("the YAML string representation of `x`"),
+ ),
+ Categories: encoding,
+ CanSkipBctx: true,
+}
+
+var YAMLUnmarshal = &Builtin{
+ Name: "yaml.unmarshal",
+ Description: "Deserializes the input string.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("a YAML string"),
+ ),
+ types.Named("y", types.A).Description("the term deserialized from `x`"),
+ ),
+ Categories: encoding,
+ CanSkipBctx: true,
+}
+
+// YAMLIsValid verifies the input string is a valid YAML document.
+var YAMLIsValid = &Builtin{
+ Name: "yaml.is_valid",
+ Description: "Verifies the input string is a valid YAML document.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("a YAML string"),
+ ),
+ types.Named("result", types.B).Description("`true` if `x` is valid YAML, `false` otherwise"),
+ ),
+ Categories: encoding,
+ CanSkipBctx: true,
+}
+
+var HexEncode = &Builtin{
+ Name: "hex.encode",
+ Description: "Serializes the input string using hex-encoding.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("string to encode"),
+ ),
+ types.Named("y", types.S).Description("serialization of `x` using hex-encoding"),
+ ),
+ Categories: encoding,
+ CanSkipBctx: true,
+}
+
+var HexDecode = &Builtin{
+ Name: "hex.decode",
+ Description: "Deserializes the hex-encoded input string.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("a hex-encoded string"),
+ ),
+ types.Named("y", types.S).Description("deserialized from `x`"),
+ ),
+ Categories: encoding,
+ CanSkipBctx: true,
+}
+
+/**
+ * Tokens
+ */
+var tokensCat = category("tokens")
+
+var JWTDecode = &Builtin{
+ Name: "io.jwt.decode",
+ Description: "Decodes a JSON Web Token and outputs it as an object.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("jwt", types.S).Description("JWT token to decode"),
+ ),
+ types.Named("output", types.NewArray([]types.Type{
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ types.S,
+ }, nil)).Description("`[header, payload, sig]`, where `header` and `payload` are objects; `sig` is the hexadecimal representation of the signature on the token."),
+ ),
+ Categories: tokensCat,
+ CanSkipBctx: true,
+}
+
+var JWTVerifyRS256 = &Builtin{
+ Name: "io.jwt.verify_rs256",
+ Description: "Verifies if a RS256 JWT signature is valid.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
+ types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"),
+ ),
+ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
+ ),
+ Categories: tokensCat,
+ CanSkipBctx: false,
+}
+
+var JWTVerifyRS384 = &Builtin{
+ Name: "io.jwt.verify_rs384",
+ Description: "Verifies if a RS384 JWT signature is valid.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
+ types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"),
+ ),
+ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
+ ),
+ Categories: tokensCat,
+ CanSkipBctx: false,
+}
+
+var JWTVerifyRS512 = &Builtin{
+ Name: "io.jwt.verify_rs512",
+ Description: "Verifies if a RS512 JWT signature is valid.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
+ types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"),
+ ),
+ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
+ ),
+ Categories: tokensCat,
+ CanSkipBctx: false,
+}
+
+var JWTVerifyPS256 = &Builtin{
+ Name: "io.jwt.verify_ps256",
+ Description: "Verifies if a PS256 JWT signature is valid.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
+ types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"),
+ ),
+ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
+ ),
+ Categories: tokensCat,
+ CanSkipBctx: false,
+}
+
+var JWTVerifyPS384 = &Builtin{
+ Name: "io.jwt.verify_ps384",
+ Description: "Verifies if a PS384 JWT signature is valid.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
+ types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"),
+ ),
+ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
+ ),
+ Categories: tokensCat,
+ CanSkipBctx: false,
+}
+
+var JWTVerifyPS512 = &Builtin{
+ Name: "io.jwt.verify_ps512",
+ Description: "Verifies if a PS512 JWT signature is valid.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
+ types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"),
+ ),
+ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
+ ),
+ Categories: tokensCat,
+ CanSkipBctx: false,
+}
+
+var JWTVerifyES256 = &Builtin{
+ Name: "io.jwt.verify_es256",
+ Description: "Verifies if a ES256 JWT signature is valid.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
+ types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"),
+ ),
+ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
+ ),
+ Categories: tokensCat,
+ CanSkipBctx: false,
+}
+
+var JWTVerifyES384 = &Builtin{
+ Name: "io.jwt.verify_es384",
+ Description: "Verifies if a ES384 JWT signature is valid.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
+ types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"),
+ ),
+ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
+ ),
+ Categories: tokensCat,
+ CanSkipBctx: false,
+}
+
+var JWTVerifyES512 = &Builtin{
+ Name: "io.jwt.verify_es512",
+ Description: "Verifies if a ES512 JWT signature is valid.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
+ types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"),
+ ),
+ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
+ ),
+ Categories: tokensCat,
+ CanSkipBctx: false,
+}
+
+var JWTVerifyEdDSA = &Builtin{
+ Name: "io.jwt.verify_eddsa",
+ Description: "Verifies if an EdDSA JWT signature is valid.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
+ types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"),
+ ),
+ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
+ ),
+ Categories: tokensCat,
+ CanSkipBctx: false,
+}
+
+var JWTVerifyHS256 = &Builtin{
+ Name: "io.jwt.verify_hs256",
+ Description: "Verifies if a HS256 (secret) JWT signature is valid.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
+ types.Named("secret", types.S).Description("plain text secret used to verify the signature"),
+ ),
+ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
+ ),
+ Categories: tokensCat,
+ CanSkipBctx: false,
+}
+
+var JWTVerifyHS384 = &Builtin{
+ Name: "io.jwt.verify_hs384",
+ Description: "Verifies if a HS384 (secret) JWT signature is valid.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
+ types.Named("secret", types.S).Description("plain text secret used to verify the signature"),
+ ),
+ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
+ ),
+ Categories: tokensCat,
+ CanSkipBctx: false,
+}
+
+var JWTVerifyHS512 = &Builtin{
+ Name: "io.jwt.verify_hs512",
+ Description: "Verifies if a HS512 (secret) JWT signature is valid.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"),
+ types.Named("secret", types.S).Description("plain text secret used to verify the signature"),
+ ),
+ types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"),
+ ),
+ Categories: tokensCat,
+ CanSkipBctx: false,
+}
+
+// Marked non-deterministic because it relies on time internally.
+var JWTDecodeVerify = &Builtin{
+ Name: "io.jwt.decode_verify",
+ Description: `Verifies a JWT signature under parameterized constraints and decodes the claims if it is valid.
+Supports the following algorithms: HS256, HS384, HS512, RS256, RS384, RS512, ES256, ES384, ES512, PS256, PS384, PS512, and EdDSA.`,
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("jwt", types.S).Description("JWT token whose signature is to be verified and whose claims are to be checked"),
+ types.Named("constraints", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("claim verification constraints"),
+ ),
+ types.Named("output", types.NewArray([]types.Type{
+ types.B,
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ }, nil)).Description("`[valid, header, payload]`: if the input token is verified and meets the requirements of `constraints` then `valid` is `true`; `header` and `payload` are objects containing the JOSE header and the JWT claim set; otherwise, `valid` is `false`, `header` and `payload` are `{}`"),
+ ),
+ Categories: tokensCat,
+ Nondeterministic: true,
+ CanSkipBctx: false,
+}
+
+var tokenSign = category("tokensign")
+
+// Marked non-deterministic because it relies on RNG internally.
+var JWTEncodeSignRaw = &Builtin{
+ Name: "io.jwt.encode_sign_raw",
+ Description: "Encodes and optionally signs a JSON Web Token.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("headers", types.S).Description("JWS Protected Header"),
+ types.Named("payload", types.S).Description("JWS Payload"),
+ types.Named("key", types.S).Description("JSON Web Key (RFC7517)"),
+ ),
+ types.Named("output", types.S).Description("signed JWT"),
+ ),
+ Categories: tokenSign,
+ Nondeterministic: true,
+ CanSkipBctx: false,
+}
+
+// Marked non-deterministic because it relies on RNG internally.
+var JWTEncodeSign = &Builtin{
+ Name: "io.jwt.encode_sign",
+ Description: "Encodes and optionally signs a JSON Web Token. Inputs are taken as objects, not encoded strings (see `io.jwt.encode_sign_raw`).",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("headers", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWS Protected Header"),
+ types.Named("payload", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWS Payload"),
+ types.Named("key", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JSON Web Key (RFC7517)"),
+ ),
+ types.Named("output", types.S).Description("signed JWT"),
+ ),
+ Categories: tokenSign,
+ Nondeterministic: true,
+ CanSkipBctx: false,
+}
+
+/**
+ * Time
+ */
+
+// Marked non-deterministic because it relies on time directly.
+var NowNanos = &Builtin{
+ Name: "time.now_ns",
+ Description: "Returns the current time since epoch in nanoseconds.",
+ Decl: types.NewFunction(
+ nil,
+ types.Named("now", types.N).Description("nanoseconds since epoch"),
+ ),
+ Nondeterministic: true,
+ CanSkipBctx: false,
+}
+
+var ParseNanos = &Builtin{
+ Name: "time.parse_ns",
+ Description: "Returns the time in nanoseconds parsed from the string in the given format. `undefined` if the result would be outside the valid time range that can fit within an `int64`.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("layout", types.S).Description("format used for parsing, see the [Go `time` package documentation](https://golang.org/pkg/time/#Parse) for more details"),
+ types.Named("value", types.S).Description("input to parse according to `layout`"),
+ ),
+ types.Named("ns", types.N).Description("`value` in nanoseconds since epoch"),
+ ),
+ CanSkipBctx: true,
+}
+
+var ParseRFC3339Nanos = &Builtin{
+ Name: "time.parse_rfc3339_ns",
+ Description: "Returns the time in nanoseconds parsed from the string in RFC3339 format. `undefined` if the result would be outside the valid time range that can fit within an `int64`.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("value", types.S).Description("input string to parse in RFC3339 format"),
+ ),
+ types.Named("ns", types.N).Description("`value` in nanoseconds since epoch"),
+ ),
+ CanSkipBctx: true,
+}
+
+var ParseDurationNanos = &Builtin{
+ Name: "time.parse_duration_ns",
+ Description: "Returns the duration in nanoseconds represented by a string.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("duration", types.S).Description("a duration like \"3m\"; see the [Go `time` package documentation](https://golang.org/pkg/time/#ParseDuration) for more details"),
+ ),
+ types.Named("ns", types.N).Description("the `duration` in nanoseconds"),
+ ),
+ CanSkipBctx: true,
+}
+
+var Format = &Builtin{
+ Name: "time.format",
+ Description: "Returns the formatted timestamp for the nanoseconds since epoch.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.NewAny(
+ types.N,
+ types.NewArray([]types.Type{types.N, types.S}, nil),
+ types.NewArray([]types.Type{types.N, types.S, types.S}, nil),
+ )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string; or a three-element array of ns, timezone string and a layout string or golang defined formatting constant (see golang supported time formats)"),
+ ),
+ types.Named("formatted timestamp", types.S).Description("the formatted timestamp represented for the nanoseconds since the epoch in the supplied timezone (or UTC)"),
+ ),
+ CanSkipBctx: true,
+}
+
+var Date = &Builtin{
+ Name: "time.date",
+ Description: "Returns the `[year, month, day]` for the nanoseconds since epoch.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.NewAny(
+ types.N,
+ types.NewArray([]types.Type{types.N, types.S}, nil),
+ )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string"),
+ ),
+ types.Named("date", types.NewArray([]types.Type{types.N, types.N, types.N}, nil)).Description("an array of `year`, `month` (1-12), and `day` (1-31)"),
+ ),
+ CanSkipBctx: true,
+}
+
+var Clock = &Builtin{
+ Name: "time.clock",
+ Description: "Returns the `[hour, minute, second]` of the day for the nanoseconds since epoch.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.NewAny(
+ types.N,
+ types.NewArray([]types.Type{types.N, types.S}, nil),
+ )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string"),
+ ),
+ types.Named("output", types.NewArray([]types.Type{types.N, types.N, types.N}, nil)).
+ Description("the `hour`, `minute` (0-59), and `second` (0-59) representing the time of day for the nanoseconds since epoch in the supplied timezone (or UTC)"),
+ ),
+ CanSkipBctx: true,
+}
+
+var Weekday = &Builtin{
+ Name: "time.weekday",
+ Description: "Returns the day of the week (Monday, Tuesday, ...) for the nanoseconds since epoch.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.NewAny(
+ types.N,
+ types.NewArray([]types.Type{types.N, types.S}, nil),
+ )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string"),
+ ),
+ types.Named("day", types.S).Description("the weekday represented by `ns` nanoseconds since the epoch in the supplied timezone (or UTC)"),
+ ),
+ CanSkipBctx: true,
+}
+
+var AddDate = &Builtin{
+ Name: "time.add_date",
+ Description: "Returns the nanoseconds since epoch after adding years, months and days to nanoseconds. Month & day values outside their usual ranges after the operation and will be normalized - for example, October 32 would become November 1. `undefined` if the result would be outside the valid time range that can fit within an `int64`.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("ns", types.N).Description("nanoseconds since the epoch"),
+ types.Named("years", types.N).Description("number of years to add"),
+ types.Named("months", types.N).Description("number of months to add"),
+ types.Named("days", types.N).Description("number of days to add"),
+ ),
+ types.Named("output", types.N).Description("nanoseconds since the epoch representing the input time, with years, months and days added"),
+ ),
+ CanSkipBctx: true,
+}
+
+var Diff = &Builtin{
+ Name: "time.diff",
+ Description: "Returns the difference between two unix timestamps in nanoseconds (with optional timezone strings).",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("ns1", types.NewAny(
+ types.N,
+ types.NewArray([]types.Type{types.N, types.S}, nil),
+ )).Description("nanoseconds since the epoch; or a two-element array of the nanoseconds, and a timezone string"),
+ types.Named("ns2", types.NewAny(
+ types.N,
+ types.NewArray([]types.Type{types.N, types.S}, nil),
+ )).Description("nanoseconds since the epoch; or a two-element array of the nanoseconds, and a timezone string"),
+ ),
+ types.Named("output", types.NewArray([]types.Type{types.N, types.N, types.N, types.N, types.N, types.N}, nil)).Description("difference between `ns1` and `ns2` (in their supplied timezones, if supplied, or UTC) as array of numbers: `[years, months, days, hours, minutes, seconds]`"),
+ ),
+ CanSkipBctx: true,
+}
+
+/**
+ * Crypto.
+ */
+
+var CryptoX509ParseCertificates = &Builtin{
+ Name: "crypto.x509.parse_certificates",
+ Description: `Returns zero or more certificates from the given encoded string containing
+DER certificate data.
+
+If the input is empty, the function will return null. The input string should be a list of one or more
+concatenated PEM blocks. The whole input of concatenated PEM blocks can optionally be Base64 encoded.`,
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing one or more certificates or a PEM string of one or more certificates"),
+ ),
+ types.Named("output", types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)))).Description("parsed X.509 certificates represented as objects"),
+ ),
+ CanSkipBctx: true,
+}
+
+var CryptoX509ParseAndVerifyCertificates = &Builtin{
+ Name: "crypto.x509.parse_and_verify_certificates",
+ Description: `Returns one or more certificates from the given string containing PEM
+or base64 encoded DER certificates after verifying the supplied certificates form a complete
+certificate chain back to a trusted root.
+
+The first certificate is treated as the root and the last is treated as the leaf,
+with all others being treated as intermediates.`,
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing two or more certificates where the first is a root CA, the last is a leaf certificate, and all others are intermediate CAs"),
+ ),
+ types.Named("output", types.NewArray([]types.Type{
+ types.B,
+ types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))),
+ }, nil)).Description("array of `[valid, certs]`: if the input certificate chain could be verified then `valid` is `true` and `certs` is an array of X.509 certificates represented as objects; if the input certificate chain could not be verified then `valid` is `false` and `certs` is `[]`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var CryptoX509ParseAndVerifyCertificatesWithOptions = &Builtin{
+ Name: "crypto.x509.parse_and_verify_certificates_with_options",
+ Description: `Returns one or more certificates from the given string containing PEM
+or base64 encoded DER certificates after verifying the supplied certificates form a complete
+certificate chain back to a trusted root. A config option passed as the second argument can
+be used to configure the validation options used.
+
+The first certificate is treated as the root and the last is treated as the leaf,
+with all others being treated as intermediates.`,
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing two or more certificates where the first is a root CA, the last is a leaf certificate, and all others are intermediate CAs"),
+ types.Named("options", types.NewObject(
+ nil,
+ types.NewDynamicProperty(types.S, types.A),
+ )).Description("object containing extra configs to verify the validity of certificates. `options` object supports four fields which maps to same fields in [x509.VerifyOptions struct](https://pkg.go.dev/crypto/x509#VerifyOptions). `DNSName`, `CurrentTime`: Nanoseconds since the Unix Epoch as a number, `MaxConstraintComparisons` and `KeyUsages`. `KeyUsages` is list and can have possible values as in: `\"KeyUsageAny\"`, `\"KeyUsageServerAuth\"`, `\"KeyUsageClientAuth\"`, `\"KeyUsageCodeSigning\"`, `\"KeyUsageEmailProtection\"`, `\"KeyUsageIPSECEndSystem\"`, `\"KeyUsageIPSECTunnel\"`, `\"KeyUsageIPSECUser\"`, `\"KeyUsageTimeStamping\"`, `\"KeyUsageOCSPSigning\"`, `\"KeyUsageMicrosoftServerGatedCrypto\"`, `\"KeyUsageNetscapeServerGatedCrypto\"`, `\"KeyUsageMicrosoftCommercialCodeSigning\"`, `\"KeyUsageMicrosoftKernelCodeSigning\"` "),
+ ),
+ types.Named("output", types.NewArray([]types.Type{
+ types.B,
+ types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))),
+ }, nil)).Description("array of `[valid, certs]`: if the input certificate chain could be verified then `valid` is `true` and `certs` is an array of X.509 certificates represented as objects; if the input certificate chain could not be verified then `valid` is `false` and `certs` is `[]`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var CryptoX509ParseCertificateRequest = &Builtin{
+ Name: "crypto.x509.parse_certificate_request",
+ Description: "Returns a PKCS #10 certificate signing request from the given PEM-encoded PKCS#10 certificate signing request.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("csr", types.S).Description("base64 string containing either a PEM encoded or DER CSR or a string containing a PEM CSR"),
+ ),
+ types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("X.509 CSR represented as an object"),
+ ),
+ CanSkipBctx: true,
+}
+
+var CryptoX509ParseKeyPair = &Builtin{
+ Name: "crypto.x509.parse_keypair",
+ Description: "Returns a valid key pair",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("cert", types.S).Description("string containing PEM or base64 encoded DER certificates"),
+ types.Named("pem", types.S).Description("string containing PEM or base64 encoded DER keys"),
+ ),
+ types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("if key pair is valid, returns the tls.certificate(https://pkg.go.dev/crypto/tls#Certificate) as an object. If the key pair is invalid, nil and an error are returned."),
+ ),
+ CanSkipBctx: true,
+}
+
+var CryptoX509ParseRSAPrivateKey = &Builtin{
+ Name: "crypto.x509.parse_rsa_private_key",
+ Description: "Returns a JWK for signing a JWT from the given PEM-encoded RSA private key.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("pem", types.S).Description("base64 string containing a PEM encoded RSA private key"),
+ ),
+ types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWK as an object"),
+ ),
+ CanSkipBctx: true,
+}
+
+var CryptoParsePrivateKeys = &Builtin{
+ Name: "crypto.parse_private_keys",
+ Description: `Returns zero or more private keys from the given encoded string containing DER certificate data.
+
+If the input is empty, the function will return null. The input string should be a list of one or more concatenated PEM blocks. The whole input of concatenated PEM blocks can optionally be Base64 encoded.`,
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("keys", types.S).Description("PEM encoded data containing one or more private keys as concatenated blocks. Optionally Base64 encoded."),
+ ),
+ types.Named("output", types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)))).Description("parsed private keys represented as objects"),
+ ),
+ CanSkipBctx: true,
+}
+
+var CryptoMd5 = &Builtin{
+ Name: "crypto.md5",
+ Description: "Returns a string representing the input string hashed with the MD5 function",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("input string"),
+ ),
+ types.Named("y", types.S).Description("MD5-hash of `x`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var CryptoSha1 = &Builtin{
+ Name: "crypto.sha1",
+ Description: "Returns a string representing the input string hashed with the SHA1 function",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("input string"),
+ ),
+ types.Named("y", types.S).Description("SHA1-hash of `x`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var CryptoSha256 = &Builtin{
+ Name: "crypto.sha256",
+ Description: "Returns a string representing the input string hashed with the SHA256 function",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("input string"),
+ ),
+ types.Named("y", types.S).Description("SHA256-hash of `x`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var CryptoHmacMd5 = &Builtin{
+ Name: "crypto.hmac.md5",
+ Description: "Returns a string representing the MD5 HMAC of the input message using the input key.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("input string"),
+ types.Named("key", types.S).Description("key to use"),
+ ),
+ types.Named("y", types.S).Description("MD5-HMAC of `x`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var CryptoHmacSha1 = &Builtin{
+ Name: "crypto.hmac.sha1",
+ Description: "Returns a string representing the SHA1 HMAC of the input message using the input key.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("input string"),
+ types.Named("key", types.S).Description("key to use"),
+ ),
+ types.Named("y", types.S).Description("SHA1-HMAC of `x`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var CryptoHmacSha256 = &Builtin{
+ Name: "crypto.hmac.sha256",
+ Description: "Returns a string representing the SHA256 HMAC of the input message using the input key.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("input string"),
+ types.Named("key", types.S).Description("key to use"),
+ ),
+ types.Named("y", types.S).Description("SHA256-HMAC of `x`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var CryptoHmacSha512 = &Builtin{
+ Name: "crypto.hmac.sha512",
+ Description: "Returns a string representing the SHA512 HMAC of the input message using the input key.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("input string"),
+ types.Named("key", types.S).Description("key to use"),
+ ),
+ types.Named("y", types.S).Description("SHA512-HMAC of `x`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var CryptoHmacEqual = &Builtin{
+ Name: "crypto.hmac.equal",
+ Description: "Returns a boolean representing the result of comparing two MACs for equality without leaking timing information.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("mac1", types.S).Description("mac1 to compare"),
+ types.Named("mac2", types.S).Description("mac2 to compare"),
+ ),
+ types.Named("result", types.B).Description("`true` if the MACs are equals, `false` otherwise"),
+ ),
+ CanSkipBctx: true,
+}
+
+/**
+ * Graphs.
+ */
+var graphs = category("graph")
+
+var WalkBuiltin = &Builtin{
+ Name: "walk",
+ Relation: true,
+ Description: "Generates `[path, value]` tuples for all nested documents of `x` (recursively). Queries can use `walk` to traverse documents nested under `x`.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.A).Description("value to walk"),
+ ),
+ types.Named("output", types.NewArray(
+ []types.Type{
+ types.NewArray(nil, types.A),
+ types.A,
+ },
+ nil,
+ )).Description("pairs of `path` and `value`: `path` is an array representing the pointer to `value` in `x`. If `path` is assigned a wildcard (`_`), the `walk` function will skip path creation entirely for faster evaluation."),
+ ),
+ Categories: graphs,
+ CanSkipBctx: true,
+}
+
+var ReachableBuiltin = &Builtin{
+ Name: "graph.reachable",
+ Description: "Computes the set of reachable nodes in the graph from a set of starting nodes.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("graph", types.NewObject(
+ nil,
+ types.NewDynamicProperty(
+ types.A,
+ types.NewAny(
+ types.SetOfAny,
+ types.NewArray(nil, types.A)),
+ )),
+ ).Description("object containing a set or array of neighboring vertices"),
+ types.Named("initial", types.NewAny(types.SetOfAny, types.NewArray(nil, types.A))).Description("set or array of root vertices"),
+ ),
+ types.Named("output", types.SetOfAny).Description("set of vertices reachable from the `initial` vertices in the directed `graph`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var ReachablePathsBuiltin = &Builtin{
+ Name: "graph.reachable_paths",
+ Description: "Computes the set of reachable paths in the graph from a set of starting nodes.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("graph", types.NewObject(
+ nil,
+ types.NewDynamicProperty(
+ types.A,
+ types.NewAny(
+ types.SetOfAny,
+ types.NewArray(nil, types.A)),
+ )),
+ ).Description("object containing a set or array of root vertices"),
+ types.Named("initial", types.NewAny(types.SetOfAny, types.NewArray(nil, types.A))).Description("initial paths"), // TODO(sr): copied. is that correct?
+ ),
+ types.Named("output", types.NewSet(types.NewArray(nil, types.A))).Description("paths reachable from the `initial` vertices in the directed `graph`"),
+ ),
+ CanSkipBctx: true,
+}
+
+/**
+ * Type
+ */
+var typesCat = category("types")
+
+var IsNumber = &Builtin{
+ Name: "is_number",
+ Description: "Returns `true` if the input value is a number.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.A).Description("input value"),
+ ),
+ types.Named("result", types.B).Description("`true` if `x` is a number, `false` otherwise."),
+ ),
+ Categories: typesCat,
+ CanSkipBctx: true,
+}
+
+var IsString = &Builtin{
+ Name: "is_string",
+ Description: "Returns `true` if the input value is a string.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.A).Description("input value"),
+ ),
+ types.Named("result", types.B).Description("`true` if `x` is a string, `false` otherwise."),
+ ),
+ Categories: typesCat,
+ CanSkipBctx: true,
+}
+
+var IsBoolean = &Builtin{
+ Name: "is_boolean",
+ Description: "Returns `true` if the input value is a boolean.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.A).Description("input value"),
+ ),
+ types.Named("result", types.B).Description("`true` if `x` is an boolean, `false` otherwise."),
+ ),
+ Categories: typesCat,
+ CanSkipBctx: true,
+}
+
+var IsArray = &Builtin{
+ Name: "is_array",
+ Description: "Returns `true` if the input value is an array.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.A).Description("input value"),
+ ),
+ types.Named("result", types.B).Description("`true` if `x` is an array, `false` otherwise."),
+ ),
+ Categories: typesCat,
+ CanSkipBctx: true,
+}
+
+var IsSet = &Builtin{
+ Name: "is_set",
+ Description: "Returns `true` if the input value is a set.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.A).Description("input value"),
+ ),
+ types.Named("result", types.B).Description("`true` if `x` is a set, `false` otherwise."),
+ ),
+ Categories: typesCat,
+ CanSkipBctx: true,
+}
+
+var IsObject = &Builtin{
+ Name: "is_object",
+ Description: "Returns true if the input value is an object",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.A).Description("input value"),
+ ),
+ types.Named("result", types.B).Description("`true` if `x` is an object, `false` otherwise."),
+ ),
+ Categories: typesCat,
+ CanSkipBctx: true,
+}
+
+var IsNull = &Builtin{
+ Name: "is_null",
+ Description: "Returns `true` if the input value is null.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.A).Description("input value"),
+ ),
+ types.Named("result", types.B).Description("`true` if `x` is null, `false` otherwise."),
+ ),
+ Categories: typesCat,
+ CanSkipBctx: true,
+}
+
+/**
+ * Type Name
+ */
+
+// TypeNameBuiltin returns the type of the input.
+var TypeNameBuiltin = &Builtin{
+ Name: "type_name",
+ Description: "Returns the type of its input value.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.A).Description("input value"),
+ ),
+ types.Named("type", types.S).Description(`one of "null", "boolean", "number", "string", "array", "object", "set"`),
+ ),
+ Categories: typesCat,
+ CanSkipBctx: true,
+}
+
+/**
+ * HTTP Request
+ */
+
+// Marked non-deterministic because HTTP request results can be non-deterministic.
+var HTTPSend = &Builtin{
+ Name: "http.send",
+ Description: "Returns a HTTP response to the given HTTP request.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("request", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).
+ Description("the HTTP request object"),
+ ),
+ types.Named("response", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).
+ Description("the HTTP response object"),
+ ),
+ Nondeterministic: true,
+ CanSkipBctx: false,
+}
+
+/**
+ * GraphQL
+ */
+
+// GraphQLParse returns a pair of AST objects from parsing/validation.
+var GraphQLParse = &Builtin{
+ Name: "graphql.parse",
+ Description: "Returns AST objects for a given GraphQL query and schema after validating the query against the schema. Returns undefined if errors were encountered during parsing or validation. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))).
+ Description("the GraphQL query"),
+ types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))).
+ Description("the GraphQL schema"),
+ ),
+ types.Named("output", types.NewArray([]types.Type{
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ }, nil)).Description("`output` is of the form `[query_ast, schema_ast]`. If the GraphQL query is valid given the provided schema, then `query_ast` and `schema_ast` are objects describing the ASTs for the query and schema."),
+ ),
+ CanSkipBctx: false,
+}
+
+// GraphQLParseAndVerify returns a boolean and a pair of AST object from parsing/validation.
+var GraphQLParseAndVerify = &Builtin{
+ Name: "graphql.parse_and_verify",
+ Description: "Returns a boolean indicating success or failure alongside the parsed ASTs for a given GraphQL query and schema after validating the query against the schema. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))).
+ Description("the GraphQL query"),
+ types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))).
+ Description("the GraphQL schema"),
+ ),
+ types.Named("output", types.NewArray([]types.Type{
+ types.B,
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ }, nil)).Description(" `output` is of the form `[valid, query_ast, schema_ast]`. If the query is valid given the provided schema, then `valid` is `true`, and `query_ast` and `schema_ast` are objects describing the ASTs for the GraphQL query and schema. Otherwise, `valid` is `false` and `query_ast` and `schema_ast` are `{}`."),
+ ),
+ CanSkipBctx: false,
+}
+
+// GraphQLParseQuery parses the input GraphQL query and returns a JSON
+// representation of its AST.
+var GraphQLParseQuery = &Builtin{
+ Name: "graphql.parse_query",
+ Description: "Returns an AST object for a GraphQL query.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("query", types.S).Description("GraphQL query string"),
+ ),
+ types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("AST object for the GraphQL query."),
+ ),
+ CanSkipBctx: true,
+}
+
+// GraphQLParseSchema parses the input GraphQL schema and returns a JSON
+// representation of its AST.
+var GraphQLParseSchema = &Builtin{
+ Name: "graphql.parse_schema",
+ Description: "Returns an AST object for a GraphQL schema.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("schema", types.S).Description("GraphQL schema string"),
+ ),
+ types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("AST object for the GraphQL schema."),
+ ),
+ CanSkipBctx: false,
+}
+
+// GraphQLIsValid returns true if a GraphQL query is valid with a given
+// schema, and returns false for all other inputs.
+var GraphQLIsValid = &Builtin{
+ Name: "graphql.is_valid",
+ Description: "Checks that a GraphQL query is valid against a given schema. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))).
+ Description("the GraphQL query"),
+ types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))).
+ Description("the GraphQL schema"),
+ ),
+ types.Named("output", types.B).Description("`true` if the query is valid under the given schema. `false` otherwise."),
+ ),
+ CanSkipBctx: false,
+}
+
+// GraphQLSchemaIsValid returns true if the input is valid GraphQL schema,
+// and returns false for all other inputs.
+var GraphQLSchemaIsValid = &Builtin{
+ Name: "graphql.schema_is_valid",
+ Description: "Checks that the input is a valid GraphQL schema. The schema can be either a GraphQL string or an AST object from the other GraphQL builtin functions.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))).
+ Description("the schema to verify"),
+ ),
+ types.Named("output", types.B).Description("`true` if the schema is a valid GraphQL schema. `false` otherwise."),
+ ),
+ CanSkipBctx: false,
+}
+
+/**
+ * JSON Schema
+ */
+
+// JSONSchemaVerify returns empty string if the input is valid JSON schema
+// and returns error string for all other inputs.
+var JSONSchemaVerify = &Builtin{
+ Name: "json.verify_schema",
+ Description: "Checks that the input is a valid JSON schema object. The schema can be either a JSON string or an JSON object.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))).
+ Description("the schema to verify"),
+ ),
+ types.Named("output", types.NewArray([]types.Type{
+ types.B,
+ types.NewAny(types.S, types.Null{}),
+ }, nil)).
+ Description("`output` is of the form `[valid, error]`. If the schema is valid, then `valid` is `true`, and `error` is `null`. Otherwise, `valid` is `false` and `error` is a string describing the error."),
+ ),
+ Categories: objectCat,
+ CanSkipBctx: true,
+}
+
+// JSONMatchSchema returns empty array if the document matches the JSON schema,
+// and returns non-empty array with error objects otherwise.
+var JSONMatchSchema = &Builtin{
+ Name: "json.match_schema",
+ Description: "Checks that the document matches the JSON schema.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("document", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))).
+ Description("document to verify by schema"),
+ types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))).
+ Description("schema to verify document by"),
+ ),
+ types.Named("output", types.NewArray([]types.Type{
+ types.B,
+ types.NewArray(
+ nil, types.NewObject(
+ []*types.StaticProperty{
+ {Key: "error", Value: types.S},
+ {Key: "type", Value: types.S},
+ {Key: "field", Value: types.S},
+ {Key: "desc", Value: types.S},
+ },
+ nil,
+ ),
+ ),
+ }, nil)).
+ Description("`output` is of the form `[match, errors]`. If the document is valid given the schema, then `match` is `true`, and `errors` is an empty array. Otherwise, `match` is `false` and `errors` is an array of objects describing the error(s)."),
+ ),
+ Categories: objectCat,
+ CanSkipBctx: false,
+}
+
+/**
+ * Cloud Provider Helper Functions
+ */
+var providersAWSCat = category("providers.aws")
+
+var ProvidersAWSSignReqObj = &Builtin{
+ Name: "providers.aws.sign_req",
+ Description: "Signs an HTTP request object for Amazon Web Services. Currently implements [AWS Signature Version 4 request signing](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) by the `Authorization` header method.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("request", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).
+ Description("HTTP request object"),
+ types.Named("aws_config", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).
+ Description("AWS configuration object"),
+ types.Named("time_ns", types.N).Description("nanoseconds since the epoch"),
+ ),
+ types.Named("signed_request", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).
+ Description("HTTP request object with `Authorization` header"),
+ ),
+ Categories: providersAWSCat,
+ CanSkipBctx: true,
+}
+
+/**
+ * Rego
+ */
+
+var RegoParseModule = &Builtin{
+ Name: "rego.parse_module",
+ Description: "Parses the input Rego string and returns an object representation of the AST.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("filename", types.S).Description("file name to attach to AST nodes' locations"),
+ types.Named("rego", types.S).Description("Rego module"),
+ ),
+ types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).
+ Description("AST object for the Rego module"),
+ ),
+ CanSkipBctx: true,
+}
+
+var RegoMetadataChain = &Builtin{
+ Name: "rego.metadata.chain",
+ Description: `Returns the chain of metadata for the active rule.
+Ordered starting at the active rule, going outward to the most distant node in its package ancestry.
+A chain entry is a JSON document with two members: "path", an array representing the path of the node; and "annotations", a JSON document containing the annotations declared for the node.
+The first entry in the chain always points to the active rule, even if it has no declared annotations (in which case the "annotations" member is not present).`,
+ Decl: types.NewFunction(
+ types.Args(),
+ types.Named("chain", types.NewArray(nil, types.A)).Description("each array entry represents a node in the path ancestry (chain) of the active rule that also has declared annotations"),
+ ),
+ CanSkipBctx: true,
+}
+
+// RegoMetadataRule returns the metadata for the active rule
+var RegoMetadataRule = &Builtin{
+ Name: "rego.metadata.rule",
+ Description: "Returns annotations declared for the active rule and using the _rule_ scope.",
+ Decl: types.NewFunction(
+ types.Args(),
+ types.Named("output", types.A).Description("\"rule\" scope annotations for this rule; empty object if no annotations exist"),
+ ),
+ CanSkipBctx: true,
+}
+
+/**
+ * OPA
+ */
+
+// Marked non-deterministic because of unpredictable config/environment-dependent results.
+var OPARuntime = &Builtin{
+ Name: "opa.runtime",
+ Description: "Returns an object that describes the runtime environment where OPA is deployed.",
+ Decl: types.NewFunction(
+ nil,
+ types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).
+ Description("includes a `config` key if OPA was started with a configuration file; an `env` key containing the environment variables that the OPA process was started with; includes `version` and `commit` keys containing the version and build commit of OPA."),
+ ),
+ Nondeterministic: true,
+ CanSkipBctx: false,
+}
+
+/**
+ * Trace
+ */
+var tracing = category("tracing")
+
+var Trace = &Builtin{
+ Name: "trace",
+ Description: "Emits `note` as a `Note` event in the query explanation. Query explanations show the exact expressions evaluated by OPA during policy execution. For example, `trace(\"Hello There!\")` includes `Note \"Hello There!\"` in the query explanation. To include variables in the message, use `sprintf`. For example, `person := \"Bob\"; trace(sprintf(\"Hello There! %v\", [person]))` will emit `Note \"Hello There! Bob\"` inside of the explanation.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("note", types.S).Description("the note to include"),
+ ),
+ types.Named("result", types.B).Description("always `true`"),
+ ),
+ Categories: tracing,
+ CanSkipBctx: false,
+}
+
+/**
+ * Glob
+ */
+
+var GlobMatch = &Builtin{
+ Name: "glob.match",
+ Description: "Parses and matches strings against the glob notation. Not to be confused with `regex.globs_match`.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("pattern", types.S).Description("glob pattern"),
+ types.Named("delimiters", types.NewAny(
+ types.NewArray(nil, types.S),
+ types.Nl,
+ )).Description("glob pattern delimiters, e.g. `[\".\", \":\"]`, defaults to `[\".\"]` if unset. If `delimiters` is `null`, glob match without delimiter."),
+ types.Named("match", types.S).Description("string to match against `pattern`"),
+ ),
+ types.Named("result", types.B).Description("true if `match` can be found in `pattern` which is separated by `delimiters`"),
+ ),
+ CanSkipBctx: false,
+}
+
+var GlobQuoteMeta = &Builtin{
+ Name: "glob.quote_meta",
+ Description: "Returns a string which represents a version of the pattern where all asterisks have been escaped.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("pattern", types.S).Description("glob pattern"),
+ ),
+ types.Named("output", types.S).Description("the escaped string of `pattern`"),
+ ),
+ CanSkipBctx: true,
+ // TODO(sr): example for this was: Calling ``glob.quote_meta("*.github.com", output)`` returns ``\\*.github.com`` as ``output``.
+}
+
+/**
+ * Networking
+ */
+
+var NetCIDRIntersects = &Builtin{
+ Name: "net.cidr_intersects",
+ Description: "Checks if a CIDR intersects with another CIDR (e.g. `192.168.0.0/16` overlaps with `192.168.1.0/24`). Supports both IPv4 and IPv6 notations.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("cidr1", types.S).Description("first CIDR"),
+ types.Named("cidr2", types.S).Description("second CIDR"),
+ ),
+ types.Named("result", types.B).Description("`true` if `cidr1` intersects with `cidr2`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var NetCIDRExpand = &Builtin{
+ Name: "net.cidr_expand",
+ Description: "Expands CIDR to set of hosts (e.g., `net.cidr_expand(\"192.168.0.0/30\")` generates 4 hosts: `{\"192.168.0.0\", \"192.168.0.1\", \"192.168.0.2\", \"192.168.0.3\"}`).",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("cidr", types.S).Description("CIDR to expand"),
+ ),
+ types.Named("hosts", types.SetOfStr).Description("set of IP addresses the CIDR `cidr` expands to"),
+ ),
+ CanSkipBctx: false,
+}
+
+var NetCIDRContains = &Builtin{
+ Name: "net.cidr_contains",
+ Description: "Checks if a CIDR or IP is contained within another CIDR. `output` is `true` if `cidr_or_ip` (e.g. `127.0.0.64/26` or `127.0.0.1`) is contained within `cidr` (e.g. `127.0.0.1/24`) and `false` otherwise. Supports both IPv4 and IPv6 notations.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("cidr", types.S).Description("CIDR to check against"),
+ types.Named("cidr_or_ip", types.S).Description("CIDR or IP to check"),
+ ),
+ types.Named("result", types.B).Description("`true` if `cidr_or_ip` is contained within `cidr`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var NetCIDRContainsMatches = &Builtin{
+ Name: "net.cidr_contains_matches",
+ Description: "Checks if collections of cidrs or ips are contained within another collection of cidrs and returns matches. " +
+ "This function is similar to `net.cidr_contains` except it allows callers to pass collections of CIDRs or IPs as arguments and returns the matches (as opposed to a boolean result indicating a match between two CIDRs/IPs).",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("cidrs", netCidrContainsMatchesOperandType).Description("CIDRs to check against"),
+ types.Named("cidrs_or_ips", netCidrContainsMatchesOperandType).Description("CIDRs or IPs to check"),
+ ),
+ types.Named("output", types.NewSet(types.NewArray([]types.Type{types.A, types.A}, nil))).Description("tuples identifying matches where `cidrs_or_ips` are contained within `cidrs`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var NetCIDRMerge = &Builtin{
+ Name: "net.cidr_merge",
+ Description: "Merges IP addresses and subnets into the smallest possible list of CIDRs (e.g., `net.cidr_merge([\"192.0.128.0/24\", \"192.0.129.0/24\"])` generates `{\"192.0.128.0/23\"}`." +
+ `This function merges adjacent subnets where possible, those contained within others and also removes any duplicates.
+Supports both IPv4 and IPv6 notations. IPv6 inputs need a prefix length (e.g. "/128").`,
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("addrs", types.NewAny(
+ types.NewArray(nil, types.NewAny(types.S)),
+ types.SetOfStr,
+ )).Description("CIDRs or IP addresses"),
+ ),
+ types.Named("output", types.SetOfStr).Description("smallest possible set of CIDRs obtained after merging the provided list of IP addresses and subnets in `addrs`"),
+ ),
+ CanSkipBctx: true,
+}
+
+var NetCIDRIsValid = &Builtin{
+ Name: "net.cidr_is_valid",
+ Description: "Parses an IPv4/IPv6 CIDR and returns a boolean indicating if the provided CIDR is valid.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("cidr", types.S).Description("CIDR to validate"),
+ ),
+ types.Named("result", types.B).Description("`true` if `cidr` is a valid CIDR"),
+ ),
+ CanSkipBctx: true,
+}
+
+var netCidrContainsMatchesOperandType = types.NewAny(
+ types.S,
+ types.NewArray(nil, types.NewAny(
+ types.S,
+ types.NewArray(nil, types.A),
+ )),
+ types.NewSet(types.NewAny(
+ types.S,
+ types.NewArray(nil, types.A),
+ )),
+ types.NewObject(nil, types.NewDynamicProperty(
+ types.S,
+ types.NewAny(
+ types.S,
+ types.NewArray(nil, types.A),
+ ),
+ )),
+)
+
+// Marked non-deterministic because DNS resolution results can be non-deterministic.
+var NetLookupIPAddr = &Builtin{
+ Name: "net.lookup_ip_addr",
+ Description: "Returns the set of IP addresses (both v4 and v6) that the passed-in `name` resolves to using the standard name resolution mechanisms available.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("name", types.S).Description("domain name to resolve"),
+ ),
+ types.Named("addrs", types.SetOfStr).Description("IP addresses (v4 and v6) that `name` resolves to"),
+ ),
+ Nondeterministic: true,
+ CanSkipBctx: false,
+}
+
+/**
+ * Semantic Versions
+ */
+
+var SemVerIsValid = &Builtin{
+ Name: "semver.is_valid",
+ Description: "Validates that the input is a valid SemVer string.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("vsn", types.A).Description("input to validate"),
+ ),
+ types.Named("result", types.B).Description("`true` if `vsn` is a valid SemVer; `false` otherwise"),
+ ),
+ CanSkipBctx: true,
+}
+
+var SemVerCompare = &Builtin{
+ Name: "semver.compare",
+ Description: "Compares valid SemVer formatted version strings.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("a", types.S).Description("first version string"),
+ types.Named("b", types.S).Description("second version string"),
+ ),
+ types.Named("result", types.N).Description("`-1` if `a < b`; `1` if `a > b`; `0` if `a == b`"),
+ ),
+ CanSkipBctx: true,
+}
+
+/**
+ * Printing
+ */
+
+// Print is a special built-in function that writes zero or more operands
+// to a message buffer. The caller controls how the buffer is displayed. The
+// operands may be of any type. Furthermore, unlike other built-in functions,
+// undefined operands DO NOT cause the print() function to fail during
+// evaluation.
+var Print = &Builtin{
+ Name: "print",
+ Decl: types.NewVariadicFunction(nil, types.A, nil),
+}
+
+// InternalPrint represents the internal implementation of the print() function.
+// The compiler rewrites print() calls to refer to the internal implementation.
+var InternalPrint = &Builtin{
+ Name: "internal.print",
+ Decl: types.NewFunction([]types.Type{types.NewArray(nil, types.SetOfAny)}, nil),
+}
+
+var InternalTestCase = &Builtin{
+ Name: "internal.test_case",
+ Decl: types.NewFunction([]types.Type{types.NewArray(nil, types.A)}, nil),
+}
+
+/**
+ * Deprecated built-ins.
+ */
+
+// SetDiff has been replaced by the minus built-in.
+var SetDiff = &Builtin{
+ Name: "set_diff",
+ Decl: types.NewFunction(
+ types.Args(
+ types.SetOfAny,
+ types.SetOfAny,
+ ),
+ types.SetOfAny,
+ ),
+ deprecated: true,
+ CanSkipBctx: true,
+}
+
+// NetCIDROverlap has been replaced by the `net.cidr_contains` built-in.
+var NetCIDROverlap = &Builtin{
+ Name: "net.cidr_overlap",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+ deprecated: true,
+ CanSkipBctx: true,
+}
+
+// CastArray checks the underlying type of the input. If it is array or set, an array
+// containing the values is returned. If it is not an array, an error is thrown.
+var CastArray = &Builtin{
+ Name: "cast_array",
+ Decl: types.NewFunction(
+ types.Args(types.A),
+ types.NewArray(nil, types.A),
+ ),
+ deprecated: true,
+ CanSkipBctx: true,
+}
+
+// CastSet checks the underlying type of the input.
+// If it is a set, the set is returned.
+// If it is an array, the array is returned in set form (all duplicates removed)
+// If neither, an error is thrown
+var CastSet = &Builtin{
+ Name: "cast_set",
+ Decl: types.NewFunction(
+ types.Args(types.A),
+ types.SetOfAny,
+ ),
+ deprecated: true,
+ CanSkipBctx: true,
+}
+
+// CastString returns input if it is a string; if not returns error.
+// For formatting variables, see sprintf
+var CastString = &Builtin{
+ Name: "cast_string",
+ Decl: types.NewFunction(
+ types.Args(types.A),
+ types.S,
+ ),
+ deprecated: true,
+ CanSkipBctx: true,
+}
+
+// CastBoolean returns input if it is a boolean; if not returns error.
+var CastBoolean = &Builtin{
+ Name: "cast_boolean",
+ Decl: types.NewFunction(
+ types.Args(types.A),
+ types.B,
+ ),
+ deprecated: true,
+ CanSkipBctx: true,
+}
+
+// CastNull returns null if input is null; if not returns error.
+var CastNull = &Builtin{
+ Name: "cast_null",
+ Decl: types.NewFunction(
+ types.Args(types.A),
+ types.Nl,
+ ),
+ deprecated: true,
+ CanSkipBctx: true,
+}
+
+// CastObject returns the given object if it is null; throws an error otherwise
+var CastObject = &Builtin{
+ Name: "cast_object",
+ Decl: types.NewFunction(
+ types.Args(types.A),
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ ),
+ deprecated: true,
+ CanSkipBctx: true,
+}
+
+// RegexMatchDeprecated declares `re_match` which has been deprecated. Use `regex.match` instead.
+var RegexMatchDeprecated = &Builtin{
+ Name: "re_match",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+ deprecated: true,
+ CanSkipBctx: false,
+}
+
+// All takes a list and returns true if all of the items
+// are true. A collection of length 0 returns true.
+var All = &Builtin{
+ Name: "all",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewAny(
+ types.SetOfAny,
+ types.NewArray(nil, types.A),
+ ),
+ ),
+ types.B,
+ ),
+ deprecated: true,
+ CanSkipBctx: true,
+}
+
+// Any takes a collection and returns true if any of the items
+// is true. A collection of length 0 returns false.
+var Any = &Builtin{
+ Name: "any",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewAny(
+ types.SetOfAny,
+ types.NewArray(nil, types.A),
+ ),
+ ),
+ types.B,
+ ),
+ deprecated: true,
+ CanSkipBctx: true,
+}
+
+// Builtin represents a built-in function supported by OPA. Every built-in
+// function is uniquely identified by a name.
+type Builtin struct {
+ Name string `json:"name"` // Unique name of built-in function, e.g., (arg1,arg2,...,argN)
+ Description string `json:"description,omitempty"` // Description of what the built-in function does.
+
+ // Categories of the built-in function. Omitted for namespaced
+ // built-ins, i.e. "array.concat" is taken to be of the "array" category.
+ // "minus" for example, is part of two categories: numbers and sets. (NOTE(sr): aspirational)
+ Categories []string `json:"categories,omitempty"`
+
+ Decl *types.Function `json:"decl"` // Built-in function type declaration.
+ Infix string `json:"infix,omitempty"` // Unique name of infix operator. Default should be unset.
+ Relation bool `json:"relation,omitempty"` // Indicates if the built-in acts as a relation.
+ deprecated bool `json:"-"` // Indicates if the built-in has been deprecated.
+ CanSkipBctx bool `json:"-"` // Built-in needs no data from the built-in context.
+ Nondeterministic bool `json:"nondeterministic,omitempty"` // Indicates if the built-in returns non-deterministic results.
+}
+
+// category is a helper for specifying a Builtin's Categories
+func category(cs ...string) []string {
+ return cs
+}
+
+// Minimal returns a shallow copy of b with the descriptions and categories and
+// named arguments stripped out.
+func (b *Builtin) Minimal() *Builtin {
+ cpy := *b
+ fargs := b.Decl.FuncArgs()
+ if fargs.Variadic != nil {
+ cpy.Decl = types.NewVariadicFunction(fargs.Args, fargs.Variadic, b.Decl.Result())
+ } else {
+ cpy.Decl = types.NewFunction(fargs.Args, b.Decl.Result())
+ }
+ cpy.Categories = nil
+ cpy.Description = ""
+ return &cpy
+}
+
+// IsDeprecated returns true if the Builtin function is deprecated and will be removed in a future release.
+func (b *Builtin) IsDeprecated() bool {
+ return b.deprecated
+}
+
+// IsDeterministic returns true if the Builtin function returns non-deterministic results.
+func (b *Builtin) IsNondeterministic() bool {
+ return b.Nondeterministic
+}
+
+// Expr creates a new expression for the built-in with the given operands.
+func (b *Builtin) Expr(operands ...*Term) *Expr {
+ ts := make([]*Term, len(operands)+1)
+ ts[0] = NewTerm(b.Ref())
+ for i := range operands {
+ ts[i+1] = operands[i]
+ }
+ return &Expr{
+ Terms: ts,
+ }
+}
+
+// Call creates a new term for the built-in with the given operands.
+func (b *Builtin) Call(operands ...*Term) *Term {
+ call := make(Call, len(operands)+1)
+ call[0] = NewTerm(b.Ref())
+ for i := range operands {
+ call[i+1] = operands[i]
+ }
+ return NewTerm(call)
+}
+
+// Ref returns a Ref that refers to the built-in function.
+func (b *Builtin) Ref() Ref {
+ parts := strings.Split(b.Name, ".")
+ ref := make(Ref, len(parts))
+ ref[0] = VarTerm(parts[0])
+ for i := 1; i < len(parts); i++ {
+ ref[i] = InternedTerm(parts[i])
+ }
+ return ref
+}
+
+// IsTargetPos returns true if a variable in the i-th position will be bound by
+// evaluating the call expression.
+func (b *Builtin) IsTargetPos(i int) bool {
+ return b.Decl.Arity() == i
+}
+
+// NeedsBuiltInContext returns true if the built-in depends on the built-in context.
+func (b *Builtin) NeedsBuiltInContext() bool {
+ // Negated, so built-ins we don't know about (and who don't know about this option)
+ // will get a built-in context provided to them.
+ return !b.CanSkipBctx
+}
+
+func init() {
+ BuiltinMap = map[string]*Builtin{}
+ for _, b := range &DefaultBuiltins {
+ RegisterBuiltin(b)
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/capabilities.go b/vendor/github.com/open-policy-agent/opa/v1/ast/capabilities.go
new file mode 100644
index 0000000000..844cb66f0b
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/capabilities.go
@@ -0,0 +1,290 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "bytes"
+ _ "embed"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "slices"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/open-policy-agent/opa/internal/semver"
+ "github.com/open-policy-agent/opa/internal/wasm/sdk/opa/capabilities"
+ caps "github.com/open-policy-agent/opa/v1/capabilities"
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+// VersonIndex contains an index from built-in function name, language feature,
+// and future rego keyword to version number. During the build, this is used to
+// create an index of the minimum version required for the built-in/feature/kw.
+type VersionIndex struct {
+ Builtins map[string]semver.Version `json:"builtins"`
+ Features map[string]semver.Version `json:"features"`
+ Keywords map[string]semver.Version `json:"keywords"`
+}
+
+// NOTE(tsandall): this file is generated by internal/cmd/genversionindex/main.go
+// and run as part of go:generate. We generate the version index as part of the
+// build process because it's relatively expensive to build (it takes ~500ms on
+// my machine) and never changes.
+//
+//go:embed version_index.json
+var versionIndexBs []byte
+
+// init only on demand, as JSON unmarshalling comes with some cost, and contributes
+// noise to things like pprof stats
+var minVersionIndexOnce = sync.OnceValue(func() VersionIndex {
+ var vi VersionIndex
+ if err := json.Unmarshal(versionIndexBs, &vi); err != nil {
+ panic(err)
+ }
+ return vi
+})
+
+// In the compiler, we used this to check that we're OK working with ref heads.
+// If this isn't present, we'll fail. This is to ensure that older versions of
+// OPA can work with policies that we're compiling -- if they don't know ref
+// heads, they wouldn't be able to parse them.
+const FeatureRefHeadStringPrefixes = "rule_head_ref_string_prefixes"
+const FeatureRefHeads = "rule_head_refs"
+const FeatureRegoV1 = "rego_v1"
+const FeatureRegoV1Import = "rego_v1_import"
+const FeatureKeywordsInRefs = "keywords_in_refs"
+
+// Features carries the default features supported by this version of OPA.
+// Use RegisterFeatures to add to them.
+var Features = []string{
+ FeatureRegoV1,
+ FeatureKeywordsInRefs,
+}
+
+// RegisterFeatures lets applications wrapping OPA register features, to be
+// included in `ast.CapabilitiesForThisVersion()`.
+func RegisterFeatures(fs ...string) {
+ for i := range fs {
+ if slices.Contains(Features, fs[i]) {
+ continue
+ }
+ Features = append(Features, fs[i])
+ }
+}
+
+// Capabilities defines a structure containing data that describes the capabilities
+// or features supported by a particular version of OPA.
+type Capabilities struct {
+ Builtins []*Builtin `json:"builtins,omitempty"`
+ FutureKeywords []string `json:"future_keywords,omitempty"`
+ WasmABIVersions []WasmABIVersion `json:"wasm_abi_versions,omitempty"`
+
+ // Features is a bit of a mixed bag for checking that an older version of OPA
+ // is able to do what needs to be done.
+ // TODO(sr): find better words ^^
+ Features []string `json:"features,omitempty"`
+
+ // allow_net is an array of hostnames or IP addresses, that an OPA instance is
+ // allowed to connect to.
+ // If omitted, ANY host can be connected to. If empty, NO host can be connected to.
+ // As of now, this only controls fetching remote refs for using JSON Schemas in
+ // the type checker.
+ // TODO(sr): support ports to further restrict connection peers
+ AllowNet []string `json:"allow_net,omitempty"`
+}
+
+// WasmABIVersion captures the Wasm ABI version. Its `Minor` version is indicating
+// backwards-compatible changes.
+type WasmABIVersion struct {
+ Version int `json:"version"`
+ Minor int `json:"minor_version"`
+}
+
+type CapabilitiesOptions struct {
+ regoVersion RegoVersion
+}
+
+func newCapabilitiesOptions(opts []CapabilitiesOption) CapabilitiesOptions {
+ co := CapabilitiesOptions{}
+ for _, opt := range opts {
+ opt(&co)
+ }
+ return co
+}
+
+type CapabilitiesOption func(*CapabilitiesOptions)
+
+func CapabilitiesRegoVersion(regoVersion RegoVersion) CapabilitiesOption {
+ return func(o *CapabilitiesOptions) {
+ o.regoVersion = regoVersion
+ }
+}
+
+// CapabilitiesForThisVersion returns the capabilities of this version of OPA.
+func CapabilitiesForThisVersion(opts ...CapabilitiesOption) *Capabilities {
+ co := newCapabilitiesOptions(opts)
+
+ f := &Capabilities{}
+
+ for _, vers := range capabilities.ABIVersions() {
+ f.WasmABIVersions = append(f.WasmABIVersions, WasmABIVersion{Version: vers[0], Minor: vers[1]})
+ }
+
+ f.Builtins = make([]*Builtin, len(Builtins))
+ copy(f.Builtins, Builtins)
+
+ slices.SortFunc(f.Builtins, func(a, b *Builtin) int {
+ return strings.Compare(a.Name, b.Name)
+ })
+
+ switch co.regoVersion {
+ case RegoV0, RegoV0CompatV1:
+ for kw := range allFutureKeywords {
+ f.FutureKeywords = append(f.FutureKeywords, kw)
+ }
+
+ f.Features = []string{
+ FeatureRefHeadStringPrefixes,
+ FeatureRefHeads,
+ FeatureRegoV1Import,
+ FeatureRegoV1, // Included in v0 capabilities to allow v1 bundles in --v0-compatible mode
+ FeatureKeywordsInRefs,
+ }
+ default:
+ for kw := range futureKeywords {
+ f.FutureKeywords = append(f.FutureKeywords, kw)
+ }
+
+ f.Features = make([]string, len(Features))
+ copy(f.Features, Features)
+ }
+
+ sort.Strings(f.FutureKeywords)
+ sort.Strings(f.Features)
+
+ return f
+}
+
+// LoadCapabilitiesJSON loads a JSON serialized capabilities structure from the reader r.
+func LoadCapabilitiesJSON(r io.Reader) (*Capabilities, error) {
+ d := util.NewJSONDecoder(r)
+ var c Capabilities
+ return &c, d.Decode(&c)
+}
+
+// LoadCapabilitiesVersion loads a JSON serialized capabilities structure from the specific version.
+func LoadCapabilitiesVersion(version string) (*Capabilities, error) {
+ cvs, err := LoadCapabilitiesVersions()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, cv := range cvs {
+ if cv == version {
+ cont, err := caps.FS.ReadFile(cv + ".json")
+ if err != nil {
+ return nil, err
+ }
+
+ return LoadCapabilitiesJSON(bytes.NewReader(cont))
+ }
+
+ }
+ return nil, fmt.Errorf("no capabilities version found %v", version)
+}
+
+// LoadCapabilitiesFile loads a JSON serialized capabilities structure from a file.
+func LoadCapabilitiesFile(file string) (*Capabilities, error) {
+ fd, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ defer fd.Close()
+ return LoadCapabilitiesJSON(fd)
+}
+
+// LoadCapabilitiesVersions loads all capabilities versions
+func LoadCapabilitiesVersions() ([]string, error) {
+ ents, err := caps.FS.ReadDir(".")
+ if err != nil {
+ return nil, err
+ }
+
+ capabilitiesVersions := make([]string, 0, len(ents))
+ for _, ent := range ents {
+ capabilitiesVersions = append(capabilitiesVersions, strings.Replace(ent.Name(), ".json", "", 1))
+ }
+
+ slices.SortStableFunc(capabilitiesVersions, semver.Compare)
+
+ return capabilitiesVersions, nil
+}
+
+// MinimumCompatibleVersion returns the minimum compatible OPA version based on
+// the built-ins, features, and keywords in c.
+func (c *Capabilities) MinimumCompatibleVersion() (string, bool) {
+ var maxVersion semver.Version
+
+ // this is the oldest OPA release that includes capabilities
+ if err := maxVersion.Set("0.17.0"); err != nil {
+ panic("unreachable")
+ }
+
+ minVersionIndex := minVersionIndexOnce()
+
+ for _, bi := range c.Builtins {
+ v, ok := minVersionIndex.Builtins[bi.Name]
+ if !ok {
+ return "", false
+ }
+ if v.Compare(maxVersion) > 0 {
+ maxVersion = v
+ }
+ }
+
+ for _, kw := range c.FutureKeywords {
+ v, ok := minVersionIndex.Keywords[kw]
+ if !ok {
+ return "", false
+ }
+ if v.Compare(maxVersion) > 0 {
+ maxVersion = v
+ }
+ }
+
+ for _, feat := range c.Features {
+ v, ok := minVersionIndex.Features[feat]
+ if !ok {
+ return "", false
+ }
+ if v.Compare(maxVersion) > 0 {
+ maxVersion = v
+ }
+ }
+
+ return maxVersion.String(), true
+}
+
+func (c *Capabilities) ContainsFeature(feature string) bool {
+ return slices.Contains(c.Features, feature)
+}
+
+// addBuiltinSorted inserts a built-in into c in sorted order. An existing built-in with the same name
+// will be overwritten.
+func (c *Capabilities) addBuiltinSorted(bi *Builtin) {
+ i := sort.Search(len(c.Builtins), func(x int) bool {
+ return c.Builtins[x].Name >= bi.Name
+ })
+ if i < len(c.Builtins) && bi.Name == c.Builtins[i].Name {
+ c.Builtins[i] = bi
+ return
+ }
+ c.Builtins = append(c.Builtins, nil)
+ copy(c.Builtins[i+1:], c.Builtins[i:])
+ c.Builtins[i] = bi
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/check.go b/vendor/github.com/open-policy-agent/opa/v1/ast/check.go
new file mode 100644
index 0000000000..0da7e26514
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/check.go
@@ -0,0 +1,1329 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "fmt"
+ "slices"
+ "sort"
+ "strings"
+
+ "github.com/open-policy-agent/opa/v1/types"
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+type varRewriter func(Ref) Ref
+
+// exprChecker defines the interface for executing type checking on a single
+// expression. The exprChecker must update the provided TypeEnv with inferred
+// types of vars.
+type exprChecker func(*TypeEnv, *Expr) *Error
+
+// typeChecker implements type checking on queries and rules. Errors are
+// accumulated on the typeChecker so that a single run can report multiple
+// issues.
+type typeChecker struct {
+ builtins map[string]*Builtin
+ required *Capabilities
+ errs Errors
+ exprCheckers map[string]exprChecker
+ varRewriter varRewriter
+ ss *SchemaSet
+ allowNet []string
+ input types.Type
+ allowUndefinedFuncs bool
+ schemaTypes map[string]types.Type
+}
+
+// newTypeChecker returns a new typeChecker object that has no errors.
+func newTypeChecker() *typeChecker {
+ return &typeChecker{
+ exprCheckers: map[string]exprChecker{
+ "eq": checkExprEq,
+ },
+ }
+}
+
+func (tc *typeChecker) newEnv(exist *TypeEnv) *TypeEnv {
+ if exist != nil {
+ return exist.wrap()
+ }
+ env := newTypeEnv(tc.copy)
+ if tc.input != nil {
+ env.tree.Put(InputRootRef, tc.input)
+ }
+ return env
+}
+
+func (tc *typeChecker) copy() *typeChecker {
+ return newTypeChecker().
+ WithVarRewriter(tc.varRewriter).
+ WithSchemaSet(tc.ss).
+ WithSchemaTypes(tc.schemaTypes).
+ WithAllowNet(tc.allowNet).
+ WithInputType(tc.input).
+ WithAllowUndefinedFunctionCalls(tc.allowUndefinedFuncs).
+ WithBuiltins(tc.builtins).
+ WithRequiredCapabilities(tc.required)
+}
+
+func (tc *typeChecker) WithRequiredCapabilities(c *Capabilities) *typeChecker {
+ tc.required = c
+ return tc
+}
+
+func (tc *typeChecker) WithBuiltins(builtins map[string]*Builtin) *typeChecker {
+ tc.builtins = builtins
+ return tc
+}
+
+func (tc *typeChecker) WithSchemaSet(ss *SchemaSet) *typeChecker {
+ tc.ss = ss
+ return tc
+}
+
+func (tc *typeChecker) WithSchemaTypes(schemaTypes map[string]types.Type) *typeChecker {
+ tc.schemaTypes = schemaTypes
+ return tc
+}
+
+func (tc *typeChecker) WithAllowNet(hosts []string) *typeChecker {
+ tc.allowNet = hosts
+ return tc
+}
+
+func (tc *typeChecker) WithVarRewriter(f varRewriter) *typeChecker {
+ tc.varRewriter = f
+ return tc
+}
+
+func (tc *typeChecker) WithInputType(tpe types.Type) *typeChecker {
+ tc.input = tpe
+ return tc
+}
+
+// WithAllowUndefinedFunctionCalls sets the type checker to allow references to undefined functions.
+// Additionally, the 'CheckUndefinedFuncs' and 'CheckSafetyRuleBodies' compiler stages are skipped.
+func (tc *typeChecker) WithAllowUndefinedFunctionCalls(allow bool) *typeChecker {
+ tc.allowUndefinedFuncs = allow
+ return tc
+}
+
+// Env returns a type environment for the specified built-ins with any other
+// global types configured on the checker. In practice, this is the default
+// environment that other statements will be checked against.
+func (tc *typeChecker) Env(builtins map[string]*Builtin) *TypeEnv {
+ env := tc.newEnv(nil)
+ for _, bi := range builtins {
+ env.tree.Put(bi.Ref(), bi.Decl)
+ }
+ return env
+}
+
+// CheckBody runs type checking on the body and returns a TypeEnv if no errors
+// are found. The resulting TypeEnv wraps the provided one. The resulting
+// TypeEnv will be able to resolve types of vars contained in the body.
+func (tc *typeChecker) CheckBody(env *TypeEnv, body Body) (*TypeEnv, Errors) {
+
+ errors := []*Error{}
+ env = tc.newEnv(env)
+ vis := newRefChecker(env, tc.varRewriter)
+
+ WalkExprs(body, func(expr *Expr) bool {
+
+ closureErrs := tc.checkClosures(env, expr)
+ for _, err := range closureErrs {
+ errors = append(errors, err)
+ }
+
+ hasClosureErrors := len(closureErrs) > 0
+
+ // reset errors from previous iteration
+ vis.errs = nil
+ NewGenericVisitor(vis.Visit).Walk(expr)
+ for _, err := range vis.errs {
+ errors = append(errors, err)
+ }
+
+ hasRefErrors := len(vis.errs) > 0
+
+ if err := tc.checkExpr(env, expr); err != nil {
+ // Suppress this error if a more actionable one has occurred. In
+ // this case, if an error occurred in a ref or closure contained in
+ // this expression, and the error is due to a nil type, then it's
+ // likely to be the result of the more specific error.
+ skip := (hasClosureErrors || hasRefErrors) && causedByNilType(err)
+ if !skip {
+ errors = append(errors, err)
+ }
+ }
+ return true
+ })
+
+ tc.err(errors)
+ return env, errors
+}
+
+// CheckTypes runs type checking on the rules returns a TypeEnv if no errors
+// are found. The resulting TypeEnv wraps the provided one. The resulting
+// TypeEnv will be able to resolve types of refs that refer to rules.
+func (tc *typeChecker) CheckTypes(env *TypeEnv, sorted []util.T, as *AnnotationSet) (*TypeEnv, Errors) {
+ env = tc.newEnv(env)
+ for _, s := range sorted {
+ tc.checkRule(env, as, s.(*Rule))
+ }
+ tc.errs.Sort()
+ return env, tc.errs
+}
+
+func (tc *typeChecker) checkClosures(env *TypeEnv, expr *Expr) Errors {
+ var result Errors
+ WalkClosures(expr, func(x any) bool {
+ switch x := x.(type) {
+ case *ArrayComprehension:
+ _, errs := tc.copy().CheckBody(env, x.Body)
+ if len(errs) > 0 {
+ result = errs
+ return true
+ }
+ case *SetComprehension:
+ _, errs := tc.copy().CheckBody(env, x.Body)
+ if len(errs) > 0 {
+ result = errs
+ return true
+ }
+ case *ObjectComprehension:
+ _, errs := tc.copy().CheckBody(env, x.Body)
+ if len(errs) > 0 {
+ result = errs
+ return true
+ }
+ }
+ return false
+ })
+ return result
+}
+
+func (tc *typeChecker) getSchemaType(schemaAnnot *SchemaAnnotation, rule *Rule) (types.Type, *Error) {
+ if tc.schemaTypes == nil {
+ tc.schemaTypes = make(map[string]types.Type)
+ }
+
+ if len(schemaAnnot.Schema) > 0 {
+ if refType, exists := tc.schemaTypes[schemaAnnot.Schema.String()]; exists {
+ return refType, nil
+ }
+ }
+
+ refType, err := processAnnotation(tc.ss, schemaAnnot, rule, tc.allowNet)
+ if err != nil {
+ return nil, err
+ }
+
+ if refType == nil {
+ return nil, nil
+ }
+
+ // Only add to cache if schema is read from file
+ if len(schemaAnnot.Schema) > 0 {
+ tc.schemaTypes[schemaAnnot.Schema.String()] = refType
+ }
+
+ return refType, nil
+
+}
+
+func (tc *typeChecker) checkRule(env *TypeEnv, as *AnnotationSet, rule *Rule) {
+
+ env = env.wrap()
+
+ schemaAnnots := getRuleAnnotation(as, rule)
+ for _, schemaAnnot := range schemaAnnots {
+ refType, err := tc.getSchemaType(schemaAnnot, rule)
+ if err != nil {
+ tc.err([]*Error{err})
+ continue
+ }
+
+ ref := schemaAnnot.Path
+ // if we do not have a ref or a reftype, we should not evaluate this rule.
+ if ref == nil || refType == nil {
+ continue
+ }
+
+ prefixRef, t := getPrefix(env, ref)
+ if t == nil || len(prefixRef) == len(ref) {
+ env.tree.Put(ref, refType)
+ } else {
+ newType, err := override(ref[len(prefixRef):], t, refType, rule)
+ if err != nil {
+ tc.err([]*Error{err})
+ continue
+ }
+ env.tree.Put(prefixRef, newType)
+ }
+ }
+
+ cpy, err := tc.CheckBody(env, rule.Body)
+ env = env.next
+ path := rule.Ref()
+
+ if len(err) > 0 {
+ // if the rule/function contains an error, add it to the type env so
+ // that expressions that refer to this rule/function do not encounter
+ // type errors.
+ env.tree.Put(path, types.A)
+ return
+ }
+
+ var tpe types.Type
+
+ if len(rule.Head.Args) > 0 {
+ // If args are not referred to in body, infer as any.
+ WalkVars(rule.Head.Args, func(v Var) bool {
+ if cpy.GetByValue(v) == nil {
+ cpy.tree.PutOne(v, types.A)
+ }
+ return false
+ })
+
+ // Construct function type.
+ args := make([]types.Type, len(rule.Head.Args))
+ for i := range len(rule.Head.Args) {
+ args[i] = cpy.GetByValue(rule.Head.Args[i].Value)
+ }
+
+ f := types.NewFunction(args, cpy.Get(rule.Head.Value))
+
+ tpe = f
+ } else {
+ switch rule.Head.RuleKind() {
+ case SingleValue:
+ typeV := cpy.GetByValue(rule.Head.Value.Value)
+ if !path.IsGround() {
+ // e.g. store object[string: whatever] at data.p.q.r, not data.p.q.r[x] or data.p.q.r[x].y[z]
+ objPath := path.DynamicSuffix()
+ path = path.GroundPrefix()
+
+ var err error
+ tpe, err = nestedObject(cpy, objPath, typeV)
+ if err != nil {
+ tc.err([]*Error{NewError(TypeErr, rule.Head.Location, "%s", err.Error())})
+ tpe = nil
+ }
+ } else if typeV != nil {
+ tpe = typeV
+ }
+ case MultiValue:
+ typeK := cpy.GetByValue(rule.Head.Key.Value)
+ if typeK != nil {
+ tpe = types.NewSet(typeK)
+ }
+ }
+ }
+
+ if tpe != nil {
+ env.tree.Insert(path, tpe, env)
+ }
+}
+
+// nestedObject creates a nested structure of object types, where each term on path corresponds to a level in the
+// nesting. Each term in the path only contributes to the dynamic portion of its corresponding object.
+func nestedObject(env *TypeEnv, path Ref, tpe types.Type) (types.Type, error) {
+ if len(path) == 0 {
+ return tpe, nil
+ }
+
+ k := path[0]
+ typeV, err := nestedObject(env, path[1:], tpe)
+ if err != nil {
+ return nil, err
+ }
+ if typeV == nil {
+ return nil, nil
+ }
+
+ var dynamicProperty *types.DynamicProperty
+ typeK := env.GetByValue(k.Value)
+ if typeK == nil {
+ return nil, nil
+ }
+ dynamicProperty = types.NewDynamicProperty(typeK, typeV)
+
+ return types.NewObject(nil, dynamicProperty), nil
+}
+
+func (tc *typeChecker) checkExpr(env *TypeEnv, expr *Expr) *Error {
+ if err := tc.checkExprWith(env, expr, 0); err != nil {
+ return err
+ }
+ if !expr.IsCall() {
+ return nil
+ }
+
+ operator := expr.Operator().String()
+
+ // If the type checker wasn't provided with a required capabilities
+ // structure then just skip. In some cases, type checking might be run
+ // without the need to record what builtins are required.
+ if tc.required != nil && tc.builtins != nil {
+ if bi, ok := tc.builtins[operator]; ok {
+ tc.required.addBuiltinSorted(bi)
+ }
+ }
+
+ checker := tc.exprCheckers[operator]
+ if checker != nil {
+ return checker(env, expr)
+ }
+
+ return tc.checkExprBuiltin(env, expr)
+}
+
+func (tc *typeChecker) checkExprBuiltin(env *TypeEnv, expr *Expr) *Error {
+
+ args := expr.Operands()
+ pre := getArgTypes(env, args)
+
+ // NOTE(tsandall): undefined functions will have been caught earlier in the
+ // compiler. We check for undefined functions before the safety check so
+ // that references to non-existent functions result in undefined function
+ // errors as opposed to unsafe var errors.
+ //
+ // We cannot run type checking before the safety check because part of the
+ // type checker relies on reordering (in particular for references to local
+ // vars).
+ name := expr.Operator()
+ tpe := env.GetByRef(name)
+
+ if tpe == nil {
+ if tc.allowUndefinedFuncs {
+ return nil
+ }
+ return NewError(TypeErr, expr.Location, "undefined function %v", name)
+ }
+
+ // check if the expression refers to a function that contains an error
+ _, ok := tpe.(types.Any)
+ if ok {
+ return nil
+ }
+
+ ftpe, ok := tpe.(*types.Function)
+ if !ok {
+ return NewError(TypeErr, expr.Location, "undefined function %v", name)
+ }
+
+ fargs := ftpe.FuncArgs()
+ namedFargs := ftpe.NamedFuncArgs()
+
+ if ftpe.Result() != nil {
+ fargs.Args = append(fargs.Args, ftpe.Result())
+ namedFargs.Args = append(namedFargs.Args, ftpe.NamedResult())
+ }
+
+ if len(args) > len(fargs.Args) && fargs.Variadic == nil {
+ return newArgError(expr.Location, name, "too many arguments", pre, namedFargs)
+ }
+
+ if len(args) < len(ftpe.FuncArgs().Args) {
+ return newArgError(expr.Location, name, "too few arguments", pre, namedFargs)
+ }
+
+ for i := range args {
+ if !unify1(env, args[i], fargs.Arg(i), false) {
+ post := make([]types.Type, len(args))
+ for i := range args {
+ post[i] = env.GetByValue(args[i].Value)
+ }
+ return newArgError(expr.Location, name, "invalid argument(s)", post, namedFargs)
+ }
+ }
+
+ return nil
+}
+
+func checkExprEq(env *TypeEnv, expr *Expr) *Error {
+
+ pre := getArgTypes(env, expr.Operands())
+
+ if len(pre) < Equality.Decl.Arity() {
+ return newArgError(expr.Location, expr.Operator(), "too few arguments", pre, Equality.Decl.FuncArgs())
+ }
+
+ if Equality.Decl.Arity() < len(pre) {
+ return newArgError(expr.Location, expr.Operator(), "too many arguments", pre, Equality.Decl.FuncArgs())
+ }
+
+ a, b := expr.Operand(0), expr.Operand(1)
+ typeA, typeB := env.GetByValue(a.Value), env.GetByValue(b.Value)
+
+ if !unify2(env, a, typeA, b, typeB) {
+ err := NewError(TypeErr, expr.Location, "match error")
+ err.Details = &UnificationErrDetail{
+ Left: typeA,
+ Right: typeB,
+ }
+ return err
+ }
+
+ return nil
+}
+
+func (tc *typeChecker) checkExprWith(env *TypeEnv, expr *Expr, i int) *Error {
+ if i == len(expr.With) {
+ return nil
+ }
+
+ target, value := expr.With[i].Target, expr.With[i].Value
+ targetType, valueType := env.GetByValue(target.Value), env.GetByValue(value.Value)
+
+ if t, ok := targetType.(*types.Function); ok { // built-in function replacement
+ switch v := valueType.(type) {
+ case *types.Function: // ...by function
+ if !unifies(targetType, valueType) {
+ return newArgError(expr.With[i].Loc(), target.Value.(Ref), "arity mismatch", v.FuncArgs().Args, t.NamedFuncArgs())
+ }
+ default: // ... by value, nothing to check
+ }
+ }
+
+ return tc.checkExprWith(env, expr, i+1)
+}
+
+func unify2(env *TypeEnv, a *Term, typeA types.Type, b *Term, typeB types.Type) bool {
+
+ nilA := types.Nil(typeA)
+ nilB := types.Nil(typeB)
+
+ if nilA && !nilB {
+ return unify1(env, a, typeB, false)
+ } else if nilB && !nilA {
+ return unify1(env, b, typeA, false)
+ } else if !nilA && !nilB {
+ return unifies(typeA, typeB)
+ }
+
+ switch a.Value.(type) {
+ case *Array:
+ return unify2Array(env, a, b)
+ case *object:
+ return unify2Object(env, a, b)
+ case Var:
+ switch b.Value.(type) {
+ case Var:
+ return unify1(env, a, types.A, false) && unify1(env, b, env.GetByValue(a.Value), false)
+ case *Array:
+ return unify2Array(env, b, a)
+ case *object:
+ return unify2Object(env, b, a)
+ }
+ }
+
+ return false
+}
+
+func unify2Array(env *TypeEnv, a *Term, b *Term) bool {
+ arr := a.Value.(*Array)
+ switch bv := b.Value.(type) {
+ case *Array:
+ if arr.Len() == bv.Len() {
+ for i := range arr.Len() {
+ if !unify2(env, arr.Elem(i), env.GetByValue(arr.Elem(i).Value), bv.Elem(i), env.GetByValue(bv.Elem(i).Value)) {
+ return false
+ }
+ }
+ return true
+ }
+ case Var:
+ return unify1(env, a, types.A, false) && unify1(env, b, env.GetByValue(a.Value), false)
+ }
+ return false
+}
+
+func unify2Object(env *TypeEnv, a *Term, b *Term) bool {
+ obj := a.Value.(Object)
+ switch bv := b.Value.(type) {
+ case *object:
+ cv := obj.Intersect(bv)
+ if obj.Len() == bv.Len() && bv.Len() == len(cv) {
+ for i := range cv {
+ if !unify2(env, cv[i][1], env.GetByValue(cv[i][1].Value), cv[i][2], env.GetByValue(cv[i][2].Value)) {
+ return false
+ }
+ }
+ return true
+ }
+ case Var:
+ return unify1(env, a, types.A, false) && unify1(env, b, env.GetByValue(a.Value), false)
+ }
+ return false
+}
+
+func unify1(env *TypeEnv, term *Term, tpe types.Type, union bool) bool {
+ switch v := term.Value.(type) {
+ case *Array:
+ switch tpe := tpe.(type) {
+ case *types.Array:
+ return unify1Array(env, v, tpe, union)
+ case types.Any:
+ if types.Compare(tpe, types.A) == 0 {
+ for i := range v.Len() {
+ unify1(env, v.Elem(i), types.A, true)
+ }
+ return true
+ }
+ unifies := false
+ for i := range tpe {
+ unifies = unify1(env, term, tpe[i], true) || unifies
+ }
+ return unifies
+ }
+ return false
+ case *object:
+ switch tpe := tpe.(type) {
+ case *types.Object:
+ return unify1Object(env, v, tpe, union)
+ case types.Any:
+ if types.Compare(tpe, types.A) == 0 {
+ v.Foreach(func(key, value *Term) {
+ unify1(env, key, types.A, true)
+ unify1(env, value, types.A, true)
+ })
+ return true
+ }
+ unifies := false
+ for i := range tpe {
+ unifies = unify1(env, term, tpe[i], true) || unifies
+ }
+ return unifies
+ }
+ return false
+ case Set:
+ switch tpe := tpe.(type) {
+ case *types.Set:
+ return unify1Set(env, v, tpe, union)
+ case types.Any:
+ if types.Compare(tpe, types.A) == 0 {
+ v.Foreach(func(elem *Term) {
+ unify1(env, elem, types.A, true)
+ })
+ return true
+ }
+ unifies := false
+ for i := range tpe {
+ unifies = unify1(env, term, tpe[i], true) || unifies
+ }
+ return unifies
+ }
+ return false
+ case Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension:
+ return unifies(env.GetByValue(v), tpe)
+ case Var:
+ if !union {
+ if exist := env.GetByValue(v); exist != nil {
+ return unifies(exist, tpe)
+ }
+ env.tree.PutOne(term.Value, tpe)
+ } else {
+ env.tree.PutOne(term.Value, types.Or(env.GetByValue(v), tpe))
+ }
+ return true
+ default:
+ if !IsConstant(v) {
+ panic("unreachable")
+ }
+ return unifies(env.GetByValue(term.Value), tpe)
+ }
+}
+
+func unify1Array(env *TypeEnv, val *Array, tpe *types.Array, union bool) bool {
+ if val.Len() != tpe.Len() && tpe.Dynamic() == nil {
+ return false
+ }
+ for i := range val.Len() {
+ if !unify1(env, val.Elem(i), tpe.Select(i), union) {
+ return false
+ }
+ }
+ return true
+}
+
+func unify1Object(env *TypeEnv, val Object, tpe *types.Object, union bool) bool {
+ if val.Len() != len(tpe.Keys()) && tpe.DynamicValue() == nil {
+ return false
+ }
+ stop := val.Until(func(k, v *Term) bool {
+ if IsConstant(k.Value) {
+ if child := selectConstant(tpe, k); child != nil {
+ if !unify1(env, v, child, union) {
+ return true
+ }
+ } else {
+ return true
+ }
+ } else {
+ // Inferring type of value under dynamic key would involve unioning
+ // with all property values of tpe whose keys unify. For now, type
+ // these values as Any. We can investigate stricter inference in
+ // the future.
+ unify1(env, v, types.A, union)
+ }
+ return false
+ })
+ return !stop
+}
+
+func unify1Set(env *TypeEnv, val Set, tpe *types.Set, union bool) bool {
+ of := types.Values(tpe)
+ return !val.Until(func(elem *Term) bool {
+ return !unify1(env, elem, of, union)
+ })
+}
+
+func (tc *typeChecker) err(errors []*Error) {
+ tc.errs = append(tc.errs, errors...)
+}
+
+type refChecker struct {
+ env *TypeEnv
+ errs Errors
+ varRewriter varRewriter
+}
+
+func rewriteVarsNop(node Ref) Ref {
+ return node
+}
+
+func newRefChecker(env *TypeEnv, f varRewriter) *refChecker {
+ if f == nil {
+ f = rewriteVarsNop
+ }
+
+ return &refChecker{
+ env: env,
+ errs: nil,
+ varRewriter: f,
+ }
+}
+
+func (rc *refChecker) Visit(x any) bool {
+ switch x := x.(type) {
+ case *ArrayComprehension, *ObjectComprehension, *SetComprehension:
+ return true
+ case *Expr:
+ switch terms := x.Terms.(type) {
+ case []*Term:
+ for i := 1; i < len(terms); i++ {
+ NewGenericVisitor(rc.Visit).Walk(terms[i])
+ }
+ return true
+ case *Term:
+ NewGenericVisitor(rc.Visit).Walk(terms)
+ return true
+ }
+ case Ref:
+ if err := rc.checkApply(rc.env, x); err != nil {
+ rc.errs = append(rc.errs, err)
+ return true
+ }
+ if err := rc.checkRef(rc.env, rc.env.tree, x, 0); err != nil {
+ rc.errs = append(rc.errs, err)
+ }
+ }
+ return false
+}
+
+func (rc *refChecker) checkApply(curr *TypeEnv, ref Ref) *Error {
+ if tpe, ok := curr.GetByRef(ref).(*types.Function); ok {
+ // NOTE(sr): We don't support first-class functions, except for `with`.
+ return newRefErrUnsupported(ref[0].Location, rc.varRewriter(ref), len(ref)-1, tpe)
+ }
+
+ return nil
+}
+
+func (rc *refChecker) checkRef(curr *TypeEnv, node *typeTreeNode, ref Ref, idx int) *Error {
+
+ if idx == len(ref) {
+ return nil
+ }
+
+ head := ref[idx]
+
+ // NOTE(sr): as long as package statements are required, this isn't possible:
+ // the shortest possible rule ref is data.a.b (b is idx 2), idx 1 and 2 need to
+ // be strings or vars.
+ if idx == 1 || idx == 2 {
+ switch head.Value.(type) {
+ case Var, String: // OK
+ default:
+ have := rc.env.GetByValue(head.Value)
+ return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, have, types.S, getOneOfForNode(node))
+ }
+ }
+
+ if _, ok := head.Value.(Var); ok && idx != 0 {
+ tpe := types.Keys(rc.env.getRefRecExtent(node))
+ if exist := rc.env.GetByValue(head.Value); exist != nil {
+ if !unifies(tpe, exist) {
+ return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, tpe, getOneOfForNode(node))
+ }
+ } else {
+ rc.env.tree.PutOne(head.Value, tpe)
+ }
+ }
+
+ child := node.Child(head.Value)
+ if child == nil {
+ // NOTE(sr): idx is reset on purpose: we start over
+ switch {
+ case curr.next != nil:
+ next := curr.next
+ return rc.checkRef(next, next.tree, ref, 0)
+
+ case RootDocumentNames.Contains(ref[0]):
+ if idx != 0 {
+ node.Children().Iter(func(_ Value, child *typeTreeNode) bool {
+ _ = rc.checkRef(curr, child, ref, idx+1) // ignore error
+ return false
+ })
+ return nil
+ }
+ return rc.checkRefLeaf(types.A, ref, 1)
+
+ default:
+ return rc.checkRefLeaf(types.A, ref, 0)
+ }
+ }
+
+ if child.Leaf() {
+ return rc.checkRefLeaf(child.Value(), ref, idx+1)
+ }
+
+ return rc.checkRef(curr, child, ref, idx+1)
+}
+
+func (rc *refChecker) checkRefLeaf(tpe types.Type, ref Ref, idx int) *Error {
+
+ if idx == len(ref) {
+ return nil
+ }
+
+ head := ref[idx]
+
+ keys := types.Keys(tpe)
+ if keys == nil {
+ return newRefErrUnsupported(ref[0].Location, rc.varRewriter(ref), idx-1, tpe)
+ }
+
+ switch value := head.Value.(type) {
+
+ case Var:
+ if exist := rc.env.GetByValue(value); exist != nil {
+ if !unifies(exist, keys) {
+ return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe))
+ }
+ } else {
+ rc.env.tree.PutOne(value, types.Keys(tpe))
+ }
+
+ case Ref:
+ if exist := rc.env.Get(value); exist != nil {
+ if !unifies(exist, keys) {
+ return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe))
+ }
+ }
+
+ case *Array, Object, Set:
+ if !unify1(rc.env, head, keys, false) {
+ return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, rc.env.Get(head), keys, nil)
+ }
+
+ default:
+ child := selectConstant(tpe, head)
+ if child == nil {
+ return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, nil, types.Keys(tpe), getOneOfForType(tpe))
+ }
+ return rc.checkRefLeaf(child, ref, idx+1)
+ }
+
+ return rc.checkRefLeaf(types.Values(tpe), ref, idx+1)
+}
+
+func unifies(a, b types.Type) bool {
+
+ if a == nil || b == nil {
+ return false
+ }
+
+ anyA, ok1 := a.(types.Any)
+ if ok1 {
+ if unifiesAny(anyA, b) {
+ return true
+ }
+ }
+
+ anyB, ok2 := b.(types.Any)
+ if ok2 {
+ if unifiesAny(anyB, a) {
+ return true
+ }
+ }
+
+ if ok1 || ok2 {
+ return false
+ }
+
+ switch a := a.(type) {
+ case types.Null:
+ _, ok := b.(types.Null)
+ return ok
+ case types.Boolean:
+ _, ok := b.(types.Boolean)
+ return ok
+ case types.Number:
+ _, ok := b.(types.Number)
+ return ok
+ case types.String:
+ _, ok := b.(types.String)
+ return ok
+ case *types.Array:
+ b, ok := b.(*types.Array)
+ if !ok {
+ return false
+ }
+ return unifiesArrays(a, b)
+ case *types.Object:
+ b, ok := b.(*types.Object)
+ if !ok {
+ return false
+ }
+ return unifiesObjects(a, b)
+ case *types.Set:
+ b, ok := b.(*types.Set)
+ if !ok {
+ return false
+ }
+ return unifies(types.Values(a), types.Values(b))
+ case *types.Function:
+ // NOTE(sr): variadic functions can only be internal ones, and we've forbidden
+ // their replacement via `with`; so we disregard variadic here
+ if types.Arity(a) == types.Arity(b) {
+ b := b.(*types.Function)
+ for i := range a.FuncArgs().Args {
+ if !unifies(a.FuncArgs().Arg(i), b.FuncArgs().Arg(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+ default:
+ panic("unreachable")
+ }
+}
+
+func unifiesAny(a types.Any, b types.Type) bool {
+ if _, ok := b.(*types.Function); ok {
+ return false
+ }
+ for i := range a {
+ if unifies(a[i], b) {
+ return true
+ }
+ }
+ return len(a) == 0
+}
+
+func unifiesArrays(a, b *types.Array) bool {
+
+ if !unifiesArraysStatic(a, b) {
+ return false
+ }
+
+ if !unifiesArraysStatic(b, a) {
+ return false
+ }
+
+ return a.Dynamic() == nil || b.Dynamic() == nil || unifies(a.Dynamic(), b.Dynamic())
+}
+
+func unifiesArraysStatic(a, b *types.Array) bool {
+ if a.Len() != 0 {
+ for i := range a.Len() {
+ if !unifies(a.Select(i), b.Select(i)) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func unifiesObjects(a, b *types.Object) bool {
+ if !unifiesObjectsStatic(a, b) {
+ return false
+ }
+
+ if !unifiesObjectsStatic(b, a) {
+ return false
+ }
+
+ return a.DynamicValue() == nil || b.DynamicValue() == nil || unifies(a.DynamicValue(), b.DynamicValue())
+}
+
+func unifiesObjectsStatic(a, b *types.Object) bool {
+ for _, k := range a.Keys() {
+ if !unifies(a.Select(k), b.Select(k)) {
+ return false
+ }
+ }
+ return true
+}
+
+// typeErrorCause defines an interface to determine the reason for a type
+// error. The type error details implement this interface so that type checking
+// can report more actionable errors.
+type typeErrorCause interface {
+ nilType() bool
+}
+
+func causedByNilType(err *Error) bool {
+ cause, ok := err.Details.(typeErrorCause)
+ if !ok {
+ return false
+ }
+ return cause.nilType()
+}
+
+// ArgErrDetail represents a generic argument error.
+type ArgErrDetail struct {
+ Have []types.Type `json:"have"`
+ Want types.FuncArgs `json:"want"`
+}
+
+// Lines returns the string representation of the detail.
+func (d *ArgErrDetail) Lines() []string {
+ lines := make([]string, 2)
+ lines[0] = "have: " + formatArgs(d.Have)
+ lines[1] = "want: " + d.Want.String()
+ return lines
+}
+
+func (d *ArgErrDetail) nilType() bool {
+ return slices.ContainsFunc(d.Have, types.Nil)
+}
+
+// UnificationErrDetail describes a type mismatch error when two values are
+// unified (e.g., x = [1,2,y]).
+type UnificationErrDetail struct {
+ Left types.Type `json:"a"`
+ Right types.Type `json:"b"`
+}
+
+func (a *UnificationErrDetail) nilType() bool {
+ return types.Nil(a.Left) || types.Nil(a.Right)
+}
+
+// Lines returns the string representation of the detail.
+func (a *UnificationErrDetail) Lines() []string {
+ lines := make([]string, 2)
+ lines[0] = fmt.Sprint("left : ", types.Sprint(a.Left))
+ lines[1] = fmt.Sprint("right : ", types.Sprint(a.Right))
+ return lines
+}
+
+// RefErrUnsupportedDetail describes an undefined reference error where the
+// referenced value does not support dereferencing (e.g., scalars).
+type RefErrUnsupportedDetail struct {
+ Ref Ref `json:"ref"` // invalid ref
+ Pos int `json:"pos"` // invalid element
+ Have types.Type `json:"have"` // referenced type
+}
+
+// Lines returns the string representation of the detail.
+func (r *RefErrUnsupportedDetail) Lines() []string {
+ lines := []string{
+ r.Ref.String(),
+ strings.Repeat("^", len(r.Ref[:r.Pos+1].String())),
+ fmt.Sprintf("have: %v", r.Have),
+ }
+ return lines
+}
+
+// RefErrInvalidDetail describes an undefined reference error where the referenced
+// value does not support the reference operand (e.g., missing object key,
+// invalid key type, etc.)
+type RefErrInvalidDetail struct {
+ Ref Ref `json:"ref"` // invalid ref
+ Pos int `json:"pos"` // invalid element
+ Have types.Type `json:"have,omitempty"` // type of invalid element (for var/ref elements)
+ Want types.Type `json:"want"` // allowed type (for non-object values)
+ OneOf []Value `json:"oneOf"` // allowed values (e.g., for object keys)
+}
+
+// Lines returns the string representation of the detail.
+func (r *RefErrInvalidDetail) Lines() []string {
+ lines := []string{r.Ref.String()}
+ offset := len(r.Ref[:r.Pos].String()) + 1
+ pad := strings.Repeat(" ", offset)
+ lines = append(lines, pad+"^")
+ if r.Have != nil {
+ lines = append(lines, fmt.Sprintf("%shave (type): %v", pad, r.Have))
+ } else {
+ lines = append(lines, fmt.Sprintf("%shave: %v", pad, r.Ref[r.Pos]))
+ }
+ if len(r.OneOf) > 0 {
+ lines = append(lines, fmt.Sprintf("%swant (one of): %v", pad, r.OneOf))
+ } else {
+ lines = append(lines, fmt.Sprintf("%swant (type): %v", pad, r.Want))
+ }
+ return lines
+}
+
+func formatArgs(args []types.Type) string {
+ buf := make([]string, len(args))
+ for i := range args {
+ buf[i] = types.Sprint(args[i])
+ }
+ return "(" + strings.Join(buf, ", ") + ")"
+}
+
+func newRefErrInvalid(loc *Location, ref Ref, idx int, have, want types.Type, oneOf []Value) *Error {
+ err := newRefError(loc, ref)
+ err.Details = &RefErrInvalidDetail{
+ Ref: ref,
+ Pos: idx,
+ Have: have,
+ Want: want,
+ OneOf: oneOf,
+ }
+ return err
+}
+
+func newRefErrUnsupported(loc *Location, ref Ref, idx int, have types.Type) *Error {
+ err := newRefError(loc, ref)
+ err.Details = &RefErrUnsupportedDetail{
+ Ref: ref,
+ Pos: idx,
+ Have: have,
+ }
+ return err
+}
+
+func newRefError(loc *Location, ref Ref) *Error {
+ return NewError(TypeErr, loc, "undefined ref: %v", ref)
+}
+
+func newArgError(loc *Location, builtinName Ref, msg string, have []types.Type, want types.FuncArgs) *Error {
+ err := NewError(TypeErr, loc, "%v: %v", builtinName, msg)
+ err.Details = &ArgErrDetail{
+ Have: have,
+ Want: want,
+ }
+ return err
+}
+
+func getOneOfForNode(node *typeTreeNode) (result []Value) {
+ node.Children().Iter(func(k Value, _ *typeTreeNode) bool {
+ result = append(result, k)
+ return false
+ })
+
+ sortValueSlice(result)
+ return result
+}
+
+func getOneOfForType(tpe types.Type) (result []Value) {
+ switch tpe := tpe.(type) {
+ case *types.Object:
+ for _, k := range tpe.Keys() {
+ v, err := InterfaceToValue(k)
+ if err != nil {
+ panic(err)
+ }
+ result = append(result, v)
+ }
+
+ case types.Any:
+ for _, object := range tpe {
+ objRes := getOneOfForType(object)
+ result = append(result, objRes...)
+ }
+ }
+
+ result = removeDuplicate(result)
+ sortValueSlice(result)
+ return result
+}
+
+func sortValueSlice(sl []Value) {
+ sort.Slice(sl, func(i, j int) bool {
+ return sl[i].Compare(sl[j]) < 0
+ })
+}
+
+func removeDuplicate(list []Value) []Value {
+ seen := make(map[Value]bool)
+ var newResult []Value
+ for _, item := range list {
+ if !seen[item] {
+ newResult = append(newResult, item)
+ seen[item] = true
+ }
+ }
+ return newResult
+}
+
+func getArgTypes(env *TypeEnv, args []*Term) []types.Type {
+ pre := make([]types.Type, len(args))
+ for i := range args {
+ pre[i] = env.Get(args[i])
+ }
+ return pre
+}
+
+// getPrefix returns the shortest prefix of ref that exists in env
+func getPrefix(env *TypeEnv, ref Ref) (Ref, types.Type) {
+ if len(ref) == 1 {
+ t := env.Get(ref)
+ if t != nil {
+ return ref, t
+ }
+ }
+ for i := 1; i < len(ref); i++ {
+ t := env.Get(ref[:i])
+ if t != nil {
+ return ref[:i], t
+ }
+ }
+ return nil, nil
+}
+
+// override takes a type t and returns a type obtained from t where the path represented by ref within it has type o (overriding the original type of that path)
+func override(ref Ref, t types.Type, o types.Type, rule *Rule) (types.Type, *Error) {
+ var newStaticProps []*types.StaticProperty
+ obj, ok := t.(*types.Object)
+ if !ok {
+ newType, err := getObjectType(ref, o, rule, types.NewDynamicProperty(types.A, types.A))
+ if err != nil {
+ return nil, err
+ }
+ return newType, nil
+ }
+ found := false
+ if ok {
+ staticProps := obj.StaticProperties()
+ for _, prop := range staticProps {
+ valueCopy := prop.Value
+ key, err := InterfaceToValue(prop.Key)
+ if err != nil {
+ return nil, NewError(TypeErr, rule.Location, "unexpected error in override: %s", err.Error())
+ }
+ if len(ref) > 0 && ref[0].Value.Compare(key) == 0 {
+ found = true
+ if len(ref) == 1 {
+ valueCopy = o
+ } else {
+ newVal, err := override(ref[1:], valueCopy, o, rule)
+ if err != nil {
+ return nil, err
+ }
+ valueCopy = newVal
+ }
+ }
+ newStaticProps = append(newStaticProps, types.NewStaticProperty(prop.Key, valueCopy))
+ }
+ }
+
+ // ref[0] is not a top-level key in staticProps, so it must be added
+ if !found {
+ newType, err := getObjectType(ref, o, rule, obj.DynamicProperties())
+ if err != nil {
+ return nil, err
+ }
+ newStaticProps = append(newStaticProps, newType.StaticProperties()...)
+ }
+ return types.NewObject(newStaticProps, obj.DynamicProperties()), nil
+}
+
+func getKeys(ref Ref, rule *Rule) ([]any, *Error) {
+ keys := []any{}
+ for _, refElem := range ref {
+ key, err := JSON(refElem.Value)
+ if err != nil {
+ return nil, NewError(TypeErr, rule.Location, "error getting key from value: %s", err.Error())
+ }
+ keys = append(keys, key)
+ }
+ return keys, nil
+}
+
+func getObjectTypeRec(keys []any, o types.Type, d *types.DynamicProperty) *types.Object {
+ if len(keys) == 1 {
+ staticProps := []*types.StaticProperty{types.NewStaticProperty(keys[0], o)}
+ return types.NewObject(staticProps, d)
+ }
+
+ staticProps := []*types.StaticProperty{types.NewStaticProperty(keys[0], getObjectTypeRec(keys[1:], o, d))}
+ return types.NewObject(staticProps, d)
+}
+
+func getObjectType(ref Ref, o types.Type, rule *Rule, d *types.DynamicProperty) (*types.Object, *Error) {
+ keys, err := getKeys(ref, rule)
+ if err != nil {
+ return nil, err
+ }
+ return getObjectTypeRec(keys, o, d), nil
+}
+
+func getRuleAnnotation(as *AnnotationSet, rule *Rule) (result []*SchemaAnnotation) {
+
+ for _, x := range as.GetSubpackagesScope(rule.Module.Package.Path) {
+ result = append(result, x.Schemas...)
+ }
+
+ if x := as.GetPackageScope(rule.Module.Package); x != nil {
+ result = append(result, x.Schemas...)
+ }
+
+ if x := as.GetDocumentScope(rule.Ref().GroundPrefix()); x != nil {
+ result = append(result, x.Schemas...)
+ }
+
+ for _, x := range as.GetRuleScope(rule) {
+ result = append(result, x.Schemas...)
+ }
+
+ return result
+}
+
+func processAnnotation(ss *SchemaSet, annot *SchemaAnnotation, rule *Rule, allowNet []string) (types.Type, *Error) {
+
+ var schema any
+
+ if annot.Schema != nil {
+ if ss == nil {
+ return nil, nil
+ }
+ schema = ss.Get(annot.Schema)
+ if schema == nil {
+ return nil, NewError(TypeErr, rule.Location, "undefined schema: %v", annot.Schema)
+ }
+ } else if annot.Definition != nil {
+ schema = *annot.Definition
+ }
+
+ tpe, err := loadSchema(schema, allowNet)
+ if err != nil {
+ return nil, NewError(TypeErr, rule.Location, "%s", err.Error())
+ }
+
+ return tpe, nil
+}
+
+func errAnnotationRedeclared(a *Annotations, other *Location) *Error {
+ return NewError(TypeErr, a.Location, "%v annotation redeclared: %v", a.Scope, other)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/compare.go b/vendor/github.com/open-policy-agent/opa/v1/ast/compare.go
new file mode 100644
index 0000000000..663ad5ae05
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/compare.go
@@ -0,0 +1,440 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "cmp"
+ "fmt"
+ "math/big"
+ "strings"
+)
+
+// Compare returns an integer indicating whether two AST values are less than,
+// equal to, or greater than each other.
+//
+// If a is less than b, the return value is negative. If a is greater than b,
+// the return value is positive. If a is equal to b, the return value is zero.
+//
+// Different types are never equal to each other. For comparison purposes, types
+// are sorted as follows:
+//
+// nil < Null < Boolean < Number < String < Var < Ref < Array < Object < Set <
+// ArrayComprehension < ObjectComprehension < SetComprehension < Expr < SomeDecl
+// < With < Body < Rule < Import < Package < Module.
+//
+// Arrays and Refs are equal if and only if both a and b have the same length
+// and all corresponding elements are equal. If one element is not equal, the
+// return value is the same as for the first differing element. If all elements
+// are equal but a and b have different lengths, the shorter is considered less
+// than the other.
+//
+// Objects are considered equal if and only if both a and b have the same sorted
+// (key, value) pairs and are of the same length. Other comparisons are
+// consistent but not defined.
+//
+// Sets are considered equal if and only if the symmetric difference of a and b
+// is empty.
+// Other comparisons are consistent but not defined.
+func Compare(a, b any) int {
+
+ if t, ok := a.(*Term); ok {
+ if t == nil {
+ a = nil
+ } else {
+ a = t.Value
+ }
+ }
+
+ if t, ok := b.(*Term); ok {
+ if t == nil {
+ b = nil
+ } else {
+ b = t.Value
+ }
+ }
+
+ if a == nil {
+ if b == nil {
+ return 0
+ }
+ return -1
+ }
+ if b == nil {
+ return 1
+ }
+
+ sortA := sortOrder(a)
+ sortB := sortOrder(b)
+
+ if sortA < sortB {
+ return -1
+ } else if sortB < sortA {
+ return 1
+ }
+
+ switch a := a.(type) {
+ case Null:
+ return 0
+ case Boolean:
+ if a == b.(Boolean) {
+ return 0
+ }
+ if !a {
+ return -1
+ }
+ return 1
+ case Number:
+ return NumberCompare(a, b.(Number))
+ case String:
+ b := b.(String)
+ if a == b {
+ return 0
+ }
+ if a < b {
+ return -1
+ }
+ return 1
+ case Var:
+ return VarCompare(a, b.(Var))
+ case Ref:
+ return termSliceCompare(a, b.(Ref))
+ case *Array:
+ b := b.(*Array)
+ return termSliceCompare(a.elems, b.elems)
+ case *lazyObj:
+ return Compare(a.force(), b)
+ case *object:
+ if x, ok := b.(*lazyObj); ok {
+ b = x.force()
+ }
+ return a.Compare(b.(*object))
+ case Set:
+ return a.Compare(b.(Set))
+ case *ArrayComprehension:
+ b := b.(*ArrayComprehension)
+ if cmp := Compare(a.Term, b.Term); cmp != 0 {
+ return cmp
+ }
+ return a.Body.Compare(b.Body)
+ case *ObjectComprehension:
+ b := b.(*ObjectComprehension)
+ if cmp := Compare(a.Key, b.Key); cmp != 0 {
+ return cmp
+ }
+ if cmp := Compare(a.Value, b.Value); cmp != 0 {
+ return cmp
+ }
+ return a.Body.Compare(b.Body)
+ case *SetComprehension:
+ b := b.(*SetComprehension)
+ if cmp := Compare(a.Term, b.Term); cmp != 0 {
+ return cmp
+ }
+ return a.Body.Compare(b.Body)
+ case Call:
+ return termSliceCompare(a, b.(Call))
+ case *Expr:
+ return a.Compare(b.(*Expr))
+ case *SomeDecl:
+ return a.Compare(b.(*SomeDecl))
+ case *Every:
+ return a.Compare(b.(*Every))
+ case *With:
+ return a.Compare(b.(*With))
+ case Body:
+ return a.Compare(b.(Body))
+ case *Head:
+ return a.Compare(b.(*Head))
+ case *Rule:
+ return a.Compare(b.(*Rule))
+ case Args:
+ return termSliceCompare(a, b.(Args))
+ case *Import:
+ return a.Compare(b.(*Import))
+ case *Package:
+ return a.Compare(b.(*Package))
+ case *Annotations:
+ return a.Compare(b.(*Annotations))
+ case *Module:
+ return a.Compare(b.(*Module))
+ }
+ panic(fmt.Sprintf("illegal value: %T", a))
+}
+
+type termSlice []*Term
+
+func (s termSlice) Less(i, j int) bool { return Compare(s[i].Value, s[j].Value) < 0 }
+func (s termSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s termSlice) Len() int { return len(s) }
+
+func sortOrder(x any) int {
+ switch x.(type) {
+ case Null:
+ return 0
+ case Boolean:
+ return 1
+ case Number:
+ return 2
+ case String:
+ return 3
+ case Var:
+ return 4
+ case Ref:
+ return 5
+ case *Array:
+ return 6
+ case Object:
+ return 7
+ case Set:
+ return 8
+ case *ArrayComprehension:
+ return 9
+ case *ObjectComprehension:
+ return 10
+ case *SetComprehension:
+ return 11
+ case Call:
+ return 12
+ case Args:
+ return 13
+ case *Expr:
+ return 100
+ case *SomeDecl:
+ return 101
+ case *Every:
+ return 102
+ case *With:
+ return 110
+ case *Head:
+ return 120
+ case Body:
+ return 200
+ case *Rule:
+ return 1000
+ case *Import:
+ return 1001
+ case *Package:
+ return 1002
+ case *Annotations:
+ return 1003
+ case *Module:
+ return 10000
+ }
+ panic(fmt.Sprintf("illegal value: %T", x))
+}
+
+func importsCompare(a, b []*Import) int {
+ minLen := min(len(b), len(a))
+ for i := range minLen {
+ if cmp := a[i].Compare(b[i]); cmp != 0 {
+ return cmp
+ }
+ }
+ if len(a) < len(b) {
+ return -1
+ }
+ if len(b) < len(a) {
+ return 1
+ }
+ return 0
+}
+
+func annotationsCompare(a, b []*Annotations) int {
+ minLen := min(len(b), len(a))
+ for i := range minLen {
+ if cmp := a[i].Compare(b[i]); cmp != 0 {
+ return cmp
+ }
+ }
+ if len(a) < len(b) {
+ return -1
+ }
+ if len(b) < len(a) {
+ return 1
+ }
+ return 0
+}
+
+func rulesCompare(a, b []*Rule) int {
+ minLen := min(len(b), len(a))
+ for i := range minLen {
+ if cmp := a[i].Compare(b[i]); cmp != 0 {
+ return cmp
+ }
+ }
+ if len(a) < len(b) {
+ return -1
+ }
+ if len(b) < len(a) {
+ return 1
+ }
+ return 0
+}
+
+func termSliceCompare(a, b []*Term) int {
+ minLen := min(len(b), len(a))
+ for i := range minLen {
+ if cmp := Compare(a[i], b[i]); cmp != 0 {
+ return cmp
+ }
+ }
+ if len(a) < len(b) {
+ return -1
+ } else if len(b) < len(a) {
+ return 1
+ }
+ return 0
+}
+
+func withSliceCompare(a, b []*With) int {
+ minLen := min(len(b), len(a))
+ for i := range minLen {
+ if cmp := Compare(a[i], b[i]); cmp != 0 {
+ return cmp
+ }
+ }
+ if len(a) < len(b) {
+ return -1
+ } else if len(b) < len(a) {
+ return 1
+ }
+ return 0
+}
+
+func VarCompare(a, b Var) int {
+ if a == b {
+ return 0
+ }
+ if a < b {
+ return -1
+ }
+ return 1
+}
+
+func TermValueCompare(a, b *Term) int {
+ return a.Value.Compare(b.Value)
+}
+
+func TermValueEqual(a, b *Term) bool {
+ return ValueEqual(a.Value, b.Value)
+}
+
+func ValueEqual(a, b Value) bool {
+ // TODO(ae): why doesn't this work the same?
+ //
+ // case interface{ Equal(Value) bool }:
+ // return v.Equal(b)
+ //
+ // When put on top, golangci-lint even flags the other cases as unreachable..
+ // but TestTopdownVirtualCache will have failing test cases when we replace
+ // the other cases with the above one.. 🤔
+ switch v := a.(type) {
+ case Null:
+ return v.Equal(b)
+ case Boolean:
+ return v.Equal(b)
+ case Number:
+ return v.Equal(b)
+ case String:
+ return v.Equal(b)
+ case Var:
+ return v.Equal(b)
+ case Ref:
+ return v.Equal(b)
+ case *Array:
+ return v.Equal(b)
+ }
+
+ return a.Compare(b) == 0
+}
+
+func RefCompare(a, b Ref) int {
+ return termSliceCompare(a, b)
+}
+
+func RefEqual(a, b Ref) bool {
+ return termSliceEqual(a, b)
+}
+
+func NumberCompare(x, y Number) int {
+ xs, ys := string(x), string(y)
+
+ var xIsF, yIsF bool
+
+ // Treat "1" and "1.0", "1.00", etc as "1"
+ if strings.Contains(xs, ".") {
+ if tx := strings.TrimRight(xs, ".0"); tx != xs {
+ // Still a float after trimming?
+ xIsF = strings.Contains(tx, ".")
+ xs = tx
+ }
+ }
+ if strings.Contains(ys, ".") {
+ if ty := strings.TrimRight(ys, ".0"); ty != ys {
+ yIsF = strings.Contains(ty, ".")
+ ys = ty
+ }
+ }
+ if xs == ys {
+ return 0
+ }
+
+ var xi, yi int64
+ var xf, yf float64
+ var xiOK, yiOK, xfOK, yfOK bool
+
+ if xi, xiOK = x.Int64(); xiOK {
+ if yi, yiOK = y.Int64(); yiOK {
+ return cmp.Compare(xi, yi)
+ }
+ }
+
+ if xIsF && yIsF {
+ if xf, xfOK = x.Float64(); xfOK {
+ if yf, yfOK = y.Float64(); yfOK {
+ if xf == yf {
+ return 0
+ }
+ // could still be "equal" depending on precision, so we continue?
+ }
+ }
+ }
+
+ var a *big.Rat
+ fa, ok := new(big.Float).SetString(string(x))
+ if !ok {
+ panic("illegal value")
+ }
+ if fa.IsInt() {
+ if i, _ := fa.Int64(); i == 0 {
+ a = new(big.Rat).SetInt64(0)
+ }
+ }
+ if a == nil {
+ a, ok = new(big.Rat).SetString(string(x))
+ if !ok {
+ panic("illegal value")
+ }
+ }
+
+ var b *big.Rat
+ fb, ok := new(big.Float).SetString(string(y))
+ if !ok {
+ panic("illegal value")
+ }
+ if fb.IsInt() {
+ if i, _ := fb.Int64(); i == 0 {
+ b = new(big.Rat).SetInt64(0)
+ }
+ }
+ if b == nil {
+ b, ok = new(big.Rat).SetString(string(y))
+ if !ok {
+ panic("illegal value")
+ }
+ }
+
+ return a.Cmp(b)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/compile.go b/vendor/github.com/open-policy-agent/opa/v1/ast/compile.go
new file mode 100644
index 0000000000..62e22bf937
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/compile.go
@@ -0,0 +1,6129 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "maps"
+ "slices"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/open-policy-agent/opa/internal/debug"
+ "github.com/open-policy-agent/opa/internal/gojsonschema"
+ "github.com/open-policy-agent/opa/v1/ast/location"
+ "github.com/open-policy-agent/opa/v1/metrics"
+ "github.com/open-policy-agent/opa/v1/types"
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+// CompileErrorLimitDefault is the default number errors a compiler will allow before
+// exiting.
+const CompileErrorLimitDefault = 10
+
+var (
+ errLimitReached = NewError(CompileErr, nil, "error limit reached")
+
+ doubleEq = Equal.Ref()
+)
+
+// Compiler contains the state of a compilation process.
+type Compiler struct {
+
+ // Errors contains errors that occurred during the compilation process.
+ // If there are one or more errors, the compilation process is considered
+ // "failed".
+ Errors Errors
+
+ // Modules contains the compiled modules. The compiled modules are the
+ // output of the compilation process. If the compilation process failed,
+ // there is no guarantee about the state of the modules.
+ Modules map[string]*Module
+
+ // ModuleTree organizes the modules into a tree where each node is keyed by
+ // an element in the module's package path. E.g., given modules containing
+ // the following package directives: "a", "a.b", "a.c", and "a.b", the
+ // resulting module tree would be:
+ //
+ // root
+ // |
+ // +--- data (no modules)
+ // |
+ // +--- a (1 module)
+ // |
+ // +--- b (2 modules)
+ // |
+ // +--- c (1 module)
+ //
+ ModuleTree *ModuleTreeNode
+
+ // RuleTree organizes rules into a tree where each node is keyed by an
+ // element in the rule's path. The rule path is the concatenation of the
+ // containing package and the stringified rule name. E.g., given the
+ // following module:
+ //
+ // package ex
+ // p[1] { true }
+ // p[2] { true }
+ // q = true
+ // a.b.c = 3
+ //
+ // root
+ // |
+ // +--- data (no rules)
+ // |
+ // +--- ex (no rules)
+ // |
+ // +--- p (2 rules)
+ // |
+ // +--- q (1 rule)
+ // |
+ // +--- a
+ // |
+ // +--- b
+ // |
+ // +--- c (1 rule)
+ //
+ // Another example with general refs containing vars at arbitrary locations:
+ //
+ // package ex
+ // a.b[x].d { x := "c" } # R1
+ // a.b.c[x] { x := "d" } # R2
+ // a.b[x][y] { x := "c"; y := "d" } # R3
+ // p := true # R4
+ //
+ // root
+ // |
+ // +--- data (no rules)
+ // |
+ // +--- ex (no rules)
+ // |
+ // +--- a
+ // | |
+ // | +--- b (R1, R3)
+ // | |
+ // | +--- c (R2)
+ // |
+ // +--- p (R4)
+ RuleTree *TreeNode
+
+ // Graph contains dependencies between rules. An edge (u,v) is added to the
+ // graph if rule 'u' refers to the virtual document defined by 'v'.
+ Graph *Graph
+
+ // TypeEnv holds type information for values inferred by the compiler.
+ TypeEnv *TypeEnv
+
+ // RewrittenVars is a mapping of variables that have been rewritten
+ // with the key being the generated name and value being the original.
+ RewrittenVars map[Var]Var
+
+ // Capabilities required by the modules that were compiled.
+ Required *Capabilities
+
+ localvargen *localVarGenerator
+ moduleLoader ModuleLoader
+ ruleIndices *util.HasherMap[Ref, RuleIndex]
+ stages []stage
+ maxErrs int
+ sorted []string // list of sorted module names
+ pathExists func([]string) (bool, error)
+ pathConflictCheckRoots []string
+ after map[string][]CompilerStageDefinition
+ metrics metrics.Metrics
+ capabilities *Capabilities // user-supplied capabilities
+ imports map[string][]*Import // saved imports from stripping
+ builtins map[string]*Builtin // universe of built-in functions
+ customBuiltins map[string]*Builtin // user-supplied custom built-in functions (deprecated: use capabilities)
+ unsafeBuiltinsMap map[string]struct{} // user-supplied set of unsafe built-ins functions to block (deprecated: use capabilities)
+ deprecatedBuiltinsMap map[string]struct{} // set of deprecated, but not removed, built-in functions
+ enablePrintStatements bool // indicates if print statements should be elided (default)
+ comprehensionIndices map[*Term]*ComprehensionIndex // comprehension key index
+ initialized bool // indicates if init() has been called
+ debug debug.Debug // emits debug information produced during compilation
+ schemaSet *SchemaSet // user-supplied schemas for input and data documents
+ inputType types.Type // global input type retrieved from schema set
+ annotationSet *AnnotationSet // hierarchical set of annotations
+ strict bool // enforce strict compilation checks
+ keepModules bool // whether to keep the unprocessed, parse modules (below)
+ parsedModules map[string]*Module // parsed, but otherwise unprocessed modules, kept track of when keepModules is true
+ useTypeCheckAnnotations bool // whether to provide annotated information (schemas) to the type checker
+ allowUndefinedFuncCalls bool // don't error on calls to unknown functions.
+ evalMode CompilerEvalMode //
+ rewriteTestRulesForTracing bool // rewrite test rules to capture dynamic values for tracing.
+ defaultRegoVersion RegoVersion
+}
+
+func (c *Compiler) DefaultRegoVersion() RegoVersion {
+ return c.defaultRegoVersion
+}
+
+// CompilerStage defines the interface for stages in the compiler.
+type CompilerStage func(*Compiler) *Error
+
+// CompilerEvalMode allows toggling certain stages that are only
+// needed for certain modes, Concretely, only "topdown" mode will
+// have the compiler build comprehension and rule indices.
+type CompilerEvalMode int
+
+const (
+ // EvalModeTopdown (default) instructs the compiler to build rule
+ // and comprehension indices used by topdown evaluation.
+ EvalModeTopdown CompilerEvalMode = iota
+
+ // EvalModeIR makes the compiler skip the stages for comprehension
+ // and rule indices.
+ EvalModeIR
+)
+
+// CompilerStageDefinition defines a compiler stage
+type CompilerStageDefinition struct {
+ Name string
+ MetricName string
+ Stage CompilerStage
+}
+
+// RulesOptions defines the options for retrieving rules by Ref from the
+// compiler.
+type RulesOptions struct {
+ // IncludeHiddenModules determines if the result contains hidden modules,
+ // currently only the "system" namespace, i.e. "data.system.*".
+ IncludeHiddenModules bool
+}
+
+// QueryContext contains contextual information for running an ad-hoc query.
+//
+// Ad-hoc queries can be run in the context of a package and imports may be
+// included to provide concise access to data.
+type QueryContext struct {
+ Package *Package
+ Imports []*Import
+}
+
+// NewQueryContext returns a new QueryContext object.
+func NewQueryContext() *QueryContext {
+ return &QueryContext{}
+}
+
+// WithPackage sets the pkg on qc.
+func (qc *QueryContext) WithPackage(pkg *Package) *QueryContext {
+ if qc == nil {
+ qc = NewQueryContext()
+ }
+ qc.Package = pkg
+ return qc
+}
+
+// WithImports sets the imports on qc.
+func (qc *QueryContext) WithImports(imports []*Import) *QueryContext {
+ if qc == nil {
+ qc = NewQueryContext()
+ }
+ qc.Imports = imports
+ return qc
+}
+
+// Copy returns a deep copy of qc.
+func (qc *QueryContext) Copy() *QueryContext {
+ if qc == nil {
+ return nil
+ }
+ cpy := *qc
+ if cpy.Package != nil {
+ cpy.Package = qc.Package.Copy()
+ }
+ cpy.Imports = make([]*Import, len(qc.Imports))
+ for i := range qc.Imports {
+ cpy.Imports[i] = qc.Imports[i].Copy()
+ }
+ return &cpy
+}
+
+// QueryCompiler defines the interface for compiling ad-hoc queries.
+type QueryCompiler interface {
+
+ // Compile should be called to compile ad-hoc queries. The return value is
+ // the compiled version of the query.
+ Compile(q Body) (Body, error)
+
+ // TypeEnv returns the type environment built after running type checking
+ // on the query.
+ TypeEnv() *TypeEnv
+
+ // WithContext sets the QueryContext on the QueryCompiler. Subsequent calls
+ // to Compile will take the QueryContext into account.
+ WithContext(qctx *QueryContext) QueryCompiler
+
+ // WithEnablePrintStatements enables print statements in queries compiled
+ // with the QueryCompiler.
+ WithEnablePrintStatements(yes bool) QueryCompiler
+
+ // WithUnsafeBuiltins sets the built-in functions to treat as unsafe and not
+ // allow inside of queries. By default the query compiler inherits the
+ // compiler's unsafe built-in functions. This function allows callers to
+ // override that set. If an empty (non-nil) map is provided, all built-ins
+ // are allowed.
+ WithUnsafeBuiltins(unsafe map[string]struct{}) QueryCompiler
+
+ // WithStageAfter registers a stage to run during query compilation after
+ // the named stage.
+ WithStageAfter(after string, stage QueryCompilerStageDefinition) QueryCompiler
+
+ // RewrittenVars maps generated vars in the compiled query to vars from the
+ // parsed query. For example, given the query "input := 1" the rewritten
+ // query would be "__local0__ = 1". The mapping would then be {__local0__: input}.
+ RewrittenVars() map[Var]Var
+
+ // ComprehensionIndex returns an index data structure for the given comprehension
+ // term. If no index is found, returns nil.
+ ComprehensionIndex(term *Term) *ComprehensionIndex
+
+ // WithStrict enables strict mode for the query compiler.
+ WithStrict(strict bool) QueryCompiler
+}
+
+// QueryCompilerStage defines the interface for stages in the query compiler.
+type QueryCompilerStage func(QueryCompiler, Body) (Body, error)
+
+// QueryCompilerStageDefinition defines a QueryCompiler stage
+type QueryCompilerStageDefinition struct {
+ Name string
+ MetricName string
+ Stage QueryCompilerStage
+}
+
+type stage struct {
+ name string
+ metricName string
+ f func()
+}
+
+// NewCompiler returns a new empty compiler.
+func NewCompiler() *Compiler {
+
+ c := &Compiler{
+ Modules: map[string]*Module{},
+ RewrittenVars: map[Var]Var{},
+ Required: &Capabilities{},
+ ruleIndices: util.NewHasherMap[Ref, RuleIndex](RefEqual),
+ maxErrs: CompileErrorLimitDefault,
+ after: map[string][]CompilerStageDefinition{},
+ unsafeBuiltinsMap: map[string]struct{}{},
+ deprecatedBuiltinsMap: map[string]struct{}{},
+ comprehensionIndices: map[*Term]*ComprehensionIndex{},
+ debug: debug.Discard(),
+ defaultRegoVersion: DefaultRegoVersion,
+ }
+
+ c.ModuleTree = NewModuleTree(nil)
+ c.RuleTree = NewRuleTree(c.ModuleTree)
+
+ c.stages = []stage{
+ // Reference resolution should run first as it may be used to lazily
+ // load additional modules. If any stages run before resolution, they
+ // need to be re-run after resolution.
+ {"ResolveRefs", "compile_stage_resolve_refs", c.resolveAllRefs},
+ // The local variable generator must be initialized after references are
+ // resolved and the dynamic module loader has run but before subsequent
+ // stages that need to generate variables.
+ {"InitLocalVarGen", "compile_stage_init_local_var_gen", c.initLocalVarGen},
+ {"RewriteRuleHeadRefs", "compile_stage_rewrite_rule_head_refs", c.rewriteRuleHeadRefs},
+ {"CheckKeywordOverrides", "compile_stage_check_keyword_overrides", c.checkKeywordOverrides},
+ {"CheckDuplicateImports", "compile_stage_check_imports", c.checkImports},
+ {"RemoveImports", "compile_stage_remove_imports", c.removeImports},
+ {"SetModuleTree", "compile_stage_set_module_tree", c.setModuleTree},
+ {"SetRuleTree", "compile_stage_set_rule_tree", c.setRuleTree}, // depends on RewriteRuleHeadRefs
+ {"RewriteLocalVars", "compile_stage_rewrite_local_vars", c.rewriteLocalVars},
+ {"CheckVoidCalls", "compile_stage_check_void_calls", c.checkVoidCalls},
+ {"RewritePrintCalls", "compile_stage_rewrite_print_calls", c.rewritePrintCalls},
+ {"RewriteExprTerms", "compile_stage_rewrite_expr_terms", c.rewriteExprTerms},
+ {"ParseMetadataBlocks", "compile_stage_parse_metadata_blocks", c.parseMetadataBlocks},
+ {"SetAnnotationSet", "compile_stage_set_annotationset", c.setAnnotationSet},
+ {"RewriteRegoMetadataCalls", "compile_stage_rewrite_rego_metadata_calls", c.rewriteRegoMetadataCalls},
+ {"SetGraph", "compile_stage_set_graph", c.setGraph},
+ {"RewriteComprehensionTerms", "compile_stage_rewrite_comprehension_terms", c.rewriteComprehensionTerms},
+ {"RewriteRefsInHead", "compile_stage_rewrite_refs_in_head", c.rewriteRefsInHead},
+ {"RewriteWithValues", "compile_stage_rewrite_with_values", c.rewriteWithModifiers},
+ {"CheckRuleConflicts", "compile_stage_check_rule_conflicts", c.checkRuleConflicts},
+ {"CheckUndefinedFuncs", "compile_stage_check_undefined_funcs", c.checkUndefinedFuncs},
+ {"CheckSafetyRuleHeads", "compile_stage_check_safety_rule_heads", c.checkSafetyRuleHeads},
+ {"CheckSafetyRuleBodies", "compile_stage_check_safety_rule_bodies", c.checkSafetyRuleBodies},
+ {"RewriteEquals", "compile_stage_rewrite_equals", c.rewriteEquals},
+ {"RewriteDynamicTerms", "compile_stage_rewrite_dynamic_terms", c.rewriteDynamicTerms},
+ {"RewriteTestRulesForTracing", "compile_stage_rewrite_test_rules_for_tracing", c.rewriteTestRuleEqualities}, // must run after RewriteDynamicTerms
+ {"CheckRecursion", "compile_stage_check_recursion", c.checkRecursion},
+ {"CheckTypes", "compile_stage_check_types", c.checkTypes}, // must be run after CheckRecursion
+ {"CheckUnsafeBuiltins", "compile_state_check_unsafe_builtins", c.checkUnsafeBuiltins},
+ {"CheckDeprecatedBuiltins", "compile_state_check_deprecated_builtins", c.checkDeprecatedBuiltins},
+ {"BuildRuleIndices", "compile_stage_rebuild_indices", c.buildRuleIndices},
+ {"BuildComprehensionIndices", "compile_stage_rebuild_comprehension_indices", c.buildComprehensionIndices},
+ {"BuildRequiredCapabilities", "compile_stage_build_required_capabilities", c.buildRequiredCapabilities},
+ }
+
+ return c
+}
+
+// SetErrorLimit sets the number of errors the compiler can encounter before it
+// quits. Zero or a negative number indicates no limit.
+func (c *Compiler) SetErrorLimit(limit int) *Compiler {
+ c.maxErrs = limit
+ return c
+}
+
+// WithEnablePrintStatements enables print statements inside of modules compiled
+// by the compiler. If print statements are not enabled, calls to print() are
+// erased at compile-time.
+func (c *Compiler) WithEnablePrintStatements(yes bool) *Compiler {
+ c.enablePrintStatements = yes
+ return c
+}
+
+// WithPathConflictsCheck enables base-virtual document conflict
+// detection. The compiler will check that rules don't overlap with
+// paths that exist as determined by the provided callable.
+func (c *Compiler) WithPathConflictsCheck(fn func([]string) (bool, error)) *Compiler {
+ c.pathExists = fn
+ return c
+}
+
+// WithPathConflictsCheckRoots enables checking path conflicts from the specified root instead
+// of the top root node. Limiting conflict checks to a known set of roots, such as bundle roots,
+// improves performance. Each root has the format of a "/"-delimited string, excluding the "data"
+// root document.
+func (c *Compiler) WithPathConflictsCheckRoots(rootPaths []string) *Compiler {
+ c.pathConflictCheckRoots = rootPaths
+ return c
+}
+
+// WithStageAfter registers a stage to run during compilation after
+// the named stage.
+func (c *Compiler) WithStageAfter(after string, stage CompilerStageDefinition) *Compiler {
+ c.after[after] = append(c.after[after], stage)
+ return c
+}
+
+// WithMetrics will set a metrics.Metrics and be used for profiling
+// the Compiler instance.
+func (c *Compiler) WithMetrics(metrics metrics.Metrics) *Compiler {
+ c.metrics = metrics
+ return c
+}
+
+// WithCapabilities sets capabilities to enable during compilation. Capabilities allow the caller
+// to specify the set of built-in functions available to the policy. In the future, capabilities
+// may be able to restrict access to other language features. Capabilities allow callers to check
+// if policies are compatible with a particular version of OPA. If policies are a compiled for a
+// specific version of OPA, there is no guarantee that _this_ version of OPA can evaluate them
+// successfully.
+func (c *Compiler) WithCapabilities(capabilities *Capabilities) *Compiler {
+ c.capabilities = capabilities
+ return c
+}
+
+// Capabilities returns the capabilities enabled during compilation.
+func (c *Compiler) Capabilities() *Capabilities {
+ return c.capabilities
+}
+
+// WithDebug sets where debug messages are written to. Passing `nil` has no
+// effect.
+func (c *Compiler) WithDebug(sink io.Writer) *Compiler {
+ if sink != nil {
+ c.debug = debug.New(sink)
+ }
+ return c
+}
+
+// WithBuiltins is deprecated.
+// Deprecated: Use WithCapabilities instead.
+func (c *Compiler) WithBuiltins(builtins map[string]*Builtin) *Compiler {
+ c.customBuiltins = maps.Clone(builtins)
+ return c
+}
+
+// WithUnsafeBuiltins is deprecated.
+// Deprecated: Use WithCapabilities instead.
+func (c *Compiler) WithUnsafeBuiltins(unsafeBuiltins map[string]struct{}) *Compiler {
+ maps.Copy(c.unsafeBuiltinsMap, unsafeBuiltins)
+ return c
+}
+
+// WithStrict toggles strict mode in the compiler.
+func (c *Compiler) WithStrict(strict bool) *Compiler {
+ c.strict = strict
+ return c
+}
+
+// WithKeepModules enables retaining unprocessed modules in the compiler.
+// Note that the modules aren't copied on the way in or out -- so when
+// accessing them via ParsedModules(), mutations will occur in the module
+// map that was passed into Compile().`
+func (c *Compiler) WithKeepModules(y bool) *Compiler {
+ c.keepModules = y
+ return c
+}
+
+// WithUseTypeCheckAnnotations use schema annotations during type checking
+func (c *Compiler) WithUseTypeCheckAnnotations(enabled bool) *Compiler {
+ c.useTypeCheckAnnotations = enabled
+ return c
+}
+
+func (c *Compiler) WithAllowUndefinedFunctionCalls(allow bool) *Compiler {
+ c.allowUndefinedFuncCalls = allow
+ return c
+}
+
+// WithEvalMode allows setting the CompilerEvalMode of the compiler
+func (c *Compiler) WithEvalMode(e CompilerEvalMode) *Compiler {
+ c.evalMode = e
+ return c
+}
+
+// WithRewriteTestRules enables rewriting test rules to capture dynamic values in local variables,
+// so they can be accessed by tracing.
+func (c *Compiler) WithRewriteTestRules(rewrite bool) *Compiler {
+ c.rewriteTestRulesForTracing = rewrite
+ return c
+}
+
+// ParsedModules returns the parsed, unprocessed modules from the compiler.
+// It is `nil` if keeping modules wasn't enabled via `WithKeepModules(true)`.
+// The map includes all modules loaded via the ModuleLoader, if one was used.
+func (c *Compiler) ParsedModules() map[string]*Module {
+ return c.parsedModules
+}
+
+func (c *Compiler) QueryCompiler() QueryCompiler {
+ c.init()
+ c0 := *c
+ return newQueryCompiler(&c0)
+}
+
+// Compile runs the compilation process on the input modules. The compiled
+// version of the modules and associated data structures are stored on the
+// compiler. If the compilation process fails for any reason, the compiler will
+// contain a slice of errors.
+func (c *Compiler) Compile(modules map[string]*Module) {
+
+ c.init()
+
+ c.Modules = make(map[string]*Module, len(modules))
+ c.sorted = make([]string, 0, len(modules))
+
+ if c.keepModules {
+ c.parsedModules = make(map[string]*Module, len(modules))
+ } else {
+ c.parsedModules = nil
+ }
+
+ for k, v := range modules {
+ c.Modules[k] = v.Copy()
+ c.sorted = append(c.sorted, k)
+ if c.parsedModules != nil {
+ c.parsedModules[k] = v
+ }
+ }
+
+ sort.Strings(c.sorted)
+
+ c.compile()
+}
+
+// WithSchemas sets a schemaSet to the compiler
+func (c *Compiler) WithSchemas(schemas *SchemaSet) *Compiler {
+ c.schemaSet = schemas
+ return c
+}
+
+// Failed returns true if a compilation error has been encountered.
+func (c *Compiler) Failed() bool {
+ return len(c.Errors) > 0
+}
+
+// ComprehensionIndex returns a data structure specifying how to index comprehension
+// results so that callers do not have to recompute the comprehension more than once.
+// If no index is found, returns nil.
+func (c *Compiler) ComprehensionIndex(term *Term) *ComprehensionIndex {
+ return c.comprehensionIndices[term]
+}
+
+// GetArity returns the number of args a function referred to by ref takes. If
+// ref refers to built-in function, the built-in declaration is consulted,
+// otherwise, the ref is used to perform a ruleset lookup.
+func (c *Compiler) GetArity(ref Ref) int {
+ if bi := c.builtins[ref.String()]; bi != nil {
+ return bi.Decl.Arity()
+ }
+ rules := c.GetRulesExact(ref)
+ if len(rules) == 0 {
+ return -1
+ }
+ return len(rules[0].Head.Args)
+}
+
+// GetRulesExact returns a slice of rules referred to by the reference.
+//
+// E.g., given the following module:
+//
+// package a.b.c
+//
+// p[k] = v { ... } # rule1
+// p[k1] = v1 { ... } # rule2
+//
+// The following calls yield the rules on the right.
+//
+// GetRulesExact("data.a.b.c.p") => [rule1, rule2]
+// GetRulesExact("data.a.b.c.p.x") => nil
+// GetRulesExact("data.a.b.c") => nil
+func (c *Compiler) GetRulesExact(ref Ref) (rules []*Rule) {
+ node := c.RuleTree
+
+ for _, x := range ref {
+ if node = node.Child(x.Value); node == nil {
+ return nil
+ }
+ }
+
+ return extractRules(node.Values)
+}
+
+// GetRulesForVirtualDocument returns a slice of rules that produce the virtual
+// document referred to by the reference.
+//
+// E.g., given the following module:
+//
+// package a.b.c
+//
+// p[k] = v { ... } # rule1
+// p[k1] = v1 { ... } # rule2
+//
+// The following calls yield the rules on the right.
+//
+// GetRulesForVirtualDocument("data.a.b.c.p") => [rule1, rule2]
+// GetRulesForVirtualDocument("data.a.b.c.p.x") => [rule1, rule2]
+// GetRulesForVirtualDocument("data.a.b.c") => nil
+func (c *Compiler) GetRulesForVirtualDocument(ref Ref) (rules []*Rule) {
+
+ node := c.RuleTree
+
+ for _, x := range ref {
+ if node = node.Child(x.Value); node == nil {
+ return nil
+ }
+ if len(node.Values) > 0 {
+ return extractRules(node.Values)
+ }
+ }
+
+ return extractRules(node.Values)
+}
+
+// GetRulesWithPrefix returns a slice of rules that share the prefix ref.
+//
+// E.g., given the following module:
+//
+// package a.b.c
+//
+// p[x] = y { ... } # rule1
+// p[k] = v { ... } # rule2
+// q { ... } # rule3
+//
+// The following calls yield the rules on the right.
+//
+// GetRulesWithPrefix("data.a.b.c.p") => [rule1, rule2]
+// GetRulesWithPrefix("data.a.b.c.p.a") => nil
+// GetRulesWithPrefix("data.a.b.c") => [rule1, rule2, rule3]
+func (c *Compiler) GetRulesWithPrefix(ref Ref) (rules []*Rule) {
+
+ node := c.RuleTree
+
+ for _, x := range ref {
+ if node = node.Child(x.Value); node == nil {
+ return nil
+ }
+ }
+
+ var acc func(node *TreeNode)
+
+ acc = func(node *TreeNode) {
+ rules = append(rules, extractRules(node.Values)...)
+ for _, child := range node.Children {
+ if child.Hide {
+ continue
+ }
+ acc(child)
+ }
+ }
+
+ acc(node)
+
+ return rules
+}
+
+func extractRules(s []any) []*Rule {
+ rules := make([]*Rule, len(s))
+ for i := range s {
+ rules[i] = s[i].(*Rule)
+ }
+ return rules
+}
+
+// GetRules returns a slice of rules that are referred to by ref.
+//
+// E.g., given the following module:
+//
+// package a.b.c
+//
+// p[x] = y { q[x] = y; ... } # rule1
+// q[x] = y { ... } # rule2
+//
+// The following calls yield the rules on the right.
+//
+// GetRules("data.a.b.c.p") => [rule1]
+// GetRules("data.a.b.c.p.x") => [rule1]
+// GetRules("data.a.b.c.q") => [rule2]
+// GetRules("data.a.b.c") => [rule1, rule2]
+// GetRules("data.a.b.d") => nil
+func (c *Compiler) GetRules(ref Ref) (rules []*Rule) {
+
+ set := map[*Rule]struct{}{}
+
+ for _, rule := range c.GetRulesForVirtualDocument(ref) {
+ set[rule] = struct{}{}
+ }
+
+ for _, rule := range c.GetRulesWithPrefix(ref) {
+ set[rule] = struct{}{}
+ }
+
+ for rule := range set {
+ rules = append(rules, rule)
+ }
+
+ return rules
+}
+
+// GetRulesDynamic returns a slice of rules that could be referred to by a ref.
+//
+// Deprecated: use GetRulesDynamicWithOpts
+func (c *Compiler) GetRulesDynamic(ref Ref) []*Rule {
+ return c.GetRulesDynamicWithOpts(ref, RulesOptions{})
+}
+
+// GetRulesDynamicWithOpts returns a slice of rules that could be referred to by
+// a ref.
+// When parts of the ref are statically known, we use that information to narrow
+// down which rules the ref could refer to, but in the most general case this
+// will be an over-approximation.
+//
+// E.g., given the following modules:
+//
+// package a.b.c
+//
+// r1 = 1 # rule1
+//
+// and:
+//
+// package a.d.c
+//
+// r2 = 2 # rule2
+//
+// The following calls yield the rules on the right.
+//
+// GetRulesDynamicWithOpts("data.a[x].c[y]", opts) => [rule1, rule2]
+// GetRulesDynamicWithOpts("data.a[x].c.r2", opts) => [rule2]
+// GetRulesDynamicWithOpts("data.a.b[x][y]", opts) => [rule1]
+//
+// Using the RulesOptions parameter, the inclusion of hidden modules can be
+// controlled:
+//
+// With
+//
+// package system.main
+//
+// r3 = 3 # rule3
+//
+// We'd get this result:
+//
+// GetRulesDynamicWithOpts("data[x]", RulesOptions{IncludeHiddenModules: true}) => [rule1, rule2, rule3]
+//
+// Without the options, it would be excluded.
+func (c *Compiler) GetRulesDynamicWithOpts(ref Ref, opts RulesOptions) []*Rule {
+ node := c.RuleTree
+
+ set := map[*Rule]struct{}{}
+ var walk func(node *TreeNode, i int)
+ walk = func(node *TreeNode, i int) {
+ switch {
+ case i >= len(ref):
+ // We've reached the end of the reference and want to collect everything
+ // under this "prefix".
+ node.DepthFirst(func(descendant *TreeNode) bool {
+ insertRules(set, descendant.Values)
+ if opts.IncludeHiddenModules {
+ return false
+ }
+ return descendant.Hide
+ })
+
+ case i == 0 || IsConstant(ref[i].Value):
+ // The head of the ref is always grounded. In case another part of the
+ // ref is also grounded, we can lookup the exact child. If it's not found
+ // we can immediately return...
+ if child := node.Child(ref[i].Value); child != nil {
+ if len(child.Values) > 0 {
+ // Add any rules at this position
+ insertRules(set, child.Values)
+ }
+ // There might still be "sub-rules" contributing key-value "overrides" for e.g. partial object rules, continue walking
+ walk(child, i+1)
+ } else {
+ return
+ }
+
+ default:
+ // This part of the ref is a dynamic term. We can't know what it refers
+ // to and will just need to try all of the children.
+ for _, child := range node.Children {
+ if child.Hide && !opts.IncludeHiddenModules {
+ continue
+ }
+ insertRules(set, child.Values)
+ walk(child, i+1)
+ }
+ }
+ }
+
+ walk(node, 0)
+ rules := make([]*Rule, 0, len(set))
+ for rule := range set {
+ rules = append(rules, rule)
+ }
+ return rules
+}
+
+// Utility: add all rule values to the set.
+func insertRules(set map[*Rule]struct{}, rules []any) {
+ for _, rule := range rules {
+ set[rule.(*Rule)] = struct{}{}
+ }
+}
+
+// RuleIndex returns a RuleIndex built for the rule set referred to by path.
+// The path must refer to the rule set exactly, i.e., given a rule set at path
+// data.a.b.c.p, refs data.a.b.c.p.x and data.a.b.c would not return a
+// RuleIndex built for the rule.
+func (c *Compiler) RuleIndex(path Ref) RuleIndex {
+ r, ok := c.ruleIndices.Get(path)
+ if !ok {
+ return nil
+ }
+ return r
+}
+
+// PassesTypeCheck determines whether the given body passes type checking
+func (c *Compiler) PassesTypeCheck(body Body) bool {
+ checker := newTypeChecker().WithSchemaSet(c.schemaSet).WithInputType(c.inputType)
+ env := c.TypeEnv
+ _, errs := checker.CheckBody(env, body)
+ return len(errs) == 0
+}
+
+// PassesTypeCheckRules determines whether the given rules passes type checking
+func (c *Compiler) PassesTypeCheckRules(rules []*Rule) Errors {
+ elems := []util.T{}
+
+ for _, rule := range rules {
+ elems = append(elems, rule)
+ }
+
+ // Load the global input schema if one was provided.
+ if c.schemaSet != nil {
+ if schema := c.schemaSet.Get(SchemaRootRef); schema != nil {
+
+ var allowNet []string
+ if c.capabilities != nil {
+ allowNet = c.capabilities.AllowNet
+ }
+
+ tpe, err := loadSchema(schema, allowNet)
+ if err != nil {
+ return Errors{NewError(TypeErr, nil, "%s", err.Error())}
+ }
+ c.inputType = tpe
+ }
+ }
+
+ var as *AnnotationSet
+ if c.useTypeCheckAnnotations {
+ as = c.annotationSet
+ }
+
+ checker := newTypeChecker().WithSchemaSet(c.schemaSet).WithInputType(c.inputType)
+
+ if c.TypeEnv == nil {
+ if c.capabilities == nil {
+ c.capabilities = CapabilitiesForThisVersion()
+ }
+
+ c.builtins = make(map[string]*Builtin, len(c.capabilities.Builtins)+len(c.customBuiltins))
+
+ for _, bi := range c.capabilities.Builtins {
+ c.builtins[bi.Name] = bi
+ }
+
+ maps.Copy(c.builtins, c.customBuiltins)
+
+ c.TypeEnv = checker.Env(c.builtins)
+ }
+
+ _, errs := checker.CheckTypes(c.TypeEnv, elems, as)
+ return errs
+}
+
+// ModuleLoader defines the interface that callers can implement to enable lazy
+// loading of modules during compilation.
+type ModuleLoader func(resolved map[string]*Module) (parsed map[string]*Module, err error)
+
+// WithModuleLoader sets f as the ModuleLoader on the compiler.
+//
+// The compiler will invoke the ModuleLoader after resolving all references in
+// the current set of input modules. The ModuleLoader can return a new
+// collection of parsed modules that are to be included in the compilation
+// process. This process will repeat until the ModuleLoader returns an empty
+// collection or an error. If an error is returned, compilation will stop
+// immediately.
+func (c *Compiler) WithModuleLoader(f ModuleLoader) *Compiler {
+ c.moduleLoader = f
+ return c
+}
+
+// WithDefaultRegoVersion sets the default Rego version to use when a module doesn't specify one;
+// such as when it's hand-crafted instead of parsed.
+func (c *Compiler) WithDefaultRegoVersion(regoVersion RegoVersion) *Compiler {
+ c.defaultRegoVersion = regoVersion
+ return c
+}
+
+func (c *Compiler) counterAdd(name string, n uint64) {
+ if c.metrics == nil {
+ return
+ }
+ c.metrics.Counter(name).Add(n)
+}
+
+func (c *Compiler) buildRuleIndices() {
+
+ c.RuleTree.DepthFirst(func(node *TreeNode) bool {
+ if len(node.Values) == 0 {
+ return false
+ }
+ rules := extractRules(node.Values)
+ hasNonGroundRef := false
+ for _, r := range rules {
+ hasNonGroundRef = !r.Head.Ref().IsGround()
+ }
+ if hasNonGroundRef {
+ // Collect children to ensure that all rules within the extent of a rule with a general ref
+ // are found on the same index. E.g. the following rules should be indexed under data.a.b.c:
+ //
+ // package a
+ // b.c[x].e := 1 { x := input.x }
+ // b.c.d := 2
+ // b.c.d2.e[x] := 3 { x := input.x }
+ for _, child := range node.Children {
+ child.DepthFirst(func(c *TreeNode) bool {
+ rules = append(rules, extractRules(c.Values)...)
+ return false
+ })
+ }
+ }
+
+ index := newBaseDocEqIndex(func(ref Ref) bool {
+ return isVirtual(c.RuleTree, ref.GroundPrefix())
+ })
+ if index.Build(rules) {
+ c.ruleIndices.Put(rules[0].Ref().GroundPrefix(), index)
+ }
+ return hasNonGroundRef // currently, we don't allow those branches to go deeper
+ })
+
+}
+
+func (c *Compiler) buildComprehensionIndices() {
+ for _, name := range c.sorted {
+ WalkRules(c.Modules[name], func(r *Rule) bool {
+ candidates := ReservedVars.Copy()
+ if len(r.Head.Args) > 0 {
+ candidates.Update(r.Head.Args.Vars())
+ }
+ n := buildComprehensionIndices(c.debug, c.GetArity, candidates, c.RewrittenVars, r.Body, c.comprehensionIndices)
+ c.counterAdd(compileStageComprehensionIndexBuild, n)
+ return false
+ })
+ }
+}
+
+var futureKeywordsPrefix = Ref{FutureRootDocument, InternedTerm("keywords")}
+
+// buildRequiredCapabilities updates the required capabilities on the compiler
+// to include any keyword and feature dependencies present in the modules. The
+// built-in function dependencies will have already been added by the type
+// checker.
+func (c *Compiler) buildRequiredCapabilities() {
+
+ features := map[string]struct{}{}
+
+ // extract required keywords from modules
+
+ keywords := map[string]struct{}{}
+
+ for _, name := range c.sorted {
+ for _, imp := range c.imports[name] {
+ mod := c.Modules[name]
+ path := imp.Path.Value.(Ref)
+ switch {
+ case path.Equal(RegoV1CompatibleRef):
+ if !c.moduleIsRegoV1(mod) {
+ features[FeatureRegoV1Import] = struct{}{}
+ }
+ case path.HasPrefix(futureKeywordsPrefix):
+ if len(path) == 2 {
+ if c.moduleIsRegoV1(mod) {
+ for kw := range futureKeywords {
+ keywords[kw] = struct{}{}
+ }
+ } else {
+ for kw := range allFutureKeywords {
+ keywords[kw] = struct{}{}
+ }
+ }
+ } else {
+ kw := string(path[2].Value.(String))
+ if c.moduleIsRegoV1(mod) {
+ for allowedKw := range futureKeywords {
+ if kw == allowedKw {
+ keywords[kw] = struct{}{}
+ break
+ }
+ }
+ } else {
+ for allowedKw := range allFutureKeywords {
+ if kw == allowedKw {
+ keywords[kw] = struct{}{}
+ break
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ c.Required.FutureKeywords = util.KeysSorted(keywords)
+
+ // extract required features from modules
+
+ for _, name := range c.sorted {
+ mod := c.Modules[name]
+
+ if c.moduleIsRegoV1(mod) {
+ features[FeatureRegoV1] = struct{}{}
+ } else {
+ for _, rule := range mod.Rules {
+ refLen := len(rule.Head.Reference)
+ if refLen >= 3 {
+ if refLen > len(rule.Head.Reference.ConstantPrefix()) {
+ features[FeatureRefHeads] = struct{}{}
+ } else {
+ features[FeatureRefHeadStringPrefixes] = struct{}{}
+ }
+ }
+ }
+ }
+ }
+
+ c.Required.Features = util.KeysSorted(features)
+
+ for i, bi := range c.Required.Builtins {
+ c.Required.Builtins[i] = bi.Minimal()
+ }
+}
+
+// checkRecursion ensures that there are no recursive definitions, i.e., there are
+// no cycles in the Graph.
+func (c *Compiler) checkRecursion() {
+ eq := func(a, b util.T) bool {
+ return a.(*Rule) == b.(*Rule)
+ }
+
+ c.RuleTree.DepthFirst(func(node *TreeNode) bool {
+ for _, rule := range node.Values {
+ for node := rule.(*Rule); node != nil; node = node.Else {
+ c.checkSelfPath(node.Loc(), eq, node, node)
+ }
+ }
+ return false
+ })
+}
+
+func (c *Compiler) checkSelfPath(loc *Location, eq func(a, b util.T) bool, a, b util.T) {
+ tr := NewGraphTraversal(c.Graph)
+ if p := util.DFSPath(tr, eq, a, b); len(p) > 0 {
+ n := make([]string, 0, len(p))
+ for _, x := range p {
+ n = append(n, astNodeToString(x))
+ }
+ c.err(NewError(RecursionErr, loc, "rule %v is recursive: %v", astNodeToString(a), strings.Join(n, " -> ")))
+ }
+}
+
+func astNodeToString(x any) string {
+ return x.(*Rule).Ref().String()
+}
+
+// checkRuleConflicts ensures that rules definitions are not in conflict.
+func (c *Compiler) checkRuleConflicts() {
+ rw := rewriteVarsInRef(c.RewrittenVars)
+
+ c.RuleTree.DepthFirst(func(node *TreeNode) bool {
+ if len(node.Values) == 0 {
+ return false // go deeper
+ }
+
+ kinds := make(map[RuleKind]struct{}, len(node.Values))
+ completeRules := 0
+ partialRules := 0
+ arities := make(map[int]struct{}, len(node.Values))
+ name := ""
+ var conflicts []Ref
+ defaultRules := make([]*Rule, 0)
+
+ for _, rule := range node.Values {
+ r := rule.(*Rule)
+ ref := r.Ref()
+ name = rw(ref.CopyNonGround()).String() // varRewriter operates in-place
+ kinds[r.Head.RuleKind()] = struct{}{}
+ arities[len(r.Head.Args)] = struct{}{}
+ if r.Default {
+ defaultRules = append(defaultRules, r)
+ }
+
+ // Single-value rules may not have any other rules in their extent.
+ // Rules with vars in their ref are allowed to have rules inside their extent.
+ // Only the ground portion (terms before the first var term) of a rule's ref is considered when determining
+ // whether it's inside the extent of another (c.RuleTree is organized this way already).
+ // These pairs are invalid:
+ //
+ // data.p.q.r { true } # data.p.q is { "r": true }
+ // data.p.q.r.s { true }
+ //
+ // data.p.q.r { true }
+ // data.p.q.r[s].t { s = input.key }
+ //
+ // But this is allowed:
+ //
+ // data.p.q.r { true }
+ // data.p.q[r].s.t { r = input.key }
+ //
+ // data.p[r] := x { r = input.key; x = input.bar }
+ // data.p.q[r] := x { r = input.key; x = input.bar }
+ //
+ // data.p.q[r] { r := input.r }
+ // data.p.q.r.s { true }
+ //
+ // data.p.q[r] = 1 { r := "r" }
+ // data.p.q.s = 2
+ //
+ // data.p[q][r] { q := input.q; r := input.r }
+ // data.p.q.r { true }
+ //
+ // data.p.q[r] { r := input.r }
+ // data.p[q].r { q := input.q }
+ //
+ // data.p.q[r][s] { r := input.r; s := input.s }
+ // data.p[q].r.s { q := input.q }
+
+ if ref.IsGround() && len(node.Children) > 0 {
+ conflicts = node.flattenChildren()
+ }
+
+ if r.Head.RuleKind() == SingleValue && r.Head.Ref().IsGround() {
+ completeRules++
+ } else {
+ partialRules++
+ }
+ }
+
+ switch {
+ case conflicts != nil:
+ c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "rule %v conflicts with %v", name, conflicts))
+
+ case len(kinds) > 1 || len(arities) > 1 || (completeRules >= 1 && partialRules >= 1):
+ c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "conflicting rules %v found", name))
+
+ case len(defaultRules) > 1:
+
+ defaultRuleLocations := strings.Builder{}
+ defaultRuleLocations.WriteString(defaultRules[0].Loc().String())
+ for i := 1; i < len(defaultRules); i++ {
+ defaultRuleLocations.WriteString(", ")
+ defaultRuleLocations.WriteString(defaultRules[i].Loc().String())
+ }
+
+ c.err(NewError(
+ TypeErr,
+ defaultRules[0].Module.Package.Loc(),
+ "multiple default rules %s found at %s",
+ name, defaultRuleLocations.String()),
+ )
+ }
+
+ return false
+ })
+
+ if c.pathExists != nil {
+ for _, err := range CheckPathConflicts(c, c.pathExists) {
+ c.err(err)
+ }
+ }
+
+ // NOTE(sr): depthfirst might better use sorted for stable errs?
+ c.ModuleTree.DepthFirst(func(node *ModuleTreeNode) bool {
+ for _, mod := range node.Modules {
+ for _, rule := range mod.Rules {
+ ref := rule.Head.Ref().GroundPrefix()
+ // Rules with a dynamic portion in their ref are exempted, as a conflict within the dynamic portion
+ // can only be detected at eval-time.
+ if len(ref) < len(rule.Head.Ref()) {
+ continue
+ }
+
+ childNode, tail := node.find(ref)
+ if childNode != nil && len(tail) == 0 {
+ for _, childMod := range childNode.Modules {
+ // Avoid recursively checking a module for equality unless we know it's a possible self-match.
+ if childMod.Equal(mod) {
+ continue // don't self-conflict
+ }
+ msg := fmt.Sprintf("%v conflicts with rule %v defined at %v", childMod.Package, rule.Head.Ref(), rule.Loc())
+ c.err(NewError(TypeErr, mod.Package.Loc(), "%s", msg))
+ }
+ }
+ }
+ }
+ return false
+ })
+}
+
+func (c *Compiler) checkUndefinedFuncs() {
+ for _, name := range c.sorted {
+ m := c.Modules[name]
+ for _, err := range checkUndefinedFuncs(c.TypeEnv, m, c.GetArity, c.RewrittenVars) {
+ c.err(err)
+ }
+ }
+}
+
+func checkUndefinedFuncs(env *TypeEnv, x any, arity func(Ref) int, rwVars map[Var]Var) Errors {
+
+ var errs Errors
+
+ WalkExprs(x, func(expr *Expr) bool {
+ if !expr.IsCall() {
+ return false
+ }
+ ref := expr.Operator()
+ if arity := arity(ref); arity >= 0 {
+ operands := len(expr.Operands())
+ if expr.Generated { // an output var was added
+ if !expr.IsEquality() && operands != arity+1 {
+ ref = rewriteVarsInRef(rwVars)(ref)
+ errs = append(errs, arityMismatchError(env, ref, expr, arity, operands-1))
+ return true
+ }
+ } else { // either output var or not
+ if operands != arity && operands != arity+1 {
+ ref = rewriteVarsInRef(rwVars)(ref)
+ errs = append(errs, arityMismatchError(env, ref, expr, arity, operands))
+ return true
+ }
+ }
+ return false
+ }
+ ref = rewriteVarsInRef(rwVars)(ref)
+ errs = append(errs, NewError(TypeErr, expr.Loc(), "undefined function %v", ref))
+ return true
+ })
+
+ return errs
+}
+
+func arityMismatchError(env *TypeEnv, f Ref, expr *Expr, exp, act int) *Error {
+ if want, ok := env.Get(f).(*types.Function); ok { // generate richer error for built-in functions
+ have := make([]types.Type, len(expr.Operands()))
+ for i, op := range expr.Operands() {
+ have[i] = env.Get(op)
+ }
+ return newArgError(expr.Loc(), f, "arity mismatch", have, want.NamedFuncArgs())
+ }
+ if act != 1 {
+ return NewError(TypeErr, expr.Loc(), "function %v has arity %d, got %d arguments", f, exp, act)
+ }
+ return NewError(TypeErr, expr.Loc(), "function %v has arity %d, got %d argument", f, exp, act)
+}
+
+// checkSafetyRuleBodies ensures that variables appearing in negated expressions or non-target
+// positions of built-in expressions will be bound when evaluating the rule from left
+// to right, re-ordering as necessary.
+func (c *Compiler) checkSafetyRuleBodies() {
+ for _, name := range c.sorted {
+ m := c.Modules[name]
+ WalkRules(m, func(r *Rule) bool {
+ safe := ReservedVars.Copy()
+ if len(r.Head.Args) > 0 {
+ safe.Update(r.Head.Args.Vars())
+ }
+ r.Body = c.checkBodySafety(safe, r.Body)
+ return false
+ })
+ }
+}
+
+func (c *Compiler) checkBodySafety(safe VarSet, b Body) Body {
+ reordered, unsafe := reorderBodyForSafety(c.builtins, c.GetArity, safe, b)
+ if errs := safetyErrorSlice(unsafe, c.RewrittenVars); len(errs) > 0 {
+ for _, err := range errs {
+ c.err(err)
+ }
+ return b
+ }
+ return reordered
+}
+
+// SafetyCheckVisitorParams defines the AST visitor parameters to use for collecting
+// variables during the safety check. This has to be exported because it's relied on
+// by the copy propagation implementation in topdown.
+var SafetyCheckVisitorParams = VarVisitorParams{
+ SkipRefCallHead: true,
+ SkipClosures: true,
+}
+
+// checkSafetyRuleHeads ensures that variables appearing in the head of a
+// rule also appear in the body.
+func (c *Compiler) checkSafetyRuleHeads() {
+ for _, name := range c.sorted {
+ WalkRules(c.Modules[name], func(r *Rule) bool {
+ safe := r.Body.Vars(SafetyCheckVisitorParams)
+ if len(r.Head.Args) > 0 {
+ safe.Update(r.Head.Args.Vars())
+ }
+ if headMayHaveVars(r.Head) {
+ vars := r.Head.Vars()
+ if vars.DiffCount(safe) > 0 {
+ unsafe := vars.Diff(safe)
+ for v := range unsafe {
+ if w, ok := c.RewrittenVars[v]; ok {
+ v = w
+ }
+ if !v.IsGenerated() {
+ c.err(NewError(UnsafeVarErr, r.Loc(), "var %v is unsafe", v))
+ }
+ }
+ }
+ }
+ return false
+ })
+ }
+}
+
+func compileSchema(goSchema any, allowNet []string) (*gojsonschema.Schema, error) {
+ gojsonschema.SetAllowNet(allowNet)
+
+ var refLoader gojsonschema.JSONLoader
+ sl := gojsonschema.NewSchemaLoader()
+
+ if goSchema != nil {
+ refLoader = gojsonschema.NewGoLoader(goSchema)
+ } else {
+ return nil, errors.New("no schema as input to compile")
+ }
+ schemasCompiled, err := sl.Compile(refLoader)
+ if err != nil {
+ return nil, fmt.Errorf("unable to compile the schema: %w", err)
+ }
+ return schemasCompiled, nil
+}
+
+func mergeSchemas(schemas ...*gojsonschema.SubSchema) (*gojsonschema.SubSchema, error) {
+ if len(schemas) == 0 {
+ return nil, nil
+ }
+ var result = schemas[0]
+
+ for i := range schemas {
+ if len(schemas[i].PropertiesChildren) > 0 {
+ if !schemas[i].Types.Contains("object") {
+ if err := schemas[i].Types.Add("object"); err != nil {
+ return nil, errors.New("unable to set the type in schemas")
+ }
+ }
+ } else if len(schemas[i].ItemsChildren) > 0 {
+ if !schemas[i].Types.Contains("array") {
+ if err := schemas[i].Types.Add("array"); err != nil {
+ return nil, errors.New("unable to set the type in schemas")
+ }
+ }
+ }
+ }
+
+ for i := 1; i < len(schemas); i++ {
+ if result.Types.String() != schemas[i].Types.String() {
+ return nil, fmt.Errorf("unable to merge these schemas: type mismatch: %v and %v", result.Types.String(), schemas[i].Types.String())
+ } else if result.Types.Contains("object") && len(result.PropertiesChildren) > 0 && schemas[i].Types.Contains("object") && len(schemas[i].PropertiesChildren) > 0 {
+ result.PropertiesChildren = append(result.PropertiesChildren, schemas[i].PropertiesChildren...)
+ } else if result.Types.Contains("array") && len(result.ItemsChildren) > 0 && schemas[i].Types.Contains("array") && len(schemas[i].ItemsChildren) > 0 {
+ for j := range len(schemas[i].ItemsChildren) {
+ if len(result.ItemsChildren)-1 < j && !(len(schemas[i].ItemsChildren)-1 < j) {
+ result.ItemsChildren = append(result.ItemsChildren, schemas[i].ItemsChildren[j])
+ }
+ if result.ItemsChildren[j].Types.String() != schemas[i].ItemsChildren[j].Types.String() {
+ return nil, errors.New("unable to merge these schemas")
+ }
+ }
+ }
+ }
+ return result, nil
+}
+
+type schemaParser struct {
+ definitionCache map[string]*cachedDef
+}
+
+type cachedDef struct {
+ properties []*types.StaticProperty
+}
+
+func newSchemaParser() *schemaParser {
+ return &schemaParser{
+ definitionCache: map[string]*cachedDef{},
+ }
+}
+
+func (parser *schemaParser) parseSchema(schema any) (types.Type, error) {
+ return parser.parseSchemaWithPropertyKey(schema, "")
+}
+
+func (parser *schemaParser) parseSchemaWithPropertyKey(schema any, propertyKey string) (types.Type, error) {
+ subSchema, ok := schema.(*gojsonschema.SubSchema)
+ if !ok {
+ return nil, fmt.Errorf("unexpected schema type %v", subSchema)
+ }
+
+ // Handle referenced schemas, returns directly when a $ref is found
+ if subSchema.RefSchema != nil {
+ if existing, ok := parser.definitionCache[subSchema.Ref.String()]; ok {
+ return types.NewObject(existing.properties, nil), nil
+ }
+ return parser.parseSchemaWithPropertyKey(subSchema.RefSchema, subSchema.Ref.String())
+ }
+
+ // Handle anyOf
+ if subSchema.AnyOf != nil {
+ var orType types.Type
+
+ // If there is a core schema, find its type first
+ if subSchema.Types.IsTyped() {
+ copySchema := *subSchema
+ copySchemaRef := ©Schema
+ copySchemaRef.AnyOf = nil
+ coreType, err := parser.parseSchema(copySchemaRef)
+ if err != nil {
+ return nil, fmt.Errorf("unexpected schema type %v: %w", subSchema, err)
+ }
+
+ // Only add Object type with static props to orType
+ if objType, ok := coreType.(*types.Object); ok {
+ if objType.StaticProperties() != nil && objType.DynamicProperties() == nil {
+ orType = types.Or(orType, coreType)
+ }
+ }
+ }
+
+ // Iterate through every property of AnyOf and add it to orType
+ for _, pSchema := range subSchema.AnyOf {
+ newtype, err := parser.parseSchema(pSchema)
+ if err != nil {
+ return nil, fmt.Errorf("unexpected schema type %v: %w", pSchema, err)
+ }
+ orType = types.Or(newtype, orType)
+ }
+
+ return orType, nil
+ }
+
+ if subSchema.AllOf != nil {
+ subSchemaArray := subSchema.AllOf
+ allOfResult, err := mergeSchemas(subSchemaArray...)
+ if err != nil {
+ return nil, err
+ }
+
+ if subSchema.Types.IsTyped() {
+ if (subSchema.Types.Contains("object") && allOfResult.Types.Contains("object")) || (subSchema.Types.Contains("array") && allOfResult.Types.Contains("array")) {
+ objectOrArrayResult, err := mergeSchemas(allOfResult, subSchema)
+ if err != nil {
+ return nil, err
+ }
+ return parser.parseSchema(objectOrArrayResult)
+ } else if subSchema.Types.String() != allOfResult.Types.String() {
+ return nil, errors.New("unable to merge these schemas")
+ }
+ }
+ return parser.parseSchema(allOfResult)
+ }
+
+ if subSchema.Types.IsTyped() {
+ if subSchema.Types.Contains("boolean") {
+ return types.B, nil
+
+ } else if subSchema.Types.Contains("string") {
+ return types.S, nil
+
+ } else if subSchema.Types.Contains("integer") || subSchema.Types.Contains("number") {
+ return types.N, nil
+
+ } else if subSchema.Types.Contains("object") {
+ if len(subSchema.PropertiesChildren) > 0 {
+ def := &cachedDef{
+ properties: make([]*types.StaticProperty, 0, len(subSchema.PropertiesChildren)),
+ }
+ for _, pSchema := range subSchema.PropertiesChildren {
+ def.properties = append(def.properties, types.NewStaticProperty(pSchema.Property, nil))
+ }
+ if propertyKey != "" {
+ parser.definitionCache[propertyKey] = def
+ }
+ for _, pSchema := range subSchema.PropertiesChildren {
+ newtype, err := parser.parseSchema(pSchema)
+ if err != nil {
+ return nil, fmt.Errorf("unexpected schema type %v: %w", pSchema, err)
+ }
+ for i, prop := range def.properties {
+ if prop.Key == pSchema.Property {
+ def.properties[i].Value = newtype
+ break
+ }
+ }
+ }
+ return types.NewObject(def.properties, nil), nil
+ }
+ return types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), nil
+
+ } else if subSchema.Types.Contains("array") {
+ if len(subSchema.ItemsChildren) > 0 {
+ if subSchema.ItemsChildrenIsSingleSchema {
+ iSchema := subSchema.ItemsChildren[0]
+ newtype, err := parser.parseSchema(iSchema)
+ if err != nil {
+ return nil, fmt.Errorf("unexpected schema type %v", iSchema)
+ }
+ return types.NewArray(nil, newtype), nil
+ }
+ newTypes := make([]types.Type, 0, len(subSchema.ItemsChildren))
+ for i := 0; i != len(subSchema.ItemsChildren); i++ {
+ iSchema := subSchema.ItemsChildren[i]
+ newtype, err := parser.parseSchema(iSchema)
+ if err != nil {
+ return nil, fmt.Errorf("unexpected schema type %v", iSchema)
+ }
+ newTypes = append(newTypes, newtype)
+ }
+ return types.NewArray(newTypes, nil), nil
+ }
+ return types.NewArray(nil, types.A), nil
+ }
+ }
+
+ // Assume types if not specified in schema
+ if len(subSchema.PropertiesChildren) > 0 {
+ if err := subSchema.Types.Add("object"); err == nil {
+ return parser.parseSchema(subSchema)
+ }
+ } else if len(subSchema.ItemsChildren) > 0 {
+ if err := subSchema.Types.Add("array"); err == nil {
+ return parser.parseSchema(subSchema)
+ }
+ }
+
+ return types.A, nil
+}
+
+func (c *Compiler) setAnnotationSet() {
+ // Sorting modules by name for stable error reporting
+ sorted := make([]*Module, 0, len(c.Modules))
+ for _, mName := range c.sorted {
+ sorted = append(sorted, c.Modules[mName])
+ }
+
+ as, errs := BuildAnnotationSet(sorted)
+ for _, err := range errs {
+ c.err(err)
+ }
+ c.annotationSet = as
+}
+
+// checkTypes runs the type checker on all rules. The type checker builds a
+// TypeEnv that is stored on the compiler.
+func (c *Compiler) checkTypes() {
+ // Recursion is caught in earlier step, so this cannot fail.
+ sorted, _ := c.Graph.Sort()
+ checker := newTypeChecker().
+ WithAllowNet(c.capabilities.AllowNet).
+ WithSchemaSet(c.schemaSet).
+ WithInputType(c.inputType).
+ WithBuiltins(c.builtins).
+ WithRequiredCapabilities(c.Required).
+ WithVarRewriter(rewriteVarsInRef(c.RewrittenVars)).
+ WithAllowUndefinedFunctionCalls(c.allowUndefinedFuncCalls)
+ var as *AnnotationSet
+ if c.useTypeCheckAnnotations {
+ as = c.annotationSet
+ }
+ env, errs := checker.CheckTypes(c.TypeEnv, sorted, as)
+ for _, err := range errs {
+ c.err(err)
+ }
+ c.TypeEnv = env
+}
+
+func (c *Compiler) checkUnsafeBuiltins() {
+ if len(c.unsafeBuiltinsMap) == 0 {
+ return
+ }
+
+ for _, name := range c.sorted {
+ errs := checkUnsafeBuiltins(c.unsafeBuiltinsMap, c.Modules[name])
+ for _, err := range errs {
+ c.err(err)
+ }
+ }
+}
+
+func (c *Compiler) checkDeprecatedBuiltins() {
+ checkNeeded := false
+ for _, b := range c.Required.Builtins {
+ if _, found := c.deprecatedBuiltinsMap[b.Name]; found {
+ checkNeeded = true
+ break
+ }
+ }
+ if !checkNeeded {
+ return
+ }
+
+ for _, name := range c.sorted {
+ mod := c.Modules[name]
+ if c.strict || mod.regoV1Compatible() {
+ errs := checkDeprecatedBuiltins(c.deprecatedBuiltinsMap, mod)
+ for _, err := range errs {
+ c.err(err)
+ }
+ }
+ }
+}
+
+func (c *Compiler) runStage(metricName string, f func()) {
+ if c.metrics != nil {
+ c.metrics.Timer(metricName).Start()
+ defer c.metrics.Timer(metricName).Stop()
+ }
+ f()
+}
+
+func (c *Compiler) runStageAfter(metricName string, s CompilerStage) *Error {
+ if c.metrics != nil {
+ c.metrics.Timer(metricName).Start()
+ defer c.metrics.Timer(metricName).Stop()
+ }
+ return s(c)
+}
+
+func (c *Compiler) compile() {
+
+ defer func() {
+ if r := recover(); r != nil && r != errLimitReached {
+ panic(r)
+ }
+ }()
+
+ for _, s := range c.stages {
+ if c.evalMode == EvalModeIR {
+ switch s.name {
+ case "BuildRuleIndices", "BuildComprehensionIndices":
+ continue // skip these stages
+ }
+ }
+
+ if c.allowUndefinedFuncCalls && (s.name == "CheckUndefinedFuncs" || s.name == "CheckSafetyRuleBodies") {
+ continue
+ }
+
+ c.runStage(s.metricName, s.f)
+ if c.Failed() {
+ return
+ }
+ for _, a := range c.after[s.name] {
+ if err := c.runStageAfter(a.MetricName, a.Stage); err != nil {
+ c.err(err)
+ return
+ }
+ }
+ }
+}
+
+func (c *Compiler) init() {
+
+ if c.initialized {
+ return
+ }
+
+ if defaultModuleLoader != nil {
+ if c.moduleLoader == nil {
+ c.moduleLoader = defaultModuleLoader
+ } else {
+ first := c.moduleLoader
+ c.moduleLoader = func(res map[string]*Module) (map[string]*Module, error) {
+ res0, err := first(res)
+ if err != nil {
+ return nil, err
+ }
+ res1, err := defaultModuleLoader(res)
+ if err != nil {
+ return nil, err
+ }
+ // merge res1 into res0, based on module "file" names, to avoid clashes
+ for k, v := range res1 {
+ if _, ok := res0[k]; !ok {
+ res0[k] = v
+ }
+ }
+ return res0, nil
+ }
+ }
+ }
+
+ if c.capabilities == nil {
+ c.capabilities = CapabilitiesForThisVersion()
+ }
+
+ c.builtins = make(map[string]*Builtin, len(c.capabilities.Builtins)+len(c.customBuiltins))
+
+ for _, bi := range c.capabilities.Builtins {
+ c.builtins[bi.Name] = bi
+ if bi.IsDeprecated() {
+ c.deprecatedBuiltinsMap[bi.Name] = struct{}{}
+ }
+ }
+
+ maps.Copy(c.builtins, c.customBuiltins)
+
+ // Load the global input schema if one was provided.
+ if c.schemaSet != nil {
+ if schema := c.schemaSet.Get(SchemaRootRef); schema != nil {
+ tpe, err := loadSchema(schema, c.capabilities.AllowNet)
+ if err != nil {
+ c.err(NewError(TypeErr, nil, "%s", err.Error()))
+ } else {
+ c.inputType = tpe
+ }
+ }
+ }
+
+ c.TypeEnv = newTypeChecker().
+ WithSchemaSet(c.schemaSet).
+ WithInputType(c.inputType).
+ Env(c.builtins)
+
+ c.initialized = true
+}
+
+func (c *Compiler) err(err *Error) {
+ if c.maxErrs > 0 && len(c.Errors) >= c.maxErrs {
+ c.Errors = append(c.Errors, errLimitReached)
+ panic(errLimitReached)
+ }
+ c.Errors = append(c.Errors, err)
+}
+
+func (c *Compiler) getExports() *util.HasherMap[Ref, []Ref] {
+
+ rules := util.NewHasherMap[Ref, []Ref](RefEqual)
+
+ for _, name := range c.sorted {
+ mod := c.Modules[name]
+
+ for _, rule := range mod.Rules {
+ hashMapAdd(rules, mod.Package.Path, rule.Head.Ref().GroundPrefix())
+ }
+ }
+
+ return rules
+}
+
+func refSliceEqual(a, b []Ref) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := range a {
+ if !a[i].Equal(b[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+func hashMapAdd(rules *util.HasherMap[Ref, []Ref], pkg, rule Ref) {
+ prev, ok := rules.Get(pkg)
+ if !ok {
+ rules.Put(pkg, []Ref{rule})
+ return
+ }
+ for _, p := range prev {
+ if p.Equal(rule) {
+ return
+ }
+ }
+ rules.Put(pkg, append(prev, rule))
+}
+
+func (c *Compiler) GetAnnotationSet() *AnnotationSet {
+ return c.annotationSet
+}
+
+func (c *Compiler) checkImports() {
+ modules := make([]*Module, 0, len(c.Modules))
+
+ supportsRegoV1Import := c.capabilities.ContainsFeature(FeatureRegoV1Import) ||
+ c.capabilities.ContainsFeature(FeatureRegoV1)
+
+ for _, name := range c.sorted {
+ mod := c.Modules[name]
+
+ for _, imp := range mod.Imports {
+ if !supportsRegoV1Import && RegoV1CompatibleRef.Equal(imp.Path.Value) {
+ c.err(NewError(CompileErr, imp.Loc(), "rego.v1 import is not supported"))
+ }
+ }
+
+ if c.strict || c.moduleIsRegoV1Compatible(mod) {
+ modules = append(modules, mod)
+ }
+ }
+
+ errs := checkDuplicateImports(modules)
+ for _, err := range errs {
+ c.err(err)
+ }
+}
+
+func (c *Compiler) checkKeywordOverrides() {
+ for _, name := range c.sorted {
+ mod := c.Modules[name]
+ if c.strict || c.moduleIsRegoV1Compatible(mod) {
+ errs := checkRootDocumentOverrides(mod)
+ for _, err := range errs {
+ c.err(err)
+ }
+ }
+ }
+}
+
+func (c *Compiler) moduleIsRegoV1(mod *Module) bool {
+ if mod.regoVersion == RegoUndefined {
+ switch c.defaultRegoVersion {
+ case RegoUndefined:
+ c.err(NewError(CompileErr, mod.Package.Loc(), "cannot determine rego version for module"))
+ return false
+ case RegoV1:
+ return true
+ }
+ return false
+ }
+ return mod.regoVersion == RegoV1
+}
+
+func (c *Compiler) moduleIsRegoV1Compatible(mod *Module) bool {
+ if mod.regoVersion == RegoUndefined {
+ switch c.defaultRegoVersion {
+ case RegoUndefined:
+ c.err(NewError(CompileErr, mod.Package.Loc(), "cannot determine rego version for module"))
+ return false
+ case RegoV1, RegoV0CompatV1:
+ return true
+ }
+ return false
+ }
+ return mod.regoV1Compatible()
+}
+
+// resolveAllRefs resolves references in expressions to their fully qualified values.
+//
+// For instance, given the following module:
+//
+// package a.b
+// import data.foo.bar
+// p[x] { bar[_] = x }
+//
+// The reference "bar[_]" would be resolved to "data.foo.bar[_]".
+//
+// Ref rules are resolved, too:
+//
+// package a.b
+// q { c.d.e == 1 }
+// c.d[e] := 1 if e := "e"
+//
+// The reference "c.d.e" would be resolved to "data.a.b.c.d.e".
+func (c *Compiler) resolveAllRefs() {
+
+ rules := c.getExports()
+
+ for _, name := range c.sorted {
+ mod := c.Modules[name]
+
+ var ruleExports []Ref
+ if x, ok := rules.Get(mod.Package.Path); ok {
+ ruleExports = x
+ }
+
+ globals := getGlobals(mod.Package, ruleExports, mod.Imports)
+
+ WalkRules(mod, func(rule *Rule) bool {
+ err := resolveRefsInRule(globals, rule)
+ if err != nil {
+ c.err(NewError(CompileErr, rule.Location, "%s", err.Error()))
+ }
+ return false
+ })
+
+ if c.strict { // check for unused imports
+ for _, imp := range mod.Imports {
+ path := imp.Path.Value.(Ref)
+ if FutureRootDocument.Equal(path[0]) || RegoRootDocument.Equal(path[0]) {
+ continue // ignore future and rego imports
+ }
+
+ for v, u := range globals {
+ if v.Equal(imp.Name()) && !u.used {
+ c.err(NewError(CompileErr, imp.Location, "%s unused", imp.String()))
+ }
+ }
+ }
+ }
+ }
+
+ if c.moduleLoader != nil {
+
+ parsed, err := c.moduleLoader(c.Modules)
+ if err != nil {
+ c.err(NewError(CompileErr, nil, "%s", err.Error()))
+ return
+ }
+
+ if len(parsed) == 0 {
+ return
+ }
+
+ for id, module := range parsed {
+ c.Modules[id] = module.Copy()
+ c.sorted = append(c.sorted, id)
+ if c.parsedModules != nil {
+ c.parsedModules[id] = module
+ }
+ }
+
+ sort.Strings(c.sorted)
+ c.resolveAllRefs()
+ }
+}
+
+func (c *Compiler) removeImports() {
+ c.imports = make(map[string][]*Import, len(c.Modules))
+ for name := range c.Modules {
+ c.imports[name] = c.Modules[name].Imports
+ c.Modules[name].Imports = nil
+ }
+}
+
+func (c *Compiler) initLocalVarGen() {
+ c.localvargen = newLocalVarGeneratorForModuleSet(c.sorted, c.Modules)
+}
+
+func (c *Compiler) rewriteComprehensionTerms() {
+ f := newEqualityFactory(c.localvargen)
+ for _, name := range c.sorted {
+ mod := c.Modules[name]
+ _, _ = rewriteComprehensionTerms(f, mod) // ignore error
+ }
+}
+
+func (c *Compiler) rewriteExprTerms() {
+ for _, name := range c.sorted {
+ mod := c.Modules[name]
+ WalkRules(mod, func(rule *Rule) bool {
+ rewriteExprTermsInHead(c.localvargen, rule)
+ rule.Body = rewriteExprTermsInBody(c.localvargen, rule.Body)
+ return false
+ })
+ }
+}
+
+func (c *Compiler) rewriteRuleHeadRefs() {
+ f := newEqualityFactory(c.localvargen)
+ for _, name := range c.sorted {
+ WalkRules(c.Modules[name], func(rule *Rule) bool {
+
+ ref := rule.Head.Ref()
+ // NOTE(sr): We're backfilling Refs here -- all parser code paths would have them, but
+ // it's possible to construct Module{} instances from Golang code, so we need
+ // to accommodate for that, too.
+ if len(rule.Head.Reference) == 0 {
+ rule.Head.Reference = ref
+ }
+
+ cannotSpeakStringPrefixRefs := true
+ cannotSpeakGeneralRefs := true
+ for _, f := range c.capabilities.Features {
+ switch f {
+ case FeatureRefHeadStringPrefixes:
+ cannotSpeakStringPrefixRefs = false
+ case FeatureRefHeads:
+ cannotSpeakGeneralRefs = false
+ case FeatureRegoV1:
+ cannotSpeakStringPrefixRefs = false
+ cannotSpeakGeneralRefs = false
+ }
+ }
+
+ if cannotSpeakStringPrefixRefs && cannotSpeakGeneralRefs && rule.Head.Name == "" {
+ c.err(NewError(CompileErr, rule.Loc(), "rule heads with refs are not supported: %v", rule.Head.Reference))
+ return true
+ }
+
+ for i := 1; i < len(ref); i++ {
+ if cannotSpeakGeneralRefs && (rule.Head.RuleKind() == MultiValue || i != len(ref)-1) { // last
+ if _, ok := ref[i].Value.(String); !ok {
+ c.err(NewError(TypeErr, rule.Loc(), "rule heads with general refs (containing variables) are not supported: %v", rule.Head.Reference))
+ continue
+ }
+ }
+
+ // Rewrite so that any non-scalar elements in the rule's ref are vars:
+ // p.q.r[y.z] { ... } => p.q.r[__local0__] { __local0__ = y.z }
+ // p.q[a.b][c.d] { ... } => p.q[__local0__] { __local0__ = a.b; __local1__ = c.d }
+ // because that's what the RuleTree knows how to deal with.
+ if _, ok := ref[i].Value.(Var); !ok && !IsScalar(ref[i].Value) {
+ expr := f.Generate(ref[i])
+ if i == len(ref)-1 && rule.Head.Key.Equal(ref[i]) {
+ rule.Head.Key = expr.Operand(0)
+ }
+ rule.Head.Reference[i] = expr.Operand(0)
+ rule.Body.Append(expr)
+ }
+ }
+
+ return true
+ })
+ }
+}
+
+func (c *Compiler) checkVoidCalls() {
+ for _, name := range c.sorted {
+ mod := c.Modules[name]
+ for _, err := range checkVoidCalls(c.TypeEnv, mod) {
+ c.err(err)
+ }
+ }
+}
+
+func (c *Compiler) rewritePrintCalls() {
+ var modified bool
+ if !c.enablePrintStatements {
+ for _, name := range c.sorted {
+ if erasePrintCalls(c.Modules[name]) {
+ modified = true
+ }
+ }
+ } else {
+ for _, name := range c.sorted {
+ mod := c.Modules[name]
+ WalkRules(mod, func(r *Rule) bool {
+ safe := r.Head.Args.Vars()
+ safe.Update(ReservedVars)
+ vis := func(b Body) bool {
+ modrec, errs := rewritePrintCalls(c.localvargen, c.GetArity, safe, b)
+ if modrec {
+ modified = true
+ }
+ for _, err := range errs {
+ c.err(err)
+ }
+ return false
+ }
+ WalkBodies(r.Head, vis)
+ WalkBodies(r.Body, vis)
+ return false
+ })
+ }
+ }
+ if modified {
+ c.Required.addBuiltinSorted(Print)
+ }
+}
+
+// checkVoidCalls returns errors for any expressions that treat void function
+// calls as values. The only void functions in Rego are specific built-ins like
+// print().
+func checkVoidCalls(env *TypeEnv, x any) Errors {
+ var errs Errors
+ WalkTerms(x, func(x *Term) bool {
+ if call, ok := x.Value.(Call); ok {
+ if tpe, ok := env.Get(call[0]).(*types.Function); ok && tpe.Result() == nil {
+ errs = append(errs, NewError(TypeErr, x.Loc(), "%v used as value", call))
+ }
+ }
+ return false
+ })
+ return errs
+}
+
+// rewritePrintCalls will rewrite the body so that print operands are captured
+// in local variables and their evaluation occurs within a comprehension.
+// Wrapping the terms inside of a comprehension ensures that undefined values do
+// not short-circuit evaluation.
+//
+// For example, given the following print statement:
+//
+// print("the value of x is:", input.x)
+//
+// The expression would be rewritten to:
+//
+// print({__local0__ | __local0__ = "the value of x is:"}, {__local1__ | __local1__ = input.x})
+func rewritePrintCalls(gen *localVarGenerator, getArity func(Ref) int, globals VarSet, body Body) (bool, Errors) {
+
+ var errs Errors
+ var modified bool
+
+ // Visit comprehension bodies recursively to ensure print statements inside
+ // those bodies only close over variables that are safe.
+ for i := range body {
+ if ContainsClosures(body[i]) {
+ safe := outputVarsForBody(body[:i], getArity, globals)
+ safe.Update(globals)
+ WalkClosures(body[i], func(x any) bool {
+ var modrec bool
+ var errsrec Errors
+ switch x := x.(type) {
+ case *SetComprehension:
+ modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body)
+ case *ArrayComprehension:
+ modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body)
+ case *ObjectComprehension:
+ modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body)
+ case *Every:
+ safe.Update(x.KeyValueVars())
+ modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body)
+ }
+ if modrec {
+ modified = true
+ }
+ errs = append(errs, errsrec...)
+ return true
+ })
+ if len(errs) > 0 {
+ return false, errs
+ }
+ }
+ }
+
+ for i := range body {
+
+ if !isPrintCall(body[i]) {
+ continue
+ }
+
+ modified = true
+
+ var errs Errors
+ safe := outputVarsForBody(body[:i], getArity, globals)
+ safe.Update(globals)
+
+ // Fixes Issue #7647 by adding generated variables to the safe set
+ WalkVars(body[:i], func(v Var) bool {
+ if v.IsGenerated() {
+ safe.Add(v)
+ }
+ return false
+ })
+
+ args := body[i].Operands()
+
+ var vis *VarVisitor
+ for j := range args {
+ vis = vis.ClearOrNew().WithParams(SafetyCheckVisitorParams)
+ vis.Walk(args[j])
+ vars := vis.Vars()
+ if vars.DiffCount(safe) > 0 {
+ unsafe := vars.Diff(safe)
+ for _, v := range unsafe.Sorted() {
+ errs = append(errs, NewError(CompileErr, args[j].Loc(), "var %v is undeclared", v))
+ }
+ }
+ }
+
+ if len(errs) > 0 {
+ return false, errs
+ }
+
+ terms := make([]*Term, 0, len(args))
+
+ for j := range args {
+ x := NewTerm(gen.Generate()).SetLocation(args[j].Loc())
+ capture := Equality.Expr(x, args[j]).SetLocation(args[j].Loc())
+ terms = append(terms, SetComprehensionTerm(x, NewBody(capture)).SetLocation(args[j].Loc()))
+ }
+
+ body.Set(NewExpr([]*Term{
+ NewTerm(InternalPrint.Ref()).SetLocation(body[i].Loc()),
+ ArrayTerm(terms...).SetLocation(body[i].Loc()),
+ }).SetLocation(body[i].Loc()), i)
+ }
+
+ return modified, nil
+}
+
+func erasePrintCalls(node any) bool {
+ var modified bool
+ NewGenericVisitor(func(x any) bool {
+ var modrec bool
+ switch x := x.(type) {
+ case *Rule:
+ modrec, x.Body = erasePrintCallsInBody(x.Body)
+ case *ArrayComprehension:
+ modrec, x.Body = erasePrintCallsInBody(x.Body)
+ case *SetComprehension:
+ modrec, x.Body = erasePrintCallsInBody(x.Body)
+ case *ObjectComprehension:
+ modrec, x.Body = erasePrintCallsInBody(x.Body)
+ case *Every:
+ modrec, x.Body = erasePrintCallsInBody(x.Body)
+ }
+ if modrec {
+ modified = true
+ }
+ return false
+ }).Walk(node)
+ return modified
+}
+
+func erasePrintCallsInBody(x Body) (bool, Body) {
+
+ if !containsPrintCall(x) {
+ return false, x
+ }
+
+ var cpy Body
+
+ for i := range x {
+
+ // Recursively visit any comprehensions contained in this expression.
+ erasePrintCalls(x[i])
+
+ if !isPrintCall(x[i]) {
+ cpy.Append(x[i])
+ }
+ }
+
+ if len(cpy) == 0 {
+ term := BooleanTerm(true).SetLocation(x.Loc())
+ expr := NewExpr(term).SetLocation(x.Loc())
+ cpy.Append(expr)
+ }
+
+ return true, cpy
+}
+
+func containsPrintCall(x any) bool {
+ var found bool
+ WalkExprs(x, func(expr *Expr) bool {
+ if !found {
+ if isPrintCall(expr) {
+ found = true
+ }
+ }
+ return found
+ })
+ return found
+}
+
+var printRef = Print.Ref()
+
+func isPrintCall(x *Expr) bool {
+ return x.IsCall() && x.Operator().Equal(printRef)
+}
+
+// rewriteRefsInHead will rewrite rules so that the head does not contain any
+// terms that require evaluation (e.g., refs or comprehensions). If the key or
+// value contains one or more of these terms, the key or value will be moved
+// into the body and assigned to a new variable. The new variable will replace
+// the key or value in the head.
+//
+// For instance, given the following rule:
+//
+// p[{"foo": data.foo[i]}] { i < 100 }
+//
+// The rule would be re-written as:
+//
+// p[__local0__] { i < 100; __local0__ = {"foo": data.foo[i]} }
+func (c *Compiler) rewriteRefsInHead() {
+ f := newEqualityFactory(c.localvargen)
+ for _, name := range c.sorted {
+ mod := c.Modules[name]
+ WalkRules(mod, func(rule *Rule) bool {
+ if requiresEval(rule.Head.Key) {
+ expr := f.Generate(rule.Head.Key)
+ rule.Head.Key = expr.Operand(0)
+ rule.Body.Append(expr)
+ }
+ if requiresEval(rule.Head.Value) {
+ expr := f.Generate(rule.Head.Value)
+ rule.Head.Value = expr.Operand(0)
+ rule.Body.Append(expr)
+ }
+ for i := 0; i < len(rule.Head.Args); i++ {
+ if requiresEval(rule.Head.Args[i]) {
+ expr := f.Generate(rule.Head.Args[i])
+ rule.Head.Args[i] = expr.Operand(0)
+ rule.Body.Append(expr)
+ }
+ }
+ return false
+ })
+ }
+}
+
+func (c *Compiler) rewriteEquals() {
+ modified := false
+ for _, name := range c.sorted {
+ modified = rewriteEquals(c.Modules[name]) || modified
+ }
+ if modified {
+ c.Required.addBuiltinSorted(Equal)
+ }
+}
+
+func (c *Compiler) rewriteDynamicTerms() {
+ f := newEqualityFactory(c.localvargen)
+ for _, name := range c.sorted {
+ WalkRules(c.Modules[name], func(rule *Rule) bool {
+ rule.Body = rewriteDynamics(f, rule.Body)
+ return false
+ })
+ }
+}
+
+// rewriteTestRuleEqualities rewrites equality expressions in test rule bodies to create local vars for statements that would otherwise
+// not have their values captured through tracing, such as refs and comprehensions not unified/assigned to a local var.
+// For example, given the following module:
+//
+// package test
+//
+// p.q contains v if {
+// some v in numbers.range(1, 3)
+// }
+//
+// p.r := "foo"
+//
+// test_rule {
+// p == {
+// "q": {4, 5, 6}
+// }
+// }
+//
+// `p` in `test_rule` resolves to `data.test.p`, which won't be an entry in the virtual-cache and must therefore be calculated after-the-fact.
+// If `p` isn't captured in a local var, there is no trivial way to retrieve its value for test reporting.
+func (c *Compiler) rewriteTestRuleEqualities() {
+ if !c.rewriteTestRulesForTracing {
+ return
+ }
+
+ f := newEqualityFactory(c.localvargen)
+ for _, name := range c.sorted {
+ mod := c.Modules[name]
+ WalkRules(mod, func(rule *Rule) bool {
+ if strings.HasPrefix(string(rule.Head.Name), "test_") {
+ rule.Body = rewriteTestEqualities(f, rule.Body)
+ }
+ return false
+ })
+ }
+}
+
+func (c *Compiler) parseMetadataBlocks() {
+ // Only parse annotations if rego.metadata built-ins are called
+ regoMetadataCalled := false
+ for _, name := range c.sorted {
+ mod := c.Modules[name]
+ WalkExprs(mod, func(expr *Expr) bool {
+ if isRegoMetadataChainCall(expr) || isRegoMetadataRuleCall(expr) {
+ regoMetadataCalled = true
+ }
+ return regoMetadataCalled
+ })
+
+ if regoMetadataCalled {
+ break
+ }
+ }
+
+ if regoMetadataCalled {
+ // NOTE: Possible optimization: only parse annotations for modules on the path of rego.metadata-calling module
+ for _, name := range c.sorted {
+ mod := c.Modules[name]
+
+ if len(mod.Annotations) == 0 {
+ var errs Errors
+ mod.Annotations, errs = parseAnnotations(mod.Comments)
+ errs = append(errs, attachAnnotationsNodes(mod)...)
+ for _, err := range errs {
+ c.err(err)
+ }
+
+ attachRuleAnnotations(mod)
+ }
+ }
+ }
+}
+
+func (c *Compiler) rewriteRegoMetadataCalls() {
+ eqFactory := newEqualityFactory(c.localvargen)
+
+ _, chainFuncAllowed := c.builtins[RegoMetadataChain.Name]
+ _, ruleFuncAllowed := c.builtins[RegoMetadataRule.Name]
+
+ for _, name := range c.sorted {
+ mod := c.Modules[name]
+
+ WalkRules(mod, func(rule *Rule) bool {
+ var firstChainCall *Expr
+ var firstRuleCall *Expr
+
+ WalkExprs(rule, func(expr *Expr) bool {
+ if chainFuncAllowed && firstChainCall == nil && isRegoMetadataChainCall(expr) {
+ firstChainCall = expr
+ } else if ruleFuncAllowed && firstRuleCall == nil && isRegoMetadataRuleCall(expr) {
+ firstRuleCall = expr
+ }
+ return firstChainCall != nil && firstRuleCall != nil
+ })
+
+ chainCalled := firstChainCall != nil
+ ruleCalled := firstRuleCall != nil
+
+ if chainCalled || ruleCalled {
+ body := make(Body, 0, len(rule.Body)+2)
+
+ var metadataChainVar Var
+ if chainCalled {
+ // Create and inject metadata chain for rule
+
+ chain, err := createMetadataChain(c.annotationSet.Chain(rule))
+ if err != nil {
+ c.err(err)
+ return false
+ }
+
+ chain.Location = firstChainCall.Location
+ eq := eqFactory.Generate(chain)
+ metadataChainVar = eq.Operands()[0].Value.(Var)
+ body.Append(eq)
+ }
+
+ var metadataRuleVar Var
+ if ruleCalled {
+ // Create and inject metadata for rule
+
+ var metadataRuleTerm *Term
+
+ a := getPrimaryRuleAnnotations(c.annotationSet, rule)
+ if a != nil {
+ annotObj, err := a.toObject()
+ if err != nil {
+ c.err(err)
+ return false
+ }
+ metadataRuleTerm = NewTerm(*annotObj)
+ } else {
+ // If rule has no annotations, assign an empty object
+ metadataRuleTerm = ObjectTerm()
+ }
+
+ metadataRuleTerm.Location = firstRuleCall.Location
+ eq := eqFactory.Generate(metadataRuleTerm)
+ metadataRuleVar = eq.Operands()[0].Value.(Var)
+ body.Append(eq)
+ }
+
+ for _, expr := range rule.Body {
+ body.Append(expr)
+ }
+ rule.Body = body
+
+ vis := func(b Body) bool {
+ for _, err := range rewriteRegoMetadataCalls(&metadataChainVar, &metadataRuleVar, b, &c.RewrittenVars) {
+ c.err(err)
+ }
+ return false
+ }
+ WalkBodies(rule.Head, vis)
+ WalkBodies(rule.Body, vis)
+ }
+
+ return false
+ })
+ }
+}
+
+func getPrimaryRuleAnnotations(as *AnnotationSet, rule *Rule) *Annotations {
+ annots := as.GetRuleScope(rule)
+
+ if len(annots) == 0 {
+ return nil
+ }
+
+ // Sort by annotation location; chain must start with annotations declared closest to rule, then going outward
+ slices.SortStableFunc(annots, func(a, b *Annotations) int {
+ return -a.Location.Compare(b.Location)
+ })
+
+ return annots[0]
+}
+
+func rewriteRegoMetadataCalls(metadataChainVar *Var, metadataRuleVar *Var, body Body, rewrittenVars *map[Var]Var) Errors {
+ var errs Errors
+
+ WalkClosures(body, func(x any) bool {
+ switch x := x.(type) {
+ case *ArrayComprehension:
+ errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars)
+ case *SetComprehension:
+ errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars)
+ case *ObjectComprehension:
+ errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars)
+ case *Every:
+ errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars)
+ }
+ return true
+ })
+
+ for i := range body {
+ expr := body[i]
+ var metadataVar Var
+
+ if metadataChainVar != nil && isRegoMetadataChainCall(expr) {
+ metadataVar = *metadataChainVar
+ } else if metadataRuleVar != nil && isRegoMetadataRuleCall(expr) {
+ metadataVar = *metadataRuleVar
+ } else {
+ continue
+ }
+
+ // NOTE(johanfylling): An alternative strategy would be to walk the body and replace all operands[0]
+ // usages with *metadataChainVar
+ operands := expr.Operands()
+ var newExpr *Expr
+ if len(operands) > 0 { // There is an output var to rewrite
+ rewrittenVar := operands[0]
+ newExpr = Equality.Expr(rewrittenVar, NewTerm(metadataVar))
+ } else { // No output var, just rewrite expr to metadataVar
+ newExpr = NewExpr(NewTerm(metadataVar))
+ }
+
+ newExpr.Generated = true
+ newExpr.Location = expr.Location
+ body.Set(newExpr, i)
+ }
+
+ return errs
+}
+
+var regoMetadataChainRef = RegoMetadataChain.Ref()
+var regoMetadataRuleRef = RegoMetadataRule.Ref()
+
+func isRegoMetadataChainCall(x *Expr) bool {
+ return x.IsCall() && x.Operator().Equal(regoMetadataChainRef)
+}
+
+func isRegoMetadataRuleCall(x *Expr) bool {
+ return x.IsCall() && x.Operator().Equal(regoMetadataRuleRef)
+}
+
+func createMetadataChain(chain []*AnnotationsRef) (*Term, *Error) {
+
+ metaArray := NewArray()
+ for _, link := range chain {
+ // Dropping leading 'data' element of path
+ p := link.Path[1:].toArray()
+ obj := NewObject(Item(InternedTerm("path"), NewTerm(p)))
+ if link.Annotations != nil {
+ annotObj, err := link.Annotations.toObject()
+ if err != nil {
+ return nil, err
+ }
+ obj.Insert(InternedTerm("annotations"), NewTerm(*annotObj))
+ }
+ metaArray = metaArray.Append(NewTerm(obj))
+ }
+
+ return NewTerm(metaArray), nil
+}
+
+func (c *Compiler) rewriteLocalVars() {
+ var assignment bool
+
+ args := NewVarVisitor()
+ argsStack := newLocalDeclaredVars()
+
+ for _, name := range c.sorted {
+ mod := c.Modules[name]
+ gen := c.localvargen
+
+ WalkRules(mod, func(rule *Rule) bool {
+ args.Clear()
+ argsStack.Clear()
+
+ if c.strict && len(rule.Head.Args) > 0 {
+ args.WalkArgs(rule.Head.Args)
+ }
+ unusedArgs := args.Vars()
+
+ c.rewriteLocalArgVars(gen, argsStack, rule)
+
+ // Rewrite local vars in each else-branch of the rule.
+ // Note: this is done instead of a walk so that we can capture any unused function arguments
+ // across else-branches.
+ for rule := rule; rule != nil; rule = rule.Else {
+ stack, errs := c.rewriteLocalVarsInRule(rule, unusedArgs, argsStack, gen)
+ if stack.assignment {
+ assignment = true
+ }
+
+ for arg := range unusedArgs {
+ if stack.Count(arg) > 1 {
+ delete(unusedArgs, arg)
+ }
+ }
+
+ for _, err := range errs {
+ c.err(err)
+ }
+ }
+
+ if c.strict {
+ // Report an error for each unused function argument
+ for arg := range unusedArgs {
+ if !arg.IsWildcard() {
+ c.err(NewError(CompileErr, rule.Head.Location, "unused argument %v. (hint: use _ (wildcard variable) instead)", arg))
+ }
+ }
+ }
+
+ return true
+ })
+ }
+
+ if assignment {
+ c.Required.addBuiltinSorted(Assign)
+ }
+}
+
+func (c *Compiler) rewriteLocalVarsInRule(rule *Rule, unusedArgs VarSet, argsStack *localDeclaredVars, gen *localVarGenerator) (*localDeclaredVars, Errors) {
+ onlyScalars := !headMayHaveVars(rule.Head)
+
+ var used VarSet
+
+ if !onlyScalars {
+ // Rewrite assignments contained in head of rule. Assignments can
+ // occur in rule head if they're inside a comprehension. Note,
+ // assigned vars in comprehensions in the head will be rewritten
+ // first to preserve scoping rules. For example:
+ //
+ // p = [x | x := 1] { x := 2 } becomes p = [__local0__ | __local0__ = 1] { __local1__ = 2 }
+ //
+ // This behaviour is consistent scoping inside the body. For example:
+ //
+ // p = xs { x := 2; xs = [x | x := 1] } becomes p = xs { __local0__ = 2; xs = [__local1__ | __local1__ = 1] }
+ nestedXform := &rewriteNestedHeadVarLocalTransform{
+ gen: gen,
+ RewrittenVars: c.RewrittenVars,
+ strict: c.strict,
+ }
+
+ NewGenericVisitor(nestedXform.Visit).Walk(rule.Head)
+
+ for _, err := range nestedXform.errs {
+ c.err(err)
+ }
+
+ // Rewrite assignments in body.
+ used = NewVarSet()
+
+ for _, t := range rule.Head.Ref()[1:] {
+ used.Update(t.Vars())
+ }
+
+ if rule.Head.Key != nil {
+ used.Update(rule.Head.Key.Vars())
+ }
+
+ if rule.Head.Value != nil {
+ valueVars := rule.Head.Value.Vars()
+ used.Update(valueVars)
+ for arg := range unusedArgs {
+ if valueVars.Contains(arg) {
+ delete(unusedArgs, arg)
+ }
+ }
+ }
+ }
+
+ stack := argsStack.Copy()
+
+ body, declared, errs := rewriteLocalVars(gen, stack, used, rule.Body, c.strict)
+
+ // For rewritten vars use the collection of all variables that
+ // were in the stack at some point in time.
+ maps.Copy(c.RewrittenVars, stack.rewritten)
+
+ rule.Body = body
+
+ if onlyScalars {
+ return stack, errs
+ }
+
+ // Rewrite vars in head that refer to locally declared vars in the body.
+ localXform := rewriteHeadVarLocalTransform{declared: declared}
+
+ for i := range rule.Head.Args {
+ rule.Head.Args[i], _ = transformTerm(localXform, rule.Head.Args[i])
+ }
+
+ for i := 1; i < len(rule.Head.Ref()); i++ {
+ rule.Head.Reference[i], _ = transformTerm(localXform, rule.Head.Ref()[i])
+ }
+ if rule.Head.Key != nil {
+ rule.Head.Key, _ = transformTerm(localXform, rule.Head.Key)
+ }
+
+ if rule.Head.Value != nil {
+ rule.Head.Value, _ = transformTerm(localXform, rule.Head.Value)
+ }
+ return stack, errs
+}
+
+func headMayHaveVars(head *Head) bool {
+ if head == nil {
+ return false
+ }
+ for i := range head.Args {
+ if !IsScalar(head.Args[i].Value) {
+ return true
+ }
+ }
+ if head.Key != nil && !IsScalar(head.Key.Value) {
+ return true
+ }
+ if head.Value != nil && !IsScalar(head.Value.Value) {
+ return true
+ }
+ ref := head.Ref()[1:]
+ for i := range ref {
+ if !IsScalar(ref[i].Value) {
+ return true
+ }
+ }
+ return false
+}
+
+type rewriteNestedHeadVarLocalTransform struct {
+ gen *localVarGenerator
+ errs Errors
+ RewrittenVars map[Var]Var
+ strict bool
+}
+
+func (xform *rewriteNestedHeadVarLocalTransform) Visit(x any) bool {
+ if term, ok := x.(*Term); ok {
+ stop := false
+ stack := newLocalDeclaredVars()
+
+ switch x := term.Value.(type) {
+ case *object:
+ cpy, _ := x.Map(func(k, v *Term) (*Term, *Term, error) {
+ kcpy := k.Copy()
+ NewGenericVisitor(xform.Visit).Walk(kcpy)
+ vcpy := v.Copy()
+ NewGenericVisitor(xform.Visit).Walk(vcpy)
+ return kcpy, vcpy, nil
+ })
+ term.Value = cpy
+ stop = true
+ case *set:
+ cpy, _ := x.Map(func(v *Term) (*Term, error) {
+ vcpy := v.Copy()
+ NewGenericVisitor(xform.Visit).Walk(vcpy)
+ return vcpy, nil
+ })
+ term.Value = cpy
+ stop = true
+ case *ArrayComprehension:
+ xform.errs = rewriteDeclaredVarsInArrayComprehension(xform.gen, stack, x, xform.errs, xform.strict)
+ stop = true
+ case *SetComprehension:
+ xform.errs = rewriteDeclaredVarsInSetComprehension(xform.gen, stack, x, xform.errs, xform.strict)
+ stop = true
+ case *ObjectComprehension:
+ xform.errs = rewriteDeclaredVarsInObjectComprehension(xform.gen, stack, x, xform.errs, xform.strict)
+ stop = true
+ }
+
+ maps.Copy(xform.RewrittenVars, stack.rewritten)
+
+ return stop
+ }
+
+ return false
+}
+
+type rewriteHeadVarLocalTransform struct {
+ declared map[Var]Var
+}
+
+func (xform rewriteHeadVarLocalTransform) Transform(x any) (any, error) {
+ if v, ok := x.(Var); ok {
+ if gv, ok := xform.declared[v]; ok {
+ return gv, nil
+ }
+ }
+ return x, nil
+}
+
+func (c *Compiler) rewriteLocalArgVars(gen *localVarGenerator, stack *localDeclaredVars, rule *Rule) {
+
+ vis := &ruleArgLocalRewriter{
+ stack: stack,
+ gen: gen,
+ }
+
+ for i := range rule.Head.Args {
+ Walk(vis, rule.Head.Args[i])
+ }
+
+ for i := range vis.errs {
+ c.err(vis.errs[i])
+ }
+}
+
+type ruleArgLocalRewriter struct {
+ stack *localDeclaredVars
+ gen *localVarGenerator
+ errs []*Error
+}
+
+func (vis *ruleArgLocalRewriter) Visit(x any) Visitor {
+
+ t, ok := x.(*Term)
+ if !ok {
+ return vis
+ }
+
+ switch v := t.Value.(type) {
+ case Var:
+ gv, ok := vis.stack.Declared(v)
+ if ok {
+ vis.stack.Seen(v)
+ } else {
+ gv = vis.gen.Generate()
+ vis.stack.Insert(v, gv, argVar)
+ }
+ t.Value = gv
+ return nil
+ case *object:
+ if cpy, err := v.Map(func(k, v *Term) (*Term, *Term, error) {
+ vcpy := v.Copy()
+ Walk(vis, vcpy)
+ return k, vcpy, nil
+ }); err != nil {
+ vis.errs = append(vis.errs, NewError(CompileErr, t.Location, "%s", err.Error()))
+ } else {
+ t.Value = cpy
+ }
+ return nil
+ case Null, Boolean, Number, String, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Set:
+ // Scalars are no-ops. Comprehensions are handled above. Sets must not
+ // contain variables.
+ return nil
+ case Call:
+ vis.errs = append(vis.errs, NewError(CompileErr, t.Location, "rule arguments cannot contain calls"))
+ return nil
+ default:
+ // Recurse on refs and arrays. Any embedded
+ // variables can be rewritten.
+ return vis
+ }
+}
+
+func (c *Compiler) rewriteWithModifiers() {
+ f := newEqualityFactory(c.localvargen)
+ for _, name := range c.sorted {
+ mod := c.Modules[name]
+ t := NewGenericTransformer(func(x any) (any, error) {
+ body, ok := x.(Body)
+ if !ok {
+ return x, nil
+ }
+ body, err := rewriteWithModifiersInBody(c, c.unsafeBuiltinsMap, f, body)
+ if err != nil {
+ c.err(err)
+ }
+
+ return body, nil
+ })
+ _, _ = Transform(t, mod) // ignore error
+ }
+}
+
+func (c *Compiler) setModuleTree() {
+ c.ModuleTree = NewModuleTree(c.Modules)
+}
+
+func (c *Compiler) setRuleTree() {
+ c.RuleTree = NewRuleTree(c.ModuleTree)
+}
+
+func (c *Compiler) setGraph() {
+ list := func(r Ref) []*Rule {
+ return c.GetRulesDynamicWithOpts(r, RulesOptions{IncludeHiddenModules: true})
+ }
+ c.Graph = NewGraph(c.Modules, list)
+}
+
+type queryCompiler struct {
+ compiler *Compiler
+ qctx *QueryContext
+ typeEnv *TypeEnv
+ rewritten map[Var]Var
+ after map[string][]QueryCompilerStageDefinition
+ unsafeBuiltins map[string]struct{}
+ comprehensionIndices map[*Term]*ComprehensionIndex
+ enablePrintStatements bool
+}
+
+func newQueryCompiler(compiler *Compiler) QueryCompiler {
+ qc := &queryCompiler{
+ compiler: compiler,
+ qctx: nil,
+ after: map[string][]QueryCompilerStageDefinition{},
+ comprehensionIndices: map[*Term]*ComprehensionIndex{},
+ }
+ return qc
+}
+
+func (qc *queryCompiler) WithStrict(strict bool) QueryCompiler {
+ qc.compiler.WithStrict(strict)
+ return qc
+}
+
+func (qc *queryCompiler) WithEnablePrintStatements(yes bool) QueryCompiler {
+ qc.enablePrintStatements = yes
+ return qc
+}
+
+func (qc *queryCompiler) WithContext(qctx *QueryContext) QueryCompiler {
+ qc.qctx = qctx
+ return qc
+}
+
+func (qc *queryCompiler) WithStageAfter(after string, stage QueryCompilerStageDefinition) QueryCompiler {
+ qc.after[after] = append(qc.after[after], stage)
+ return qc
+}
+
+func (qc *queryCompiler) WithUnsafeBuiltins(unsafe map[string]struct{}) QueryCompiler {
+ qc.unsafeBuiltins = unsafe
+ return qc
+}
+
+func (qc *queryCompiler) RewrittenVars() map[Var]Var {
+ return qc.rewritten
+}
+
+func (qc *queryCompiler) ComprehensionIndex(term *Term) *ComprehensionIndex {
+ if result, ok := qc.comprehensionIndices[term]; ok {
+ return result
+ } else if result, ok := qc.compiler.comprehensionIndices[term]; ok {
+ return result
+ }
+ return nil
+}
+
+func (qc *queryCompiler) runStage(metricName string, qctx *QueryContext, query Body, s func(*QueryContext, Body) (Body, error)) (Body, error) {
+ if qc.compiler.metrics != nil {
+ qc.compiler.metrics.Timer(metricName).Start()
+ defer qc.compiler.metrics.Timer(metricName).Stop()
+ }
+ return s(qctx, query)
+}
+
+func (qc *queryCompiler) runStageAfter(metricName string, query Body, s QueryCompilerStage) (Body, error) {
+ if qc.compiler.metrics != nil {
+ qc.compiler.metrics.Timer(metricName).Start()
+ defer qc.compiler.metrics.Timer(metricName).Stop()
+ }
+ return s(qc, query)
+}
+
+type queryStage = struct {
+ name string
+ metricName string
+ f func(*QueryContext, Body) (Body, error)
+}
+
+func (qc *queryCompiler) Compile(query Body) (Body, error) {
+ if len(query) == 0 {
+ return nil, Errors{NewError(CompileErr, nil, "empty query cannot be compiled")}
+ }
+
+ query = query.Copy()
+
+ stages := []queryStage{
+ {"CheckKeywordOverrides", "query_compile_stage_check_keyword_overrides", qc.checkKeywordOverrides},
+ {"ResolveRefs", "query_compile_stage_resolve_refs", qc.resolveRefs},
+ {"RewriteLocalVars", "query_compile_stage_rewrite_local_vars", qc.rewriteLocalVars},
+ {"CheckVoidCalls", "query_compile_stage_check_void_calls", qc.checkVoidCalls},
+ {"RewritePrintCalls", "query_compile_stage_rewrite_print_calls", qc.rewritePrintCalls},
+ {"RewriteExprTerms", "query_compile_stage_rewrite_expr_terms", qc.rewriteExprTerms},
+ {"RewriteComprehensionTerms", "query_compile_stage_rewrite_comprehension_terms", qc.rewriteComprehensionTerms},
+ {"RewriteWithValues", "query_compile_stage_rewrite_with_values", qc.rewriteWithModifiers},
+ {"CheckUndefinedFuncs", "query_compile_stage_check_undefined_funcs", qc.checkUndefinedFuncs},
+ {"CheckSafety", "query_compile_stage_check_safety", qc.checkSafety},
+ {"RewriteDynamicTerms", "query_compile_stage_rewrite_dynamic_terms", qc.rewriteDynamicTerms},
+ {"CheckTypes", "query_compile_stage_check_types", qc.checkTypes},
+ {"CheckUnsafeBuiltins", "query_compile_stage_check_unsafe_builtins", qc.checkUnsafeBuiltins},
+ {"CheckDeprecatedBuiltins", "query_compile_stage_check_deprecated_builtins", qc.checkDeprecatedBuiltins},
+ }
+ if qc.compiler.evalMode == EvalModeTopdown {
+ stages = append(stages, queryStage{"BuildComprehensionIndex", "query_compile_stage_build_comprehension_index", qc.buildComprehensionIndices})
+ }
+
+ qctx := qc.qctx.Copy()
+
+ for _, s := range stages {
+ var err error
+ query, err = qc.runStage(s.metricName, qctx, query, s.f)
+ if err != nil {
+ return nil, qc.applyErrorLimit(err)
+ }
+ for _, s := range qc.after[s.name] {
+ query, err = qc.runStageAfter(s.MetricName, query, s.Stage)
+ if err != nil {
+ return nil, qc.applyErrorLimit(err)
+ }
+ }
+ }
+
+ return query, nil
+}
+
+func (qc *queryCompiler) TypeEnv() *TypeEnv {
+ return qc.typeEnv
+}
+
+func (qc *queryCompiler) applyErrorLimit(err error) error {
+ var errs Errors
+ if errors.As(err, &errs) {
+ if qc.compiler.maxErrs > 0 && len(errs) > qc.compiler.maxErrs {
+ err = append(errs[:qc.compiler.maxErrs], errLimitReached)
+ }
+ }
+ return err
+}
+
+func (qc *queryCompiler) checkKeywordOverrides(_ *QueryContext, body Body) (Body, error) {
+ if qc.compiler.strict {
+ if errs := checkRootDocumentOverrides(body); len(errs) > 0 {
+ return nil, errs
+ }
+ }
+ return body, nil
+}
+
+func (qc *queryCompiler) resolveRefs(qctx *QueryContext, body Body) (Body, error) {
+
+ var globals map[Var]*usedRef
+
+ if qctx != nil {
+ pkg := qctx.Package
+ // Query compiler ought to generate a package if one was not provided and one or more imports were provided.
+ // The generated package name could even be an empty string to avoid conflicts (it doesn't have to be valid syntactically)
+ if pkg == nil && len(qctx.Imports) > 0 {
+ pkg = &Package{Path: RefTerm(VarTerm("")).Value.(Ref)}
+ }
+ if pkg != nil {
+ var ruleExports []Ref
+ rules := qc.compiler.getExports()
+ if exist, ok := rules.Get(pkg.Path); ok {
+ ruleExports = exist
+ }
+
+ globals = getGlobals(qctx.Package, ruleExports, qctx.Imports)
+ qctx.Imports = nil
+ }
+ }
+
+ ignore := &declaredVarStack{declaredVars(body)}
+
+ return resolveRefsInBody(globals, ignore, body), nil
+}
+
+func (*queryCompiler) rewriteComprehensionTerms(_ *QueryContext, body Body) (Body, error) {
+ gen := newLocalVarGenerator("q", body)
+ f := newEqualityFactory(gen)
+ node, err := rewriteComprehensionTerms(f, body)
+ if err != nil {
+ return nil, err
+ }
+ return node.(Body), nil
+}
+
+func (*queryCompiler) rewriteDynamicTerms(_ *QueryContext, body Body) (Body, error) {
+ gen := newLocalVarGenerator("q", body)
+ f := newEqualityFactory(gen)
+ return rewriteDynamics(f, body), nil
+}
+
+func (*queryCompiler) rewriteExprTerms(_ *QueryContext, body Body) (Body, error) {
+ gen := newLocalVarGenerator("q", body)
+ return rewriteExprTermsInBody(gen, body), nil
+}
+
+func (qc *queryCompiler) rewriteLocalVars(_ *QueryContext, body Body) (Body, error) {
+ gen := newLocalVarGenerator("q", body)
+ stack := newLocalDeclaredVars()
+ body, _, err := rewriteLocalVars(gen, stack, nil, body, qc.compiler.strict)
+ if len(err) != 0 {
+ return nil, err
+ }
+
+ // The vars returned during the rewrite will include all seen vars,
+ // even if they're not declared with an assignment operation. We don't
+ // want to include these inside the rewritten set though.
+ qc.rewritten = maps.Clone(stack.rewritten)
+
+ return body, nil
+}
+
+func (qc *queryCompiler) rewritePrintCalls(_ *QueryContext, body Body) (Body, error) {
+ if !qc.enablePrintStatements {
+ _, cpy := erasePrintCallsInBody(body)
+ return cpy, nil
+ }
+ gen := newLocalVarGenerator("q", body)
+ if _, errs := rewritePrintCalls(gen, qc.compiler.GetArity, ReservedVars, body); len(errs) > 0 {
+ return nil, errs
+ }
+ return body, nil
+}
+
+func (qc *queryCompiler) checkVoidCalls(_ *QueryContext, body Body) (Body, error) {
+ if errs := checkVoidCalls(qc.compiler.TypeEnv, body); len(errs) > 0 {
+ return nil, errs
+ }
+ return body, nil
+}
+
+func (qc *queryCompiler) checkUndefinedFuncs(_ *QueryContext, body Body) (Body, error) {
+ if errs := checkUndefinedFuncs(qc.compiler.TypeEnv, body, qc.compiler.GetArity, qc.rewritten); len(errs) > 0 {
+ return nil, errs
+ }
+ return body, nil
+}
+
+func (qc *queryCompiler) checkSafety(_ *QueryContext, body Body) (Body, error) {
+ safe := ReservedVars.Copy()
+ reordered, unsafe := reorderBodyForSafety(qc.compiler.builtins, qc.compiler.GetArity, safe, body)
+ if errs := safetyErrorSlice(unsafe, qc.RewrittenVars()); len(errs) > 0 {
+ return nil, errs
+ }
+ return reordered, nil
+}
+
+func (qc *queryCompiler) checkTypes(_ *QueryContext, body Body) (Body, error) {
+ var errs Errors
+ checker := newTypeChecker().
+ WithSchemaSet(qc.compiler.schemaSet).
+ WithInputType(qc.compiler.inputType).
+ WithVarRewriter(rewriteVarsInRef(qc.rewritten, qc.compiler.RewrittenVars))
+ qc.typeEnv, errs = checker.CheckBody(qc.compiler.TypeEnv, body)
+ if len(errs) > 0 {
+ return nil, errs
+ }
+
+ return body, nil
+}
+
+func (qc *queryCompiler) checkUnsafeBuiltins(_ *QueryContext, body Body) (Body, error) {
+ errs := checkUnsafeBuiltins(qc.unsafeBuiltinsMap(), body)
+ if len(errs) > 0 {
+ return nil, errs
+ }
+ return body, nil
+}
+
+func (qc *queryCompiler) unsafeBuiltinsMap() map[string]struct{} {
+ if qc.unsafeBuiltins != nil {
+ return qc.unsafeBuiltins
+ }
+ return qc.compiler.unsafeBuiltinsMap
+}
+
+func (qc *queryCompiler) checkDeprecatedBuiltins(_ *QueryContext, body Body) (Body, error) {
+ if qc.compiler.strict {
+ errs := checkDeprecatedBuiltins(qc.compiler.deprecatedBuiltinsMap, body)
+ if len(errs) > 0 {
+ return nil, errs
+ }
+ }
+ return body, nil
+}
+
+func (qc *queryCompiler) rewriteWithModifiers(_ *QueryContext, body Body) (Body, error) {
+ f := newEqualityFactory(newLocalVarGenerator("q", body))
+ body, err := rewriteWithModifiersInBody(qc.compiler, qc.unsafeBuiltinsMap(), f, body)
+ if err != nil {
+ return nil, Errors{err}
+ }
+ return body, nil
+}
+
+func (qc *queryCompiler) buildComprehensionIndices(_ *QueryContext, body Body) (Body, error) {
+ // NOTE(tsandall): The query compiler does not have a metrics object so we
+ // cannot record index metrics currently.
+ _ = buildComprehensionIndices(qc.compiler.debug, qc.compiler.GetArity, ReservedVars, qc.RewrittenVars(), body, qc.comprehensionIndices)
+ return body, nil
+}
+
+// ComprehensionIndex specifies how the comprehension term can be indexed. The keys
+// tell the evaluator what variables to use for indexing. In the future, the index
+// could be expanded with more information that would allow the evaluator to index
+// a larger fragment of comprehensions (e.g., by closing over variables in the outer
+// query.)
+type ComprehensionIndex struct {
+ Term *Term
+ Keys []*Term
+}
+
+func (ci *ComprehensionIndex) String() string {
+ if ci == nil {
+ return ""
+ }
+ return fmt.Sprintf("", NewArray(ci.Keys...))
+}
+
+func buildComprehensionIndices(dbg debug.Debug, arity func(Ref) int, candidates VarSet, rwVars map[Var]Var, node Body, result map[*Term]*ComprehensionIndex) uint64 {
+ var n uint64
+ cpy := candidates.Copy()
+ WalkBodies(node, func(b Body) bool {
+ for _, expr := range b {
+ index := getComprehensionIndex(dbg, arity, cpy, rwVars, expr)
+ if index != nil {
+ result[index.Term] = index
+ n++
+ }
+ // Any variables appearing in the expressions leading up to the comprehension
+ // are fair-game to be used as index keys.
+ cpy.Update(expr.Vars(VarVisitorParams{SkipClosures: true, SkipRefCallHead: true}))
+ }
+ return false
+ })
+ return n
+}
+
+func getComprehensionIndex(dbg debug.Debug, arity func(Ref) int, candidates VarSet, rwVars map[Var]Var, expr *Expr) *ComprehensionIndex {
+
+ // Ignore everything except = expressions. Extract
+ // the comprehension term from the expression.
+ if !expr.IsEquality() || expr.Negated || len(expr.With) > 0 {
+ // No debug message, these are assumed to be known hinderances
+ // to comprehension indexing.
+ return nil
+ }
+
+ var term *Term
+
+ lhs, rhs := expr.Operand(0), expr.Operand(1)
+
+ if _, ok := lhs.Value.(Var); ok && IsComprehension(rhs.Value) {
+ term = rhs
+ } else if _, ok := rhs.Value.(Var); ok && IsComprehension(lhs.Value) {
+ term = lhs
+ }
+
+ if term == nil {
+ // no debug for this, it's the ordinary "nothing to do here" case
+ return nil
+ }
+
+ // Ignore comprehensions that contain expressions that close over variables
+ // in the outer body if those variables are not also output variables in the
+ // comprehension body. In other words, ignore comprehensions that we cannot
+ // safely evaluate without bindings from the outer body. For example:
+ //
+ // x = [1]
+ // [true | data.y[z] = x] # safe to evaluate w/o outer body
+ // [true | data.y[z] = x[0]] # NOT safe to evaluate because 'x' would be unsafe.
+ //
+ // By identifying output variables in the body we also know what to index on by
+ // intersecting with candidate variables from the outer query.
+ //
+ // For example:
+ //
+ // x = data.foo[_]
+ // _ = [y | data.bar[y] = x] # index on 'x'
+ //
+ // This query goes from O(data.foo*data.bar) to O(data.foo+data.bar).
+ var body Body
+
+ switch x := term.Value.(type) {
+ case *ArrayComprehension:
+ body = x.Body
+ case *SetComprehension:
+ body = x.Body
+ case *ObjectComprehension:
+ body = x.Body
+ }
+
+ outputs := outputVarsForBody(body, arity, ReservedVars)
+ unsafe := body.Vars(SafetyCheckVisitorParams).Diff(outputs).Diff(ReservedVars)
+
+ if len(unsafe) > 0 {
+ dbg.Printf("%s: comprehension index: unsafe vars: %v", expr.Location, unsafe)
+ return nil
+ }
+
+ // Similarly, ignore comprehensions that contain references with output variables
+ // that intersect with the candidates. Indexing these comprehensions could worsen
+ // performance.
+ regressionVis := newComprehensionIndexRegressionCheckVisitor(candidates)
+ regressionVis.Walk(body)
+ if regressionVis.worse {
+ dbg.Printf("%s: comprehension index: output vars intersect candidates", expr.Location)
+ return nil
+ }
+
+ // Check if any nested comprehensions close over candidates. If any intersection is found
+ // the comprehension cannot be cached because it would require closing over the candidates
+ // which the evaluator does not support today.
+ nestedVis := newComprehensionIndexNestedCandidateVisitor(candidates)
+ nestedVis.Walk(body)
+ if nestedVis.found {
+ dbg.Printf("%s: comprehension index: nested comprehensions close over candidates", expr.Location)
+ return nil
+ }
+
+ // Make a sorted set of variable names that will serve as the index key set.
+ // Sort to ensure deterministic indexing. In future this could be relaxed
+ // if we can decide that one ordering is better than another. If the set is
+ // empty, there is no indexing to do.
+ indexVars := candidates.Intersect(outputs)
+ if len(indexVars) == 0 {
+ dbg.Printf("%s: comprehension index: no index vars", expr.Location)
+ return nil
+ }
+
+ result := make([]*Term, 0, len(indexVars))
+
+ for v := range indexVars {
+ result = append(result, NewTerm(v))
+ }
+
+ slices.SortFunc(result, TermValueCompare)
+
+ debugRes := make([]*Term, len(result))
+ for i, r := range result {
+ if o, ok := rwVars[r.Value.(Var)]; ok {
+ debugRes[i] = NewTerm(o)
+ } else {
+ debugRes[i] = r
+ }
+ }
+ dbg.Printf("%s: comprehension index: built with keys: %v", expr.Location, debugRes)
+ return &ComprehensionIndex{Term: term, Keys: result}
+}
+
+type comprehensionIndexRegressionCheckVisitor struct {
+ candidates VarSet
+ seen VarSet
+ worse bool
+}
+
+// TODO(tsandall): Improve this so that users can either supply this list explicitly
+// or the information is maintained on the built-in function declaration. What we really
+// need to know is whether the built-in function allows callers to push down output
+// values or not. It's unlikely that anything outside of OPA does this today so this
+// solution is fine for now.
+var comprehensionIndexBlacklist = map[string]int{
+ WalkBuiltin.Name: len(WalkBuiltin.Decl.FuncArgs().Args),
+}
+
+func newComprehensionIndexRegressionCheckVisitor(candidates VarSet) *comprehensionIndexRegressionCheckVisitor {
+ return &comprehensionIndexRegressionCheckVisitor{
+ candidates: candidates,
+ seen: NewVarSet(),
+ }
+}
+
+func (vis *comprehensionIndexRegressionCheckVisitor) Walk(x any) {
+ NewGenericVisitor(vis.visit).Walk(x)
+}
+
+func (vis *comprehensionIndexRegressionCheckVisitor) visit(x any) bool {
+ if !vis.worse {
+ switch x := x.(type) {
+ case *Expr:
+ operands := x.Operands()
+ if pos := comprehensionIndexBlacklist[x.Operator().String()]; pos > 0 && pos < len(operands) {
+ vis.assertEmptyIntersection(operands[pos].Vars())
+ }
+ case Ref:
+ vis.assertEmptyIntersection(x.OutputVars())
+ case Var:
+ vis.seen.Add(x)
+ // Always skip comprehensions. We do not have to visit their bodies here.
+ case *ArrayComprehension, *SetComprehension, *ObjectComprehension:
+ return true
+ }
+ }
+ return vis.worse
+}
+
+func (vis *comprehensionIndexRegressionCheckVisitor) assertEmptyIntersection(vs VarSet) {
+ for v := range vs {
+ if vis.candidates.Contains(v) && !vis.seen.Contains(v) {
+ vis.worse = true
+ return
+ }
+ }
+}
+
+type comprehensionIndexNestedCandidateVisitor struct {
+ candidates VarSet
+ found bool
+}
+
+func newComprehensionIndexNestedCandidateVisitor(candidates VarSet) *comprehensionIndexNestedCandidateVisitor {
+ return &comprehensionIndexNestedCandidateVisitor{
+ candidates: candidates,
+ }
+}
+
+func (vis *comprehensionIndexNestedCandidateVisitor) Walk(x any) {
+ NewGenericVisitor(vis.visit).Walk(x)
+}
+
+func (vis *comprehensionIndexNestedCandidateVisitor) visit(x any) bool {
+ if vis.found {
+ return true
+ }
+
+ if v, ok := x.(Value); ok && IsComprehension(v) {
+ varVis := NewVarVisitor().WithParams(VarVisitorParams{SkipRefHead: true})
+ varVis.Walk(v)
+ vis.found = len(varVis.Vars().Intersect(vis.candidates)) > 0
+ return true
+ }
+
+ return false
+}
+
+// ModuleTreeNode represents a node in the module tree. The module
+// tree is keyed by the package path.
+type ModuleTreeNode struct {
+ Key Value
+ Modules []*Module
+ Children map[Value]*ModuleTreeNode
+ Hide bool
+}
+
+func (n *ModuleTreeNode) String() string {
+ var rules []string
+ for _, m := range n.Modules {
+ for _, r := range m.Rules {
+ rules = append(rules, r.Head.String())
+ }
+ }
+ return fmt.Sprintf("", n.Key, n.Children, rules, n.Hide)
+}
+
+// NewModuleTree returns a new ModuleTreeNode that represents the root
+// of the module tree populated with the given modules.
+func NewModuleTree(mods map[string]*Module) *ModuleTreeNode {
+ root := &ModuleTreeNode{
+ Children: map[Value]*ModuleTreeNode{},
+ }
+ for _, name := range util.KeysSorted(mods) {
+ m := mods[name]
+ node := root
+ for i, x := range m.Package.Path {
+ c, ok := node.Children[x.Value]
+ if !ok {
+ var hide bool
+ if i == 1 && x.Value.Compare(SystemDocumentKey) == 0 {
+ hide = true
+ }
+ c = &ModuleTreeNode{
+ Key: x.Value,
+ Children: map[Value]*ModuleTreeNode{},
+ Hide: hide,
+ }
+ node.Children[x.Value] = c
+ }
+ node = c
+ }
+ node.Modules = append(node.Modules, m)
+ }
+ return root
+}
+
+// Size returns the number of modules in the tree.
+func (n *ModuleTreeNode) Size() int {
+ s := len(n.Modules)
+ for _, c := range n.Children {
+ s += c.Size()
+ }
+ return s
+}
+
+// Child returns n's child with key k.
+func (n *ModuleTreeNode) child(k Value) *ModuleTreeNode {
+ switch k.(type) {
+ case String, Var:
+ return n.Children[k]
+ }
+ return nil
+}
+
+// Find dereferences ref along the tree. ref[0] is converted to a String
+// for convenience.
+func (n *ModuleTreeNode) find(ref Ref) (*ModuleTreeNode, Ref) {
+ if v, ok := ref[0].Value.(Var); ok {
+ ref = Ref{StringTerm(string(v))}.Concat(ref[1:])
+ }
+ node := n
+ for i, r := range ref {
+ next := node.child(r.Value)
+ if next == nil {
+ tail := make(Ref, len(ref)-i)
+ tail[0] = VarTerm(string(ref[i].Value.(String)))
+ copy(tail[1:], ref[i+1:])
+ return node, tail
+ }
+ node = next
+ }
+ return node, nil
+}
+
+// DepthFirst performs a depth-first traversal of the module tree rooted at n.
+// If f returns true, traversal will not continue to the children of n.
+func (n *ModuleTreeNode) DepthFirst(f func(*ModuleTreeNode) bool) {
+ if f(n) {
+ return
+ }
+ for _, node := range n.Children {
+ node.DepthFirst(f)
+ }
+}
+
+// TreeNode represents a node in the rule tree. The rule tree is keyed by
+// rule path.
+type TreeNode struct {
+ Key Value
+ Values []any
+ Children map[Value]*TreeNode
+ Sorted []Value
+ Hide bool
+}
+
+func (n *TreeNode) String() string {
+ return fmt.Sprintf("", n.Key, n.Values, n.Sorted, n.Hide)
+}
+
+// NewRuleTree returns a new TreeNode that represents the root
+// of the rule tree populated with the given rules.
+func NewRuleTree(mtree *ModuleTreeNode) *TreeNode {
+ root := TreeNode{
+ Key: mtree.Key,
+ }
+
+ mtree.DepthFirst(func(m *ModuleTreeNode) bool {
+ for _, mod := range m.Modules {
+ if len(mod.Rules) == 0 {
+ root.add(mod.Package.Path, nil)
+ }
+ for _, rule := range mod.Rules {
+ root.add(rule.Ref().GroundPrefix(), rule)
+ }
+ }
+ return false
+ })
+
+ // ensure that data.system's TreeNode is hidden
+ node, tail := root.find(DefaultRootRef.Append(NewTerm(SystemDocumentKey)))
+ if len(tail) == 0 { // found
+ node.Hide = true
+ }
+
+ root.DepthFirst(func(x *TreeNode) bool {
+ x.sort()
+ return false
+ })
+
+ return &root
+}
+
+func (n *TreeNode) add(path Ref, rule *Rule) {
+ node, tail := n.find(path)
+ if len(tail) > 0 {
+ sub := treeNodeFromRef(tail, rule)
+ if node.Children == nil {
+ node.Children = make(map[Value]*TreeNode, 1)
+ }
+ node.Children[sub.Key] = sub
+ node.Sorted = append(node.Sorted, sub.Key)
+ } else if rule != nil {
+ node.Values = append(node.Values, rule)
+ }
+}
+
+// Size returns the number of rules in the tree.
+func (n *TreeNode) Size() int {
+ s := len(n.Values)
+ for _, c := range n.Children {
+ s += c.Size()
+ }
+ return s
+}
+
+// Child returns n's child with key k.
+func (n *TreeNode) Child(k Value) *TreeNode {
+ switch k.(type) {
+ case Ref, Call:
+ return nil
+ default:
+ return n.Children[k]
+ }
+}
+
+// Find dereferences ref along the tree
+func (n *TreeNode) Find(ref Ref) *TreeNode {
+ node := n
+ for _, r := range ref {
+ node = node.Child(r.Value)
+ if node == nil {
+ return nil
+ }
+ }
+ return node
+}
+
+// Iteratively dereferences ref along the node's subtree.
+// - If matching fails immediately, the tail will contain the full ref.
+// - Partial matching will result in a tail of non-zero length.
+// - A complete match will result in a 0 length tail.
+func (n *TreeNode) find(ref Ref) (*TreeNode, Ref) {
+ node := n
+ for i := range ref {
+ next := node.Child(ref[i].Value)
+ if next == nil {
+ tail := make(Ref, len(ref)-i)
+ copy(tail, ref[i:])
+ return node, tail
+ }
+ node = next
+ }
+ return node, nil
+}
+
+// DepthFirst performs a depth-first traversal of the rule tree rooted at n. If
+// f returns true, traversal will not continue to the children of n.
+func (n *TreeNode) DepthFirst(f func(*TreeNode) bool) {
+ if f(n) {
+ return
+ }
+ for _, node := range n.Children {
+ node.DepthFirst(f)
+ }
+}
+
+func (n *TreeNode) sort() {
+ slices.SortFunc(n.Sorted, Value.Compare)
+}
+
+func treeNodeFromRef(ref Ref, rule *Rule) *TreeNode {
+ depth := len(ref) - 1
+ key := ref[depth].Value
+ node := &TreeNode{
+ Key: key,
+ Children: nil,
+ }
+ if rule != nil {
+ node.Values = []any{rule}
+ }
+
+ for i := len(ref) - 2; i >= 0; i-- {
+ key := ref[i].Value
+ node = &TreeNode{
+ Key: key,
+ Children: map[Value]*TreeNode{ref[i+1].Value: node},
+ Sorted: []Value{ref[i+1].Value},
+ }
+ }
+ return node
+}
+
+// flattenChildren flattens all children's rule refs into a sorted array.
+func (n *TreeNode) flattenChildren() []Ref {
+ ret := newRefSet()
+ for _, sub := range n.Children { // we only want the children, so don't use n.DepthFirst() right away
+ sub.DepthFirst(func(x *TreeNode) bool {
+ for _, r := range x.Values {
+ rule := r.(*Rule)
+ ret.AddPrefix(rule.Ref())
+ }
+ return false
+ })
+ }
+
+ slices.SortFunc(ret.s, RefCompare)
+ return ret.s
+}
+
+// Graph represents the graph of dependencies between rules.
+type Graph struct {
+ adj map[util.T]map[util.T]struct{}
+ radj map[util.T]map[util.T]struct{}
+ nodes map[util.T]struct{}
+ sorted []util.T
+}
+
+// NewGraph returns a new Graph based on modules. The list function must return
+// the rules referred to directly by the ref.
+func NewGraph(modules map[string]*Module, list func(Ref) []*Rule) *Graph {
+
+ graph := &Graph{
+ adj: map[util.T]map[util.T]struct{}{},
+ radj: map[util.T]map[util.T]struct{}{},
+ nodes: map[util.T]struct{}{},
+ sorted: nil,
+ }
+
+ // Create visitor to walk a rule AST and add edges to the rule graph for
+ // each dependency.
+ vis := func(a *Rule) *GenericVisitor {
+ stop := false
+ return NewGenericVisitor(func(x any) bool {
+ switch x := x.(type) {
+ case Ref:
+ for _, b := range list(x) {
+ for node := b; node != nil; node = node.Else {
+ graph.addDependency(a, node)
+ }
+ }
+ case *Rule:
+ if stop {
+ // Do not recurse into else clauses (which will be handled
+ // by the outer visitor.)
+ return true
+ }
+ stop = true
+ }
+ return false
+ })
+ }
+
+ // Walk over all rules, add them to graph, and build adjacency lists.
+ for _, module := range modules {
+ WalkRules(module, func(a *Rule) bool {
+ graph.addNode(a)
+ vis(a).Walk(a)
+ return false
+ })
+ }
+
+ return graph
+}
+
+// Dependencies returns the set of rules that x depends on.
+func (g *Graph) Dependencies(x util.T) map[util.T]struct{} {
+ return g.adj[x]
+}
+
+// Dependents returns the set of rules that depend on x.
+func (g *Graph) Dependents(x util.T) map[util.T]struct{} {
+ return g.radj[x]
+}
+
+// Sort returns a slice of rules sorted by dependencies. If a cycle is found,
+// ok is set to false.
+func (g *Graph) Sort() (sorted []util.T, ok bool) {
+ if g.sorted != nil {
+ return g.sorted, true
+ }
+
+ sorter := &graphSort{
+ sorted: make([]util.T, 0, len(g.nodes)),
+ deps: g.Dependencies,
+ marked: map[util.T]struct{}{},
+ temp: map[util.T]struct{}{},
+ }
+
+ for node := range g.nodes {
+ if !sorter.Visit(node) {
+ return nil, false
+ }
+ }
+
+ g.sorted = sorter.sorted
+ return g.sorted, true
+}
+
+func (g *Graph) addDependency(u util.T, v util.T) {
+
+ if _, ok := g.nodes[u]; !ok {
+ g.addNode(u)
+ }
+
+ if _, ok := g.nodes[v]; !ok {
+ g.addNode(v)
+ }
+
+ edges, ok := g.adj[u]
+ if !ok {
+ edges = map[util.T]struct{}{}
+ g.adj[u] = edges
+ }
+
+ edges[v] = struct{}{}
+
+ edges, ok = g.radj[v]
+ if !ok {
+ edges = map[util.T]struct{}{}
+ g.radj[v] = edges
+ }
+
+ edges[u] = struct{}{}
+}
+
+func (g *Graph) addNode(n util.T) {
+ g.nodes[n] = struct{}{}
+}
+
+type graphSort struct {
+ sorted []util.T
+ deps func(util.T) map[util.T]struct{}
+ marked map[util.T]struct{}
+ temp map[util.T]struct{}
+}
+
+func (sort *graphSort) Marked(node util.T) bool {
+ _, marked := sort.marked[node]
+ return marked
+}
+
+func (sort *graphSort) Visit(node util.T) (ok bool) {
+ if _, ok := sort.temp[node]; ok {
+ return false
+ }
+ if sort.Marked(node) {
+ return true
+ }
+ sort.temp[node] = struct{}{}
+ for other := range sort.deps(node) {
+ if !sort.Visit(other) {
+ return false
+ }
+ }
+ sort.marked[node] = struct{}{}
+ delete(sort.temp, node)
+ sort.sorted = append(sort.sorted, node)
+ return true
+}
+
+// GraphTraversal is a Traversal that understands the dependency graph
+type GraphTraversal struct {
+ graph *Graph
+ visited map[util.T]struct{}
+}
+
+// NewGraphTraversal returns a Traversal for the dependency graph
+func NewGraphTraversal(graph *Graph) *GraphTraversal {
+ return &GraphTraversal{
+ graph: graph,
+ visited: map[util.T]struct{}{},
+ }
+}
+
+// Edges lists all dependency connections for a given node
+func (g *GraphTraversal) Edges(x util.T) []util.T {
+ r := []util.T{}
+ for v := range g.graph.Dependencies(x) {
+ r = append(r, v)
+ }
+ return r
+}
+
+// Visited returns whether a node has been visited, setting a node to visited if not
+func (g *GraphTraversal) Visited(u util.T) bool {
+ _, ok := g.visited[u]
+ g.visited[u] = struct{}{}
+ return ok
+}
+
+type unsafePair struct {
+ Expr *Expr
+ Vars VarSet
+}
+
+type unsafeVarLoc struct {
+ Var Var
+ Loc *Location
+}
+
+type unsafeVars map[*Expr]VarSet
+
+func (vs unsafeVars) Add(e *Expr, v Var) {
+ if u, ok := vs[e]; ok {
+ u[v] = struct{}{}
+ } else {
+ vs[e] = VarSet{v: struct{}{}}
+ }
+}
+
+func (vs unsafeVars) Set(e *Expr, s VarSet) {
+ vs[e] = s
+}
+
+func (vs unsafeVars) Update(o unsafeVars) {
+ for k, v := range o {
+ if _, ok := vs[k]; !ok {
+ vs[k] = VarSet{}
+ }
+ vs[k].Update(v)
+ }
+}
+
+func (vs unsafeVars) Vars() (result []unsafeVarLoc) {
+
+ locs := map[Var]*Location{}
+
+ // If var appears in multiple sets then pick first by location.
+ for expr, vars := range vs {
+ for v := range vars {
+ if locs[v].Compare(expr.Location) > 0 {
+ locs[v] = expr.Location
+ }
+ }
+ }
+
+ for v, loc := range locs {
+ result = append(result, unsafeVarLoc{
+ Var: v,
+ Loc: loc,
+ })
+ }
+
+ slices.SortFunc(result, func(a, b unsafeVarLoc) int {
+ return a.Loc.Compare(b.Loc)
+ })
+
+ return result
+}
+
+func (vs unsafeVars) Slice() (result []unsafePair) {
+ for expr, vs := range vs {
+ result = append(result, unsafePair{
+ Expr: expr,
+ Vars: vs,
+ })
+ }
+ return
+}
+
+// reorderBodyForSafety returns a copy of the body ordered such that
+// left to right evaluation of the body will not encounter unbound variables
+// in input positions or negated expressions.
+//
+// Expressions are added to the re-ordered body as soon as they are considered
+// safe. If multiple expressions become safe in the same pass, they are added
+// in their original order. This results in minimal re-ordering of the body.
+//
+// If the body cannot be reordered to ensure safety, the second return value
+// contains a mapping of expressions to unsafe variables in those expressions.
+func reorderBodyForSafety(builtins map[string]*Builtin, arity func(Ref) int, globals VarSet, body Body) (Body, unsafeVars) {
+ vis := varVisitorPool.Get().WithParams(SafetyCheckVisitorParams)
+ vis.WalkBody(body)
+
+ defer varVisitorPool.Put(vis)
+
+ bodyVars := vis.Vars().Copy()
+ safe := bodyVars.Intersect(globals)
+ unsafe := make(unsafeVars, len(bodyVars)-len(safe))
+
+ for _, e := range body {
+ vis.Clear().WithParams(SafetyCheckVisitorParams).Walk(e)
+ for v := range vis.Vars() {
+ if _, ok := safe[v]; !ok {
+ unsafe.Add(e, v)
+ }
+ }
+ }
+
+ reordered := make(Body, 0, len(body))
+ output := VarSet{}
+
+ for {
+ n := len(reordered)
+
+ for _, e := range body {
+ if reordered.Contains(e) {
+ continue
+ }
+
+ ovs := outputVarsForExpr(e, arity, safe, output)
+
+ // check closures: is this expression closing over variables that
+ // haven't been made safe by what's already included in `reordered`?
+ vs := unsafeVarsInClosures(e)
+ cv := vs.Intersect(bodyVars).Diff(globals)
+ ob := outputVarsForBody(reordered, arity, safe)
+
+ if cv.DiffCount(ob) > 0 {
+ uv := cv.Diff(ob)
+ if uv.Equal(ovs) { // special case "closure-self"
+ continue
+ }
+ unsafe.Set(e, uv)
+ }
+
+ for v := range unsafe[e] {
+ if ovs.Contains(v) || safe.Contains(v) {
+ delete(unsafe[e], v)
+ }
+ }
+
+ if len(unsafe[e]) == 0 {
+ delete(unsafe, e)
+ reordered.Append(e)
+ safe.Update(ovs) // this expression's outputs are safe
+ }
+ }
+
+ if len(reordered) == n { // fixed point, could not add any expr of body
+ break
+ }
+ }
+
+ // Recursively visit closures and perform the safety checks on them.
+ // Update the globals at each expression to include the variables that could
+ // be closed over.
+ g := globals.Copy()
+ xform := &bodySafetyTransformer{
+ builtins: builtins,
+ arity: arity,
+ }
+ gvis := &GenericVisitor{}
+ for i, e := range reordered {
+ if i > 0 {
+ vis.Walk(reordered[i-1])
+ g.Update(vis.Vars())
+ vis.Clear().WithParams(SafetyCheckVisitorParams)
+ }
+ xform.current = e
+ xform.globals = g
+ xform.unsafe = unsafe
+ gvis.f = xform.Visit
+ gvis.Walk(e)
+ }
+
+ return reordered, unsafe
+}
+
+type bodySafetyTransformer struct {
+ builtins map[string]*Builtin
+ arity func(Ref) int
+ current *Expr
+ globals VarSet
+ unsafe unsafeVars
+}
+
+func (xform *bodySafetyTransformer) Visit(x any) bool {
+ switch term := x.(type) {
+ case *Term:
+ switch x := term.Value.(type) {
+ case *object:
+ cpy, _ := x.Map(func(k, v *Term) (*Term, *Term, error) {
+ kcpy := k.Copy()
+ NewGenericVisitor(xform.Visit).Walk(kcpy)
+ vcpy := v.Copy()
+ NewGenericVisitor(xform.Visit).Walk(vcpy)
+ return kcpy, vcpy, nil
+ })
+ term.Value = cpy
+ return true
+ case *set:
+ cpy, _ := x.Map(func(v *Term) (*Term, error) {
+ vcpy := v.Copy()
+ NewGenericVisitor(xform.Visit).Walk(vcpy)
+ return vcpy, nil
+ })
+ term.Value = cpy
+ return true
+ case *ArrayComprehension:
+ xform.reorderArrayComprehensionSafety(x)
+ return true
+ case *ObjectComprehension:
+ xform.reorderObjectComprehensionSafety(x)
+ return true
+ case *SetComprehension:
+ xform.reorderSetComprehensionSafety(x)
+ return true
+ }
+ case *Expr:
+ if ev, ok := term.Terms.(*Every); ok {
+ xform.globals.Update(ev.KeyValueVars())
+ ev.Body = xform.reorderComprehensionSafety(NewVarSet(), ev.Body)
+ return true
+ }
+ }
+ return false
+}
+
+func (xform *bodySafetyTransformer) reorderComprehensionSafety(tv VarSet, body Body) Body {
+ bv := body.Vars(SafetyCheckVisitorParams)
+ bv.Update(xform.globals)
+
+ if tv.DiffCount(bv) > 0 {
+ uv := tv.Diff(bv)
+ for v := range uv {
+ xform.unsafe.Add(xform.current, v)
+ }
+ }
+
+ r, u := reorderBodyForSafety(xform.builtins, xform.arity, xform.globals, body)
+ if len(u) == 0 {
+ return r
+ }
+
+ xform.unsafe.Update(u)
+ return body
+}
+
+func (xform *bodySafetyTransformer) reorderArrayComprehensionSafety(ac *ArrayComprehension) {
+ ac.Body = xform.reorderComprehensionSafety(ac.Term.Vars(), ac.Body)
+}
+
+func (xform *bodySafetyTransformer) reorderObjectComprehensionSafety(oc *ObjectComprehension) {
+ tv := oc.Key.Vars()
+ tv.Update(oc.Value.Vars())
+ oc.Body = xform.reorderComprehensionSafety(tv, oc.Body)
+}
+
+func (xform *bodySafetyTransformer) reorderSetComprehensionSafety(sc *SetComprehension) {
+ sc.Body = xform.reorderComprehensionSafety(sc.Term.Vars(), sc.Body)
+}
+
+// unsafeVarsInClosures collects vars that are contained in closures within
+// this expression.
+func unsafeVarsInClosures(e *Expr) VarSet {
+ vs := VarSet{}
+ WalkClosures(e, func(x any) bool {
+ vis := &VarVisitor{vars: vs}
+ if ev, ok := x.(*Every); ok {
+ vis.WalkBody(ev.Body)
+ return true
+ }
+ vis.Walk(x)
+ return true
+ })
+ return vs
+}
+
+// OutputVarsFromBody returns all variables which are the "output" for
+// the given body. For safety checks this means that they would be
+// made safe by the body.
+func OutputVarsFromBody(c *Compiler, body Body, safe VarSet) VarSet {
+ return outputVarsForBody(body, c.GetArity, safe)
+}
+
+func outputVarsForBody(body Body, arity func(Ref) int, safe VarSet) VarSet {
+ o := safe.Copy()
+ output := VarSet{}
+ for _, e := range body {
+ o.Update(outputVarsForExpr(e, arity, o, output))
+ }
+ return o.Diff(safe)
+}
+
+// OutputVarsFromExpr returns all variables which are the "output" for
+// the given expression. For safety checks this means that they would be
+// made safe by the expr.
+func OutputVarsFromExpr(c *Compiler, expr *Expr, safe VarSet) VarSet {
+ return outputVarsForExpr(expr, c.GetArity, safe, VarSet{})
+}
+
+func outputVarsForExpr(expr *Expr, arity func(Ref) int, safe VarSet, output VarSet) VarSet {
+ // Negated expressions must be safe.
+ if expr.Negated {
+ return VarSet{}
+ }
+
+ var vis *VarVisitor
+
+ // With modifier inputs must be safe.
+ for _, with := range expr.With {
+ vis = vis.ClearOrNew().WithParams(SafetyCheckVisitorParams)
+ vis.Walk(with)
+ if vis.Vars().DiffCount(safe) > 0 {
+ return VarSet{}
+ }
+ }
+
+ switch terms := expr.Terms.(type) {
+ case *Term:
+ return outputVarsForTerms(expr, safe)
+ case []*Term:
+ if expr.IsEquality() {
+ return outputVarsForExprEq(expr, safe, output)
+ }
+
+ operator, ok := terms[0].Value.(Ref)
+ if !ok {
+ return VarSet{}
+ }
+
+ ar := arity(operator)
+ if ar < 0 {
+ return VarSet{}
+ }
+
+ return outputVarsForExprCall(expr, ar, safe, terms, vis, output)
+ case *Every:
+ return outputVarsForTerms(terms.Domain, safe)
+ default:
+ panic("illegal expression")
+ }
+}
+
+func outputVarsForExprEq(expr *Expr, safe VarSet, output VarSet) VarSet {
+ if !validEqAssignArgCount(expr) {
+ return safe
+ }
+
+ output.Update(outputVarsForTerms(expr, safe))
+ output.Update(safe)
+ output.Update(Unify(output, expr.Operand(0), expr.Operand(1)))
+
+ diff := output.Diff(safe)
+
+ clear(output)
+
+ return diff
+}
+
+func outputVarsForExprCall(expr *Expr, arity int, safe VarSet, terms []*Term, vis *VarVisitor, output VarSet) VarSet {
+ clear(output)
+
+ output.Update(outputVarsForTerms(expr, safe))
+
+ numInputTerms := arity + 1
+ if numInputTerms >= len(terms) {
+ return output
+ }
+
+ params := VarVisitorParams{
+ SkipClosures: true,
+ SkipSets: true,
+ SkipObjectKeys: true,
+ SkipRefHead: true,
+ }
+ vis = vis.ClearOrNew().WithParams(params)
+ vis.WalkArgs(Args(terms[:numInputTerms]))
+
+ unsafe := vis.Vars().Diff(output).DiffCount(safe)
+ if unsafe > 0 {
+ return VarSet{}
+ }
+
+ vis = vis.Clear().WithParams(params)
+ vis.WalkArgs(Args(terms[numInputTerms:]))
+ output.Update(vis.vars)
+ return output
+}
+
+func outputVarsForTerms(expr any, safe VarSet) VarSet {
+ output := VarSet{}
+ WalkTerms(expr, func(x *Term) bool {
+ switch r := x.Value.(type) {
+ case *SetComprehension, *ArrayComprehension, *ObjectComprehension:
+ return true
+ case Ref:
+ if !isRefSafe(r, safe) {
+ return true
+ }
+ if !r.IsGround() {
+ // Avoiding r.OutputVars() here as it won't allow reusing the visitor.
+ vis := varVisitorPool.Get().WithParams(VarVisitorParams{SkipRefHead: true})
+ vis.WalkRef(r)
+ output.Update(vis.Vars())
+ varVisitorPool.Put(vis)
+ }
+ }
+ return false
+ })
+ return output
+}
+
+type equalityFactory struct {
+ gen *localVarGenerator
+}
+
+func newEqualityFactory(gen *localVarGenerator) *equalityFactory {
+ return &equalityFactory{gen}
+}
+
+func (f *equalityFactory) Generate(other *Term) *Expr {
+ term := NewTerm(f.gen.Generate()).SetLocation(other.Location)
+ expr := Equality.Expr(term, other)
+ expr.Generated = true
+ expr.Location = other.Location
+ return expr
+}
+
+// TODO: Move to internal package?
+const LocalVarPrefix = "__local"
+
+type localVarGenerator struct {
+ exclude VarSet
+ suffix string
+ next int
+}
+
+func newLocalVarGeneratorForModuleSet(sorted []string, modules map[string]*Module) *localVarGenerator {
+ vis := NewVarVisitor()
+ for _, key := range sorted {
+ vis.Walk(modules[key])
+ }
+ return &localVarGenerator{exclude: vis.vars, next: 0}
+}
+
+func newLocalVarGenerator(suffix string, node any) *localVarGenerator {
+ vis := NewVarVisitor()
+ vis.Walk(node)
+ return &localVarGenerator{exclude: vis.vars, suffix: suffix, next: 0}
+}
+
+func (l *localVarGenerator) Generate() Var {
+ for {
+ result := Var(LocalVarPrefix + l.suffix + strconv.Itoa(l.next) + "__")
+ l.next++
+ if !l.exclude.Contains(result) {
+ return result
+ }
+ }
+}
+
+func getGlobals(pkg *Package, rules []Ref, imports []*Import) map[Var]*usedRef {
+ globals := make(map[Var]*usedRef, len(rules)+len(imports))
+
+ for _, ref := range rules {
+ v := ref[0].Value.(Var)
+ globals[v] = &usedRef{ref: pkg.Path.Append(StringTerm(string(v)))}
+ }
+
+ for _, imp := range imports {
+ path := imp.Path.Value.(Ref)
+ if FutureRootDocument.Equal(path[0]) || RegoRootDocument.Equal(path[0]) {
+ continue
+ }
+ globals[imp.Name()] = &usedRef{ref: path}
+ }
+
+ return globals
+}
+
+func requiresEval(x *Term) bool {
+ if x == nil {
+ return false
+ }
+ return ContainsRefs(x) || ContainsComprehensions(x)
+}
+
+func resolveRef(globals map[Var]*usedRef, ignore *declaredVarStack, ref Ref) Ref {
+
+ r := Ref{}
+ for i, x := range ref {
+ switch v := x.Value.(type) {
+ case Var:
+ if g, ok := globals[v]; ok && !ignore.Contains(v) {
+ cpy := g.ref.Copy()
+ for i := range cpy {
+ cpy[i].SetLocation(x.Location)
+ }
+ if i == 0 {
+ r = cpy
+ } else {
+ r = append(r, NewTerm(cpy).SetLocation(x.Location))
+ }
+ g.used = true
+ } else {
+ r = append(r, x)
+ }
+ case Ref, *Array, Object, Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call:
+ r = append(r, resolveRefsInTerm(globals, ignore, x))
+ default:
+ r = append(r, x)
+ }
+ }
+
+ return r
+}
+
+type usedRef struct {
+ ref Ref
+ used bool
+}
+
+func resolveRefsInRule(globals map[Var]*usedRef, rule *Rule) error {
+ ignore := &declaredVarStack{}
+
+ vars := NewVarSet()
+ var vis *GenericVisitor
+ var err error
+
+ // Walk args to collect vars and transform body so that callers can shadow
+ // root documents.
+ vis = NewGenericVisitor(func(x any) bool {
+ if err != nil {
+ return true
+ }
+ switch x := x.(type) {
+ case Var:
+ vars.Add(x)
+
+ // Object keys cannot be pattern matched so only walk values.
+ case *object:
+ x.Foreach(func(_, v *Term) {
+ vis.Walk(v)
+ })
+
+ // Skip terms that could contain vars that cannot be pattern matched.
+ case Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call:
+ return true
+
+ case *Term:
+ if _, ok := x.Value.(Ref); ok {
+ if RootDocumentRefs.Contains(x) {
+ // We could support args named input, data, etc. however
+ // this would require rewriting terms in the head and body.
+ // Preventing root document shadowing is simpler, and
+ // arguably, will prevent confusing names from being used.
+ // NOTE: this check is also performed as part of strict-mode in
+ // checkRootDocumentOverrides.
+ err = fmt.Errorf("args must not shadow %v (use a different variable name)", x)
+ return true
+ }
+ }
+ }
+ return false
+ })
+
+ vis.Walk(rule.Head.Args)
+
+ if err != nil {
+ return err
+ }
+
+ ignore.Push(vars)
+ ignore.Push(declaredVars(rule.Body))
+
+ ref := rule.Head.Ref()
+ for i := 1; i < len(ref); i++ {
+ ref[i] = resolveRefsInTerm(globals, ignore, ref[i])
+ }
+ if rule.Head.Key != nil {
+ rule.Head.Key = resolveRefsInTerm(globals, ignore, rule.Head.Key)
+ }
+
+ if rule.Head.Value != nil {
+ rule.Head.Value = resolveRefsInTerm(globals, ignore, rule.Head.Value)
+ }
+
+ rule.Body = resolveRefsInBody(globals, ignore, rule.Body)
+ return nil
+}
+
+func resolveRefsInBody(globals map[Var]*usedRef, ignore *declaredVarStack, body Body) Body {
+ r := make([]*Expr, 0, len(body))
+ for _, expr := range body {
+ r = append(r, resolveRefsInExpr(globals, ignore, expr))
+ }
+ return r
+}
+
+func resolveRefsInExpr(globals map[Var]*usedRef, ignore *declaredVarStack, expr *Expr) *Expr {
+ cpy := *expr
+ switch ts := expr.Terms.(type) {
+ case *Term:
+ cpy.Terms = resolveRefsInTerm(globals, ignore, ts)
+ case []*Term:
+ buf := make([]*Term, len(ts))
+ for i := range ts {
+ buf[i] = resolveRefsInTerm(globals, ignore, ts[i])
+ }
+ cpy.Terms = buf
+ case *SomeDecl:
+ if val, ok := ts.Symbols[0].Value.(Call); ok {
+ cpy.Terms = &SomeDecl{
+ Symbols: []*Term{CallTerm(resolveRefsInTermSlice(globals, ignore, val)...)},
+ Location: ts.Location,
+ }
+ }
+ case *Every:
+ locals := NewVarSet()
+ if ts.Key != nil {
+ locals.Update(ts.Key.Vars())
+ }
+ locals.Update(ts.Value.Vars())
+ ignore.Push(locals)
+ cpy.Terms = &Every{
+ Key: ts.Key.Copy(), // TODO(sr): do more?
+ Value: ts.Value.Copy(), // TODO(sr): do more?
+ Domain: resolveRefsInTerm(globals, ignore, ts.Domain),
+ Body: resolveRefsInBody(globals, ignore, ts.Body),
+ }
+ ignore.Pop()
+ }
+ for _, w := range cpy.With {
+ w.Target = resolveRefsInTerm(globals, ignore, w.Target)
+ w.Value = resolveRefsInTerm(globals, ignore, w.Value)
+ }
+ return &cpy
+}
+
+func resolveRefsInTerm(globals map[Var]*usedRef, ignore *declaredVarStack, term *Term) *Term {
+ switch v := term.Value.(type) {
+ case Var:
+ if g, ok := globals[v]; ok && !ignore.Contains(v) {
+ cpy := g.ref.Copy()
+ for i := range cpy {
+ cpy[i].SetLocation(term.Location)
+ }
+ g.used = true
+ return NewTerm(cpy).SetLocation(term.Location)
+ }
+ return term
+ case Ref:
+ fqn := resolveRef(globals, ignore, v)
+ cpy := *term
+ cpy.Value = fqn
+ return &cpy
+ case *object:
+ cpy := *term
+ cpy.Value, _ = v.Map(func(k, v *Term) (*Term, *Term, error) {
+ k = resolveRefsInTerm(globals, ignore, k)
+ v = resolveRefsInTerm(globals, ignore, v)
+ return k, v, nil
+ })
+ return &cpy
+ case *Array:
+ cpy := *term
+ cpy.Value = NewArray(resolveRefsInTermArray(globals, ignore, v)...)
+ return &cpy
+ case Call:
+ cpy := *term
+ cpy.Value = Call(resolveRefsInTermSlice(globals, ignore, v))
+ return &cpy
+ case Set:
+ s, _ := v.Map(func(e *Term) (*Term, error) {
+ return resolveRefsInTerm(globals, ignore, e), nil
+ })
+ cpy := *term
+ cpy.Value = s
+ return &cpy
+ case *ArrayComprehension:
+ ac := &ArrayComprehension{}
+ ignore.Push(declaredVars(v.Body))
+ ac.Term = resolveRefsInTerm(globals, ignore, v.Term)
+ ac.Body = resolveRefsInBody(globals, ignore, v.Body)
+ cpy := *term
+ cpy.Value = ac
+ ignore.Pop()
+ return &cpy
+ case *ObjectComprehension:
+ oc := &ObjectComprehension{}
+ ignore.Push(declaredVars(v.Body))
+ oc.Key = resolveRefsInTerm(globals, ignore, v.Key)
+ oc.Value = resolveRefsInTerm(globals, ignore, v.Value)
+ oc.Body = resolveRefsInBody(globals, ignore, v.Body)
+ cpy := *term
+ cpy.Value = oc
+ ignore.Pop()
+ return &cpy
+ case *SetComprehension:
+ sc := &SetComprehension{}
+ ignore.Push(declaredVars(v.Body))
+ sc.Term = resolveRefsInTerm(globals, ignore, v.Term)
+ sc.Body = resolveRefsInBody(globals, ignore, v.Body)
+ cpy := *term
+ cpy.Value = sc
+ ignore.Pop()
+ return &cpy
+ default:
+ return term
+ }
+}
+
+func resolveRefsInTermArray(globals map[Var]*usedRef, ignore *declaredVarStack, terms *Array) []*Term {
+ cpy := make([]*Term, terms.Len())
+ for i := range terms.Len() {
+ cpy[i] = resolveRefsInTerm(globals, ignore, terms.Elem(i))
+ }
+ return cpy
+}
+
+func resolveRefsInTermSlice(globals map[Var]*usedRef, ignore *declaredVarStack, terms []*Term) []*Term {
+ cpy := make([]*Term, len(terms))
+ for i := range terms {
+ cpy[i] = resolveRefsInTerm(globals, ignore, terms[i])
+ }
+ return cpy
+}
+
+type declaredVarStack []VarSet
+
+func (s declaredVarStack) Contains(v Var) bool {
+ for i := len(s) - 1; i >= 0; i-- {
+ if _, ok := s[i][v]; ok {
+ return ok
+ }
+ }
+ return false
+}
+
+func (s declaredVarStack) Add(v Var) {
+ s[len(s)-1].Add(v)
+}
+
+func (s *declaredVarStack) Push(vs VarSet) {
+ *s = append(*s, vs)
+}
+
+func (s *declaredVarStack) Pop() {
+ curr := *s
+ *s = curr[:len(curr)-1]
+}
+
+func declaredVars(x any) VarSet {
+ vars := NewVarSet()
+ vis := NewGenericVisitor(func(x any) bool {
+ switch x := x.(type) {
+ case *Expr:
+ if x.IsAssignment() && validEqAssignArgCount(x) {
+ WalkVars(x.Operand(0), func(v Var) bool {
+ vars.Add(v)
+ return false
+ })
+ } else if decl, ok := x.Terms.(*SomeDecl); ok {
+ for i := range decl.Symbols {
+ switch val := decl.Symbols[i].Value.(type) {
+ case Var:
+ vars.Add(val)
+ case Call:
+ args := val[1:]
+ if len(args) == 3 { // some x, y in xs
+ WalkVars(args[1], func(v Var) bool {
+ vars.Add(v)
+ return false
+ })
+ }
+ // some x in xs
+ WalkVars(args[0], func(v Var) bool {
+ vars.Add(v)
+ return false
+ })
+ }
+ }
+ }
+ case *ArrayComprehension, *SetComprehension, *ObjectComprehension:
+ return true
+ }
+ return false
+ })
+ vis.Walk(x)
+ return vars
+}
+
+// rewriteComprehensionTerms will rewrite comprehensions so that the term part
+// is bound to a variable in the body. This allows any type of term to be used
+// in the term part (even if the term requires evaluation.)
+//
+// For instance, given the following comprehension:
+//
+// [x[0] | x = y[_]; y = [1,2,3]]
+//
+// The comprehension would be rewritten as:
+//
+// [__local0__ | x = y[_]; y = [1,2,3]; __local0__ = x[0]]
+func rewriteComprehensionTerms(f *equalityFactory, node any) (any, error) {
+ return TransformComprehensions(node, func(x any) (Value, error) {
+ switch x := x.(type) {
+ case *ArrayComprehension:
+ if requiresEval(x.Term) {
+ expr := f.Generate(x.Term)
+ x.Term = expr.Operand(0)
+ x.Body.Append(expr)
+ }
+ return x, nil
+ case *SetComprehension:
+ if requiresEval(x.Term) {
+ expr := f.Generate(x.Term)
+ x.Term = expr.Operand(0)
+ x.Body.Append(expr)
+ }
+ return x, nil
+ case *ObjectComprehension:
+ if requiresEval(x.Key) {
+ expr := f.Generate(x.Key)
+ x.Key = expr.Operand(0)
+ x.Body.Append(expr)
+ }
+ if requiresEval(x.Value) {
+ expr := f.Generate(x.Value)
+ x.Value = expr.Operand(0)
+ x.Body.Append(expr)
+ }
+ return x, nil
+ }
+ panic("illegal type")
+ })
+}
+
+// rewriteEquals will rewrite exprs under x as unification calls instead of ==
+// calls. For example:
+//
+// data.foo == data.bar is rewritten as data.foo = data.bar
+//
+// This stage should only run the safety check (since == is a built-in with no
+// outputs, so the inputs must not be marked as safe.)
+//
+// This stage is not executed by the query compiler by default because when
+// callers specify == instead of = they expect to receive a true/false/undefined
+// result back whereas with = the result is only ever true/undefined. For
+// partial evaluation cases we do want to rewrite == to = to simplify the
+// result.
+func rewriteEquals(x any) (modified bool) {
+ unifyOp := Equality.Ref()
+ t := NewGenericTransformer(func(x any) (any, error) {
+ if x, ok := x.(*Expr); ok && x.IsCall() {
+ operator := x.Operator()
+ if operator.Equal(doubleEq) && len(x.Operands()) == 2 {
+ modified = true
+ x.SetOperator(NewTerm(unifyOp))
+ }
+ }
+ return x, nil
+ })
+ _, _ = Transform(t, x) // ignore error
+ return modified
+}
+
+func rewriteTestEqualities(f *equalityFactory, body Body) Body {
+ result := make(Body, 0, len(body))
+ for _, expr := range body {
+ // We can't rewrite negated expressions; if the extracted term is undefined, evaluation would fail before
+ // reaching the negation check.
+ if !expr.Negated && !expr.Generated {
+ switch {
+ case expr.IsEquality():
+ terms := expr.Terms.([]*Term)
+ result, terms[1] = rewriteDynamicsShallow(expr, f, terms[1], result)
+ result, terms[2] = rewriteDynamicsShallow(expr, f, terms[2], result)
+ case expr.IsEvery():
+ // We rewrite equalities inside of every-bodies as a fail here will be the cause of the test-rule fail.
+ // Failures inside other expressions with closures, such as comprehensions, won't cause the test-rule to fail, so we skip those.
+ every := expr.Terms.(*Every)
+ every.Body = rewriteTestEqualities(f, every.Body)
+ }
+ }
+ result = appendExpr(result, expr)
+ }
+ return result
+}
+
+func rewriteDynamicsShallow(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) {
+ switch term.Value.(type) {
+ case Ref, *ArrayComprehension, *SetComprehension, *ObjectComprehension:
+ generated := f.Generate(term)
+ generated.With = original.With
+ result.Append(generated)
+ connectGeneratedExprs(original, generated)
+ return result, result[len(result)-1].Operand(0)
+ }
+ return result, term
+}
+
+// rewriteDynamics will rewrite the body so that dynamic terms (i.e., refs and
+// comprehensions) are bound to vars earlier in the query. This translation
+// results in eager evaluation.
+//
+// For instance, given the following query:
+//
+// foo(data.bar) = 1
+//
+// The rewritten version will be:
+//
+// __local0__ = data.bar; foo(__local0__) = 1
+func rewriteDynamics(f *equalityFactory, body Body) Body {
+ result := make(Body, 0, len(body))
+ for _, expr := range body {
+ switch {
+ case expr.IsEquality():
+ result = rewriteDynamicsEqExpr(f, expr, result)
+ case expr.IsCall():
+ result = rewriteDynamicsCallExpr(f, expr, result)
+ case expr.IsEvery():
+ result = rewriteDynamicsEveryExpr(f, expr, result)
+ default:
+ result = rewriteDynamicsTermExpr(f, expr, result)
+ }
+ }
+ return result
+}
+
+func appendExpr(body Body, expr *Expr) Body {
+ body.Append(expr)
+ return body
+}
+
+func rewriteDynamicsEqExpr(f *equalityFactory, expr *Expr, result Body) Body {
+ if !validEqAssignArgCount(expr) {
+ return appendExpr(result, expr)
+ }
+ terms := expr.Terms.([]*Term)
+ result, terms[1] = rewriteDynamicsInTerm(expr, f, terms[1], result)
+ result, terms[2] = rewriteDynamicsInTerm(expr, f, terms[2], result)
+ return appendExpr(result, expr)
+}
+
+func rewriteDynamicsCallExpr(f *equalityFactory, expr *Expr, result Body) Body {
+ terms := expr.Terms.([]*Term)
+ for i := 1; i < len(terms); i++ {
+ result, terms[i] = rewriteDynamicsOne(expr, f, terms[i], result)
+ }
+ return appendExpr(result, expr)
+}
+
+func rewriteDynamicsEveryExpr(f *equalityFactory, expr *Expr, result Body) Body {
+ ev := expr.Terms.(*Every)
+ result, ev.Domain = rewriteDynamicsOne(expr, f, ev.Domain, result)
+ ev.Body = rewriteDynamics(f, ev.Body)
+ return appendExpr(result, expr)
+}
+
+func rewriteDynamicsTermExpr(f *equalityFactory, expr *Expr, result Body) Body {
+ term := expr.Terms.(*Term)
+ result, expr.Terms = rewriteDynamicsInTerm(expr, f, term, result)
+ return appendExpr(result, expr)
+}
+
+func rewriteDynamicsInTerm(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) {
+ switch v := term.Value.(type) {
+ case Ref:
+ for i := 1; i < len(v); i++ {
+ result, v[i] = rewriteDynamicsOne(original, f, v[i], result)
+ }
+ case *ArrayComprehension:
+ v.Body = rewriteDynamics(f, v.Body)
+ case *SetComprehension:
+ v.Body = rewriteDynamics(f, v.Body)
+ case *ObjectComprehension:
+ v.Body = rewriteDynamics(f, v.Body)
+ default:
+ result, term = rewriteDynamicsOne(original, f, term, result)
+ }
+ return result, term
+}
+
+func rewriteDynamicsOne(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) {
+ switch v := term.Value.(type) {
+ case Ref:
+ for i := 1; i < len(v); i++ {
+ result, v[i] = rewriteDynamicsOne(original, f, v[i], result)
+ }
+ generated := f.Generate(term)
+ generated.With = original.With
+ result.Append(generated)
+ connectGeneratedExprs(original, generated)
+ return result, result[len(result)-1].Operand(0)
+ case *Array:
+ for i := range v.Len() {
+ var t *Term
+ result, t = rewriteDynamicsOne(original, f, v.Elem(i), result)
+ v.set(i, t)
+ }
+ return result, term
+ case *object:
+ cpy := NewObject()
+ v.Foreach(func(key, value *Term) {
+ result, key = rewriteDynamicsOne(original, f, key, result)
+ result, value = rewriteDynamicsOne(original, f, value, result)
+ cpy.Insert(key, value)
+ })
+ return result, NewTerm(cpy).SetLocation(term.Location)
+ case Set:
+ cpy := NewSet()
+ for _, term := range v.Slice() {
+ var rw *Term
+ result, rw = rewriteDynamicsOne(original, f, term, result)
+ cpy.Add(rw)
+ }
+ return result, NewTerm(cpy).SetLocation(term.Location)
+ case *ArrayComprehension:
+ var extra *Expr
+ v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term)
+ result.Append(extra)
+ connectGeneratedExprs(original, extra)
+ return result, result[len(result)-1].Operand(0)
+ case *SetComprehension:
+ var extra *Expr
+ v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term)
+ result.Append(extra)
+ connectGeneratedExprs(original, extra)
+ return result, result[len(result)-1].Operand(0)
+ case *ObjectComprehension:
+ var extra *Expr
+ v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term)
+ result.Append(extra)
+ connectGeneratedExprs(original, extra)
+ return result, result[len(result)-1].Operand(0)
+ }
+ return result, term
+}
+
+func rewriteDynamicsComprehensionBody(original *Expr, f *equalityFactory, body Body, term *Term) (Body, *Expr) {
+ body = rewriteDynamics(f, body)
+ generated := f.Generate(term)
+ generated.With = original.With
+ return body, generated
+}
+
+func rewriteExprTermsInHead(gen *localVarGenerator, rule *Rule) {
+ for i := range rule.Head.Args {
+ support, output := expandExprTerm(gen, rule.Head.Args[i])
+ for j := range support {
+ rule.Body.Append(support[j])
+ }
+ rule.Head.Args[i] = output
+ }
+ if rule.Head.Key != nil {
+ support, output := expandExprTerm(gen, rule.Head.Key)
+ for i := range support {
+ rule.Body.Append(support[i])
+ }
+ rule.Head.Key = output
+ }
+ if rule.Head.Value != nil {
+ support, output := expandExprTerm(gen, rule.Head.Value)
+ for i := range support {
+ rule.Body.Append(support[i])
+ }
+ rule.Head.Value = output
+ }
+}
+
+func rewriteExprTermsInBody(gen *localVarGenerator, body Body) Body {
+ cpy := make(Body, 0, len(body))
+ for i := range body {
+ for _, expr := range expandExpr(gen, body[i]) {
+ cpy.Append(expr)
+ }
+ }
+ return cpy
+}
+
+func expandExpr(gen *localVarGenerator, expr *Expr) (result []*Expr) {
+ for i := range expr.With {
+ extras, value := expandExprTerm(gen, expr.With[i].Value)
+ expr.With[i].Value = value
+ result = append(result, extras...)
+ }
+ switch terms := expr.Terms.(type) {
+ case *Term:
+ extras, term := expandExprTerm(gen, terms)
+ if len(expr.With) > 0 {
+ for i := range extras {
+ extras[i].With = expr.With
+ }
+ }
+ result = append(result, extras...)
+ expr.Terms = term
+ result = append(result, expr)
+ case []*Term:
+ for i := 1; i < len(terms); i++ {
+ var extras []*Expr
+ extras, terms[i] = expandExprTerm(gen, terms[i])
+ connectGeneratedExprs(expr, extras...)
+ if len(expr.With) > 0 {
+ for i := range extras {
+ extras[i].With = expr.With
+ }
+ }
+ result = append(result, extras...)
+ }
+ result = append(result, expr)
+ case *Every:
+ var extras []*Expr
+
+ term := NewTerm(gen.Generate()).SetLocation(terms.Domain.Location)
+ eq := Equality.Expr(term, terms.Domain).SetLocation(terms.Domain.Location)
+ eq.Generated = true
+ eq.With = expr.With
+ extras = expandExpr(gen, eq)
+ terms.Domain = term
+
+ terms.Body = rewriteExprTermsInBody(gen, terms.Body)
+ result = append(result, extras...)
+ result = append(result, expr)
+ }
+ return
+}
+
+func connectGeneratedExprs(parent *Expr, children ...*Expr) {
+ for _, child := range children {
+ child.generatedFrom = parent
+ parent.generates = append(parent.generates, child)
+ }
+}
+
+func expandExprTerm(gen *localVarGenerator, term *Term) (support []*Expr, output *Term) {
+ output = term
+ switch v := term.Value.(type) {
+ case Call:
+ for i := 1; i < len(v); i++ {
+ var extras []*Expr
+ extras, v[i] = expandExprTerm(gen, v[i])
+ support = append(support, extras...)
+ }
+ output = NewTerm(gen.Generate()).SetLocation(term.Location)
+ expr := v.MakeExpr(output).SetLocation(term.Location)
+ expr.Generated = true
+ support = append(support, expr)
+ case Ref:
+ support = expandExprRef(gen, v)
+ case *Array:
+ support = expandExprTermArray(gen, v)
+ case *object:
+ cpy, _ := v.Map(func(k, v *Term) (*Term, *Term, error) {
+ extras1, expandedKey := expandExprTerm(gen, k)
+ extras2, expandedValue := expandExprTerm(gen, v)
+ support = append(support, extras1...)
+ support = append(support, extras2...)
+ return expandedKey, expandedValue, nil
+ })
+ output = NewTerm(cpy).SetLocation(term.Location)
+ case Set:
+ cpy, _ := v.Map(func(x *Term) (*Term, error) {
+ extras, expanded := expandExprTerm(gen, x)
+ support = append(support, extras...)
+ return expanded, nil
+ })
+ output = NewTerm(cpy).SetLocation(term.Location)
+ case *ArrayComprehension:
+ support, term := expandExprTerm(gen, v.Term)
+ for i := range support {
+ v.Body.Append(support[i])
+ }
+ v.Term = term
+ v.Body = rewriteExprTermsInBody(gen, v.Body)
+ case *SetComprehension:
+ support, term := expandExprTerm(gen, v.Term)
+ for i := range support {
+ v.Body.Append(support[i])
+ }
+ v.Term = term
+ v.Body = rewriteExprTermsInBody(gen, v.Body)
+ case *ObjectComprehension:
+ support, key := expandExprTerm(gen, v.Key)
+ for i := range support {
+ v.Body.Append(support[i])
+ }
+ v.Key = key
+ support, value := expandExprTerm(gen, v.Value)
+ for i := range support {
+ v.Body.Append(support[i])
+ }
+ v.Value = value
+ v.Body = rewriteExprTermsInBody(gen, v.Body)
+ }
+ return
+}
+
+func expandExprRef(gen *localVarGenerator, v []*Term) (support []*Expr) {
+ // Start by calling a normal expandExprTerm on all terms.
+ support = expandExprTermSlice(gen, v)
+
+ // Rewrite references in order to support indirect references. We rewrite
+ // e.g.
+ //
+ // [1, 2, 3][i]
+ //
+ // to
+ //
+ // __local_var = [1, 2, 3]
+ // __local_var[i]
+ //
+ // to support these. This only impacts the reference subject, i.e. the
+ // first item in the slice.
+ var subject = v[0]
+ switch subject.Value.(type) {
+ case *Array, Object, Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call:
+ f := newEqualityFactory(gen)
+ assignToLocal := f.Generate(subject)
+ support = append(support, assignToLocal)
+ v[0] = assignToLocal.Operand(0)
+ }
+ return
+}
+
+func expandExprTermArray(gen *localVarGenerator, arr *Array) (support []*Expr) {
+ for i := range arr.Len() {
+ extras, v := expandExprTerm(gen, arr.Elem(i))
+ arr.set(i, v)
+ support = append(support, extras...)
+ }
+ return
+}
+
+func expandExprTermSlice(gen *localVarGenerator, v []*Term) (support []*Expr) {
+ for i := range v {
+ var extras []*Expr
+ extras, v[i] = expandExprTerm(gen, v[i])
+ support = append(support, extras...)
+ }
+ return
+}
+
+type localDeclaredVars struct {
+ vars []*declaredVarSet
+
+ // rewritten contains a mapping of *all* user-defined variables
+ // that have been rewritten whereas vars contains the state
+ // from the current query (not any nested queries, and all vars
+ // seen).
+ rewritten map[Var]Var
+
+ // indicates if an assignment (:= operator) has been seen *ever*
+ assignment bool
+}
+
+type varOccurrence uint8
+
+const (
+ newVar varOccurrence = iota
+ argVar
+ seenVar
+ assignedVar
+ declaredVar
+)
+
+type declaredVarSet struct {
+ vs map[Var]Var
+ occurrence map[Var]varOccurrence
+ count map[Var]int
+}
+
+func newDeclaredVarSet() *declaredVarSet {
+ return &declaredVarSet{
+ vs: map[Var]Var{},
+ occurrence: map[Var]varOccurrence{},
+ count: map[Var]int{},
+ }
+}
+
+func (s *declaredVarSet) clear() *declaredVarSet {
+ clear(s.vs)
+ clear(s.occurrence)
+ clear(s.count)
+
+ return s
+}
+
+func newLocalDeclaredVars() *localDeclaredVars {
+ return &localDeclaredVars{
+ vars: []*declaredVarSet{newDeclaredVarSet()},
+ rewritten: map[Var]Var{},
+ }
+}
+
+func (s *localDeclaredVars) Clear() {
+ var vs *declaredVarSet
+ if len(s.vars) > 0 {
+ vs = s.vars[0]
+ }
+
+ clear(s.vars)
+ clear(s.rewritten)
+
+ s.vars = s.vars[:0]
+
+ if vs != nil {
+ s.vars = append(s.vars, vs.clear())
+ }
+ if s.vars[0] == nil {
+ s.vars[0] = newDeclaredVarSet()
+ }
+ s.assignment = false
+}
+
+func (s *localDeclaredVars) Copy() *localDeclaredVars {
+ stack := &localDeclaredVars{
+ vars: make([]*declaredVarSet, 0, len(s.vars)),
+ }
+
+ for i := range s.vars {
+ stack.vars = append(stack.vars, newDeclaredVarSet())
+ maps.Copy(stack.vars[0].vs, s.vars[i].vs)
+ maps.Copy(stack.vars[0].occurrence, s.vars[i].occurrence)
+ maps.Copy(stack.vars[0].count, s.vars[i].count)
+ }
+
+ stack.rewritten = maps.Clone(s.rewritten)
+
+ return stack
+}
+
+func (s *localDeclaredVars) Push() {
+ s.vars = append(s.vars, newDeclaredVarSet())
+}
+
+func (s *localDeclaredVars) Pop() *declaredVarSet {
+ sl := s.vars
+ curr := sl[len(sl)-1]
+ s.vars = sl[:len(sl)-1]
+ return curr
+}
+
+func (s localDeclaredVars) Peek() *declaredVarSet {
+ return s.vars[len(s.vars)-1]
+}
+
+func (s localDeclaredVars) Insert(x, y Var, occurrence varOccurrence) {
+ elem := s.vars[len(s.vars)-1]
+ elem.vs[x] = y
+ elem.occurrence[x] = occurrence
+
+ elem.count[x] = 1
+
+ // If the variable has been rewritten (where x != y, with y being
+ // the generated value), store it in the map of rewritten vars.
+ // Assume that the generated values are unique for the compilation.
+ if !x.Equal(y) {
+ s.rewritten[y] = x
+ }
+}
+
+func (s localDeclaredVars) Declared(x Var) (y Var, ok bool) {
+ for i := len(s.vars) - 1; i >= 0; i-- {
+ if y, ok = s.vars[i].vs[x]; ok {
+ return
+ }
+ }
+ return
+}
+
+// Occurrence returns a flag that indicates whether x has occurred in the
+// current scope.
+func (s localDeclaredVars) Occurrence(x Var) varOccurrence {
+ return s.vars[len(s.vars)-1].occurrence[x]
+}
+
+// GlobalOccurrence returns a flag that indicates whether x has occurred in the
+// global scope.
+func (s localDeclaredVars) GlobalOccurrence(x Var) (varOccurrence, bool) {
+ for i := len(s.vars) - 1; i >= 0; i-- {
+ if occ, ok := s.vars[i].occurrence[x]; ok {
+ return occ, true
+ }
+ }
+ return newVar, false
+}
+
+// Seen marks x as seen by incrementing its counter
+func (s localDeclaredVars) Seen(x Var) {
+ for i := len(s.vars) - 1; i >= 0; i-- {
+ dvs := s.vars[i]
+ if c, ok := dvs.count[x]; ok {
+ dvs.count[x] = c + 1
+ return
+ }
+ }
+
+ s.vars[len(s.vars)-1].count[x] = 1
+}
+
+// Count returns how many times x has been seen
+func (s localDeclaredVars) Count(x Var) int {
+ for i := len(s.vars) - 1; i >= 0; i-- {
+ if c, ok := s.vars[i].count[x]; ok {
+ return c
+ }
+ }
+
+ return 0
+}
+
+// rewriteLocalVars rewrites bodies to remove assignment/declaration
+// expressions. For example:
+//
+// a := 1; p[a]
+//
+// Is rewritten to:
+//
+// __local0__ = 1; p[__local0__]
+//
+// During rewriting, assignees are validated to prevent use before declaration.
+func rewriteLocalVars(g *localVarGenerator, stack *localDeclaredVars, used VarSet, body Body, strict bool) (Body, map[Var]Var, Errors) {
+ var errs Errors
+ body, errs = rewriteDeclaredVarsInBody(g, stack, used, body, errs, strict)
+ return body, stack.Peek().vs, errs
+}
+
+func rewriteDeclaredVarsInBody(g *localVarGenerator, stack *localDeclaredVars, used VarSet, body Body, errs Errors, strict bool) (Body, Errors) {
+ var cpy Body
+
+ for i := range body {
+ var expr *Expr
+ switch {
+ case body[i].IsAssignment():
+ stack.assignment = true
+ expr, errs = rewriteDeclaredAssignment(g, stack, body[i], errs, strict)
+ case body[i].IsSome():
+ expr, errs = rewriteSomeDeclStatement(g, stack, body[i], errs, strict)
+ case body[i].IsEvery():
+ expr, errs = rewriteEveryStatement(g, stack, body[i], errs, strict)
+ default:
+ expr, errs = rewriteDeclaredVarsInExpr(g, stack, body[i], errs, strict)
+ }
+ if expr != nil {
+ cpy.Append(expr)
+ }
+ }
+
+ // If the body only contained a var statement it will be empty at this
+ // point. Append true to the body to ensure that it's non-empty (zero length
+ // bodies are not supported.)
+ if len(cpy) == 0 {
+ cpy.Append(NewExpr(BooleanTerm(true)))
+ }
+
+ errs = checkUnusedAssignedVars(body, stack, used, errs, strict)
+ return cpy, checkUnusedDeclaredVars(body, stack, used, cpy, errs)
+}
+
+func checkUnusedAssignedVars(body Body, stack *localDeclaredVars, used VarSet, errs Errors, strict bool) Errors {
+ if !strict || len(errs) > 0 {
+ return errs
+ }
+
+ dvs := stack.Peek()
+
+ hasAssignedVars := false
+ for _, occ := range dvs.occurrence {
+ if occ == assignedVar {
+ hasAssignedVars = true
+ }
+ }
+ if !hasAssignedVars {
+ return errs
+ }
+
+ unused := NewVarSet()
+
+ for v, occ := range dvs.occurrence {
+ // A var that was assigned in this scope must have been seen (used) more than once (the time of assignment) in
+ // the same, or nested, scope to be counted as used.
+ if !v.IsWildcard() && stack.Count(v) <= 1 && occ == assignedVar {
+ unused.Add(dvs.vs[v])
+ }
+ }
+
+ rewrittenUsed := NewVarSet()
+ for v := range used {
+ if gv, ok := stack.Declared(v); ok {
+ rewrittenUsed.Add(gv)
+ } else {
+ rewrittenUsed.Add(v)
+ }
+ }
+
+ unused = unused.Diff(rewrittenUsed)
+ if len(unused) == 0 {
+ return errs
+ }
+
+ reversed := make(map[Var]Var, len(dvs.vs))
+ for k, v := range dvs.vs {
+ reversed[v] = k
+ }
+
+ for _, gv := range unused.Sorted() {
+ found := false
+ for i := range body {
+ if body[i].Vars(VarVisitorParams{}).Contains(gv) {
+ errs = append(errs, NewError(CompileErr, body[i].Loc(), "assigned var %v unused", reversed[gv]))
+ found = true
+ break
+ }
+ }
+ if !found {
+ errs = append(errs, NewError(CompileErr, body[0].Loc(), "assigned var %v unused", reversed[gv]))
+ }
+ }
+
+ return errs
+}
+
+func checkUnusedDeclaredVars(body Body, stack *localDeclaredVars, used VarSet, cpy Body, errs Errors) Errors {
+
+ // NOTE(tsandall): Do not generate more errors if there are existing
+ // declaration errors.
+ if len(errs) > 0 {
+ return errs
+ }
+
+ dvs := stack.Peek()
+
+ hasDeclaredVars := false
+ for _, occ := range dvs.occurrence {
+ if occ == declaredVar {
+ hasDeclaredVars = true
+ }
+ }
+ if !hasDeclaredVars {
+ return errs
+ }
+
+ declared := NewVarSet()
+
+ for v, occ := range dvs.occurrence {
+ if occ == declaredVar {
+ declared.Add(dvs.vs[v])
+ }
+ }
+
+ bodyvars := cpy.Vars(VarVisitorParams{})
+
+ for v := range used {
+ if gv, ok := stack.Declared(v); ok {
+ bodyvars.Add(gv)
+ } else {
+ bodyvars.Add(v)
+ }
+ }
+
+ dbv := declared.Diff(bodyvars)
+ if dbv.DiffCount(used) == 0 {
+ return errs
+ }
+
+ reversed := make(map[Var]Var, len(dvs.vs))
+ for k, v := range dvs.vs {
+ reversed[v] = k
+ }
+
+ for _, gv := range dbv.Diff(used).Sorted() {
+ rv := reversed[gv]
+ if !rv.IsGenerated() {
+ // Scan through body exprs, looking for a match between the
+ // bad var's original name, and each expr's declared vars.
+ foundUnusedVarByName := false
+ for i := range body {
+ varsDeclaredInExpr := declaredVars(body[i])
+ if varsDeclaredInExpr.Contains(rv) {
+ // TODO(philipc): Clean up the offset logic here when the parser
+ // reports more accurate locations.
+ errs = append(errs, NewError(CompileErr, body[i].Loc(), "declared var %v unused", rv))
+ foundUnusedVarByName = true
+ break
+ }
+ }
+ // Default error location returned.
+ if !foundUnusedVarByName {
+ errs = append(errs, NewError(CompileErr, body[0].Loc(), "declared var %v unused", rv))
+ }
+ }
+ }
+
+ return errs
+}
+
+func rewriteEveryStatement(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) {
+ e := expr.Copy()
+ every := e.Terms.(*Every)
+
+ errs = rewriteDeclaredVarsInTermRecursive(g, stack, every.Domain, errs, strict)
+
+ stack.Push()
+ defer stack.Pop()
+
+ // if the key exists, rewrite
+ if every.Key != nil {
+ if v := every.Key.Value.(Var); !v.IsWildcard() {
+ gv, err := rewriteDeclaredVar(g, stack, v, declaredVar)
+ if err != nil {
+ return nil, append(errs, NewError(CompileErr, every.Loc(), "%s", err.Error()))
+ }
+ every.Key.Value = gv
+ }
+ } else { // if the key doesn't exist, add dummy local
+ every.Key = NewTerm(g.Generate())
+ }
+
+ // value is always present
+ if v := every.Value.Value.(Var); !v.IsWildcard() {
+ gv, err := rewriteDeclaredVar(g, stack, v, declaredVar)
+ if err != nil {
+ return nil, append(errs, NewError(CompileErr, every.Loc(), "%s", err.Error()))
+ }
+ every.Value.Value = gv
+ }
+
+ used := NewVarSet()
+ every.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, every.Body, errs, strict)
+
+ return rewriteDeclaredVarsInExpr(g, stack, e, errs, strict)
+}
+
+func rewriteSomeDeclStatement(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) {
+ e := expr.Copy()
+ decl := e.Terms.(*SomeDecl)
+ for i := range decl.Symbols {
+ switch v := decl.Symbols[i].Value.(type) {
+ case Var:
+ if _, err := rewriteDeclaredVar(g, stack, v, declaredVar); err != nil {
+ return nil, append(errs, NewError(CompileErr, decl.Loc(), "%s", err.Error()))
+ }
+ case Call:
+ var key, val, container *Term
+ switch len(v) {
+ case 4: // member3
+ key = v[1]
+ val = v[2]
+ container = v[3]
+ case 3: // member
+ key = NewTerm(g.Generate())
+ val = v[1]
+ container = v[2]
+ }
+
+ var rhs *Term
+ switch c := container.Value.(type) {
+ case Ref:
+ rhs = RefTerm(append(c, key)...)
+ default:
+ rhs = RefTerm(container, key)
+ }
+ e.Terms = []*Term{
+ RefTerm(VarTerm(Equality.Name)), val, rhs,
+ }
+
+ output := VarSet{}
+
+ for _, v0 := range outputVarsForExprEq(e, container.Vars(), output).Sorted() {
+ if _, err := rewriteDeclaredVar(g, stack, v0, declaredVar); err != nil {
+ return nil, append(errs, NewError(CompileErr, decl.Loc(), "%s", err.Error()))
+ }
+ }
+ return rewriteDeclaredVarsInExpr(g, stack, e, errs, strict)
+ }
+ }
+ return nil, errs
+}
+
+func rewriteDeclaredVarsInExpr(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) {
+ vis := NewGenericVisitor(func(x any) bool {
+ var stop bool
+ switch x := x.(type) {
+ case *Term:
+ stop, errs = rewriteDeclaredVarsInTerm(g, stack, x, errs, strict)
+ case *With:
+ stop, errs = true, rewriteDeclaredVarsInWithRecursive(g, stack, x, errs, strict)
+ }
+ return stop
+ })
+ vis.Walk(expr)
+ return expr, errs
+}
+
+func rewriteDeclaredAssignment(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) {
+
+ if expr.Negated {
+ errs = append(errs, NewError(CompileErr, expr.Location, "cannot assign vars inside negated expression"))
+ return expr, errs
+ }
+
+ numErrsBefore := len(errs)
+
+ if !validEqAssignArgCount(expr) {
+ return expr, errs
+ }
+
+ // Rewrite terms on right hand side capture seen vars and recursively
+ // process comprehensions before left hand side is processed. Also
+ // rewrite with modifier.
+ errs = rewriteDeclaredVarsInTermRecursive(g, stack, expr.Operand(1), errs, strict)
+
+ for _, w := range expr.With {
+ errs = rewriteDeclaredVarsInTermRecursive(g, stack, w.Value, errs, strict)
+ }
+
+ // Rewrite vars on left hand side with unique names. Catch redeclaration
+ // and invalid term types here.
+ var vis func(t *Term) bool
+
+ vis = func(t *Term) bool {
+ switch v := t.Value.(type) {
+ case Var:
+ if gv, err := rewriteDeclaredVar(g, stack, v, assignedVar); err != nil {
+ errs = append(errs, NewError(CompileErr, t.Location, "%s", err.Error()))
+ } else {
+ t.Value = gv
+ }
+ return true
+ case *Array:
+ return false
+ case *object:
+ v.Foreach(func(_, v *Term) {
+ WalkTerms(v, vis)
+ })
+ return true
+ case Ref:
+ if RootDocumentRefs.Contains(t) {
+ if gv, err := rewriteDeclaredVar(g, stack, v[0].Value.(Var), assignedVar); err != nil {
+ errs = append(errs, NewError(CompileErr, t.Location, "%s", err.Error()))
+ } else {
+ t.Value = gv
+ }
+ return true
+ }
+ }
+ errs = append(errs, NewError(CompileErr, t.Location, "cannot assign to %v", ValueName(t.Value)))
+ return true
+ }
+
+ WalkTerms(expr.Operand(0), vis)
+
+ if len(errs) == numErrsBefore {
+ loc := expr.Operator()[0].Location
+ expr.SetOperator(RefTerm(VarTerm(Equality.Name).SetLocation(loc)).SetLocation(loc))
+ }
+
+ return expr, errs
+}
+
+func rewriteDeclaredVarsInTerm(g *localVarGenerator, stack *localDeclaredVars, term *Term, errs Errors, strict bool) (bool, Errors) {
+ switch v := term.Value.(type) {
+ case Var:
+ if gv, ok := stack.Declared(v); ok {
+ term.Value = gv
+ stack.Seen(v)
+ } else if stack.Occurrence(v) == newVar {
+ stack.Insert(v, v, seenVar)
+ }
+ case Ref:
+ if RootDocumentRefs.Contains(term) {
+ x := v[0].Value.(Var)
+ if occ, ok := stack.GlobalOccurrence(x); ok && occ != seenVar {
+ gv, _ := stack.Declared(x)
+ term.Value = gv
+ }
+
+ return true, errs
+ }
+ return false, errs
+ case Call:
+ ref := v[0]
+ WalkVars(ref, func(v Var) bool {
+ if gv, ok := stack.Declared(v); ok && !gv.Equal(v) {
+ // We will rewrite the ref of a function call, which is never ok since we don't have first-class functions.
+ errs = append(errs, NewError(CompileErr, term.Location, "called function %s shadowed", ref))
+ return true
+ }
+ return false
+ })
+ return false, errs
+ case *object:
+ cpy, _ := v.Map(func(k, v *Term) (*Term, *Term, error) {
+ kcpy := k.Copy()
+ errs = rewriteDeclaredVarsInTermRecursive(g, stack, kcpy, errs, strict)
+ errs = rewriteDeclaredVarsInTermRecursive(g, stack, v, errs, strict)
+ return kcpy, v, nil
+ })
+ term.Value = cpy
+ case Set:
+ cpy, _ := v.Map(func(elem *Term) (*Term, error) {
+ elemcpy := elem.Copy()
+ errs = rewriteDeclaredVarsInTermRecursive(g, stack, elemcpy, errs, strict)
+ return elemcpy, nil
+ })
+ term.Value = cpy
+ case *ArrayComprehension:
+ errs = rewriteDeclaredVarsInArrayComprehension(g, stack, v, errs, strict)
+ case *SetComprehension:
+ errs = rewriteDeclaredVarsInSetComprehension(g, stack, v, errs, strict)
+ case *ObjectComprehension:
+ errs = rewriteDeclaredVarsInObjectComprehension(g, stack, v, errs, strict)
+ default:
+ return false, errs
+ }
+ return true, errs
+}
+
+func rewriteDeclaredVarsInTermRecursive(g *localVarGenerator, stack *localDeclaredVars, term *Term, errs Errors, strict bool) Errors {
+ WalkTerms(term, func(t *Term) bool {
+ var stop bool
+ stop, errs = rewriteDeclaredVarsInTerm(g, stack, t, errs, strict)
+ return stop
+ })
+ return errs
+}
+
+func rewriteDeclaredVarsInWithRecursive(g *localVarGenerator, stack *localDeclaredVars, w *With, errs Errors, strict bool) Errors {
+ // NOTE(sr): `with input as` and `with input.a.b.c as` are deliberately skipped here: `input` could
+ // have been shadowed by a local variable/argument but should NOT be replaced in the `with` target.
+ //
+ // We cannot drop `input` from the stack since it's conceivable to do `with input[input] as` where
+ // the second input is meant to be the local var. It's a terrible idea, but when you're shadowing
+ // `input` those might be your thing.
+ errs = rewriteDeclaredVarsInTermRecursive(g, stack, w.Target, errs, strict)
+ if sdwInput, ok := stack.Declared(InputRootDocument.Value.(Var)); ok { // Was "input" shadowed...
+ switch value := w.Target.Value.(type) {
+ case Var:
+ if sdwInput.Equal(value) { // ...and replaced? If so, fix it
+ w.Target.Value = InputRootRef
+ }
+ case Ref:
+ if sdwInput.Equal(value[0].Value.(Var)) {
+ w.Target.Value.(Ref)[0].Value = InputRootDocument.Value
+ }
+ }
+ }
+ // No special handling of the `with` value
+ return rewriteDeclaredVarsInTermRecursive(g, stack, w.Value, errs, strict)
+}
+
+func rewriteDeclaredVarsInArrayComprehension(g *localVarGenerator, stack *localDeclaredVars, v *ArrayComprehension, errs Errors, strict bool) Errors {
+ used := NewVarSet()
+ used.Update(v.Term.Vars())
+
+ stack.Push()
+ v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict)
+ errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Term, errs, strict)
+ stack.Pop()
+ return errs
+}
+
+func rewriteDeclaredVarsInSetComprehension(g *localVarGenerator, stack *localDeclaredVars, v *SetComprehension, errs Errors, strict bool) Errors {
+ used := NewVarSet()
+ used.Update(v.Term.Vars())
+
+ stack.Push()
+ v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict)
+ errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Term, errs, strict)
+ stack.Pop()
+ return errs
+}
+
+func rewriteDeclaredVarsInObjectComprehension(g *localVarGenerator, stack *localDeclaredVars, v *ObjectComprehension, errs Errors, strict bool) Errors {
+ used := NewVarSet()
+ used.Update(v.Key.Vars())
+ used.Update(v.Value.Vars())
+
+ stack.Push()
+ v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict)
+ errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Key, errs, strict)
+ errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Value, errs, strict)
+ stack.Pop()
+ return errs
+}
+
+func rewriteDeclaredVar(g *localVarGenerator, stack *localDeclaredVars, v Var, occ varOccurrence) (gv Var, err error) {
+ switch stack.Occurrence(v) {
+ case seenVar:
+ return gv, fmt.Errorf("var %v referenced above", v)
+ case assignedVar:
+ return gv, fmt.Errorf("var %v assigned above", v)
+ case declaredVar:
+ return gv, fmt.Errorf("var %v declared above", v)
+ case argVar:
+ return gv, fmt.Errorf("arg %v redeclared", v)
+ }
+ gv = g.Generate()
+ stack.Insert(v, gv, occ)
+ return
+}
+
+// rewriteWithModifiersInBody will rewrite the body so that with modifiers do
+// not contain terms that require evaluation as values. If this function
+// encounters an invalid with modifier target then it will raise an error.
+func rewriteWithModifiersInBody(c *Compiler, unsafeBuiltinsMap map[string]struct{}, f *equalityFactory, body Body) (Body, *Error) {
+ var result Body
+ for i := range body {
+ exprs, err := rewriteWithModifier(c, unsafeBuiltinsMap, f, body[i])
+ if err != nil {
+ return nil, err
+ }
+ if len(exprs) > 0 {
+ for _, expr := range exprs {
+ result.Append(expr)
+ }
+ } else {
+ result.Append(body[i])
+ }
+ }
+ return result, nil
+}
+
+func rewriteWithModifier(c *Compiler, unsafeBuiltinsMap map[string]struct{}, f *equalityFactory, expr *Expr) ([]*Expr, *Error) {
+
+ var result []*Expr
+ for i := range expr.With {
+ eval, err := validateWith(c, unsafeBuiltinsMap, expr, i)
+ if err != nil {
+ return nil, err
+ }
+
+ if eval {
+ eq := f.Generate(expr.With[i].Value)
+ result = append(result, eq)
+ expr.With[i].Value = eq.Operand(0)
+ }
+ }
+
+ return append(result, expr), nil
+}
+
+func validateWith(c *Compiler, unsafeBuiltinsMap map[string]struct{}, expr *Expr, i int) (bool, *Error) {
+ target, value := expr.With[i].Target, expr.With[i].Value
+
+ // Ensure that values that are built-ins are rewritten to Ref (not Var)
+ if v, ok := value.Value.(Var); ok {
+ if _, ok := c.builtins[v.String()]; ok {
+ value.Value = Ref([]*Term{NewTerm(v)})
+ }
+ }
+ isBuiltinRefOrVar, err := isBuiltinRefOrVar(c.builtins, unsafeBuiltinsMap, target)
+ if err != nil {
+ return false, err
+ }
+
+ isAllowedUnknownFuncCall := false
+ if c.allowUndefinedFuncCalls {
+ switch target.Value.(type) {
+ case Ref, Var:
+ isAllowedUnknownFuncCall = true
+ }
+ }
+
+ switch {
+ case isDataRef(target):
+ ref := target.Value.(Ref)
+ targetNode := c.RuleTree
+ for i := range len(ref) - 1 {
+ child := targetNode.Child(ref[i].Value)
+ if child == nil {
+ break
+ } else if len(child.Values) > 0 {
+ return false, NewError(CompileErr, target.Loc(), "with keyword cannot partially replace virtual document(s)")
+ }
+ targetNode = child
+ }
+
+ if targetNode != nil {
+ // NOTE(sr): at this point in the compiler stages, we don't have a fully-populated
+ // TypeEnv yet -- so we have to make do with this check to see if the replacement
+ // target is a function. It's probably wrong for arity-0 functions, but those are
+ // and edge case anyways.
+ if child := targetNode.Child(ref[len(ref)-1].Value); child != nil {
+ for _, v := range child.Values {
+ if len(v.(*Rule).Head.Args) > 0 {
+ if ok, err := validateWithFunctionValue(c.builtins, unsafeBuiltinsMap, c.RuleTree, value); err != nil || ok {
+ return false, err // err may be nil
+ }
+ }
+ }
+ }
+ }
+
+ // If the with-value is a ref to a function, but not a call, we can't rewrite it
+ if r, ok := value.Value.(Ref); ok {
+ // TODO: check that target ref doesn't exist?
+ if valueNode := c.RuleTree.Find(r); valueNode != nil {
+ for _, v := range valueNode.Values {
+ if len(v.(*Rule).Head.Args) > 0 {
+ return false, nil
+ }
+ }
+ }
+ }
+ case isInputRef(target): // ok, valid
+ case isBuiltinRefOrVar:
+
+ // NOTE(sr): first we ensure that parsed Var builtins (`count`, `concat`, etc)
+ // are rewritten to their proper Ref convention
+ if v, ok := target.Value.(Var); ok {
+ target.Value = Ref([]*Term{NewTerm(v)})
+ }
+
+ targetRef := target.Value.(Ref)
+ bi := c.builtins[targetRef.String()] // safe because isBuiltinRefOrVar checked this
+ if err := validateWithBuiltinTarget(bi, targetRef, target.Loc()); err != nil {
+ return false, err
+ }
+
+ if ok, err := validateWithFunctionValue(c.builtins, unsafeBuiltinsMap, c.RuleTree, value); err != nil || ok {
+ return false, err // err may be nil
+ }
+ case isAllowedUnknownFuncCall:
+ // The target isn't a ref to the input doc, data doc, or a known built-in, but it might be a ref to an unknown built-in.
+ return false, nil
+ default:
+ return false, NewError(TypeErr, target.Location, "with keyword target must reference existing %v, %v, or a function", InputRootDocument, DefaultRootDocument)
+ }
+ return requiresEval(value), nil
+}
+
+func validateWithBuiltinTarget(bi *Builtin, target Ref, loc *location.Location) *Error {
+ switch bi.Name {
+ case Equality.Name,
+ RegoMetadataChain.Name,
+ RegoMetadataRule.Name:
+ return NewError(CompileErr, loc, "with keyword replacing built-in function: replacement of %q invalid", bi.Name)
+ }
+
+ switch {
+ case target.HasPrefix(Ref([]*Term{VarTerm("internal")})):
+ return NewError(CompileErr, loc, "with keyword replacing built-in function: replacement of internal function %q invalid", target)
+
+ case bi.Relation:
+ return NewError(CompileErr, loc, "with keyword replacing built-in function: target must not be a relation")
+
+ case bi.Decl.Result() == nil:
+ return NewError(CompileErr, loc, "with keyword replacing built-in function: target must not be a void function")
+ }
+ return nil
+}
+
+func validateWithFunctionValue(bs map[string]*Builtin, unsafeMap map[string]struct{}, ruleTree *TreeNode, value *Term) (bool, *Error) {
+ if v, ok := value.Value.(Ref); ok {
+ if ruleTree.Find(v) != nil { // ref exists in rule tree
+ return true, nil
+ }
+ }
+ return isBuiltinRefOrVar(bs, unsafeMap, value)
+}
+
+func isInputRef(term *Term) bool {
+ if ref, ok := term.Value.(Ref); ok {
+ if ref.HasPrefix(InputRootRef) {
+ return true
+ }
+ }
+ return false
+}
+
+func isDataRef(term *Term) bool {
+ if ref, ok := term.Value.(Ref); ok {
+ if ref.HasPrefix(DefaultRootRef) {
+ return true
+ }
+ }
+ return false
+}
+
+func isBuiltinRefOrVar(bs map[string]*Builtin, unsafeBuiltinsMap map[string]struct{}, term *Term) (bool, *Error) {
+ switch v := term.Value.(type) {
+ case Ref, Var:
+ if _, ok := unsafeBuiltinsMap[v.String()]; ok {
+ return false, NewError(CompileErr, term.Location, "with keyword replacing built-in function: target must not be unsafe: %q", v)
+ }
+ _, ok := bs[v.String()]
+ return ok, nil
+ }
+ return false, nil
+}
+
+func isVirtual(node *TreeNode, ref Ref) bool {
+ for i := range ref {
+ child := node.Child(ref[i].Value)
+ if child == nil {
+ return false
+ } else if len(child.Values) > 0 {
+ return true
+ }
+ node = child
+ }
+ return true
+}
+
+func safetyErrorSlice(unsafe unsafeVars, rewritten map[Var]Var) (result Errors) {
+ if len(unsafe) == 0 {
+ return
+ }
+
+ for _, pair := range unsafe.Vars() {
+ v := pair.Var
+ if w, ok := rewritten[v]; ok {
+ v = w
+ }
+ if !v.IsGenerated() {
+ if _, ok := allFutureKeywords[string(v)]; ok {
+ result = append(result, NewError(UnsafeVarErr, pair.Loc,
+ "var %[1]v is unsafe (hint: `import future.keywords.%[1]v` to import a future keyword)", v))
+ continue
+ }
+ result = append(result, NewError(UnsafeVarErr, pair.Loc, "var %v is unsafe", v))
+ }
+ }
+
+ if len(result) > 0 {
+ return
+ }
+
+ // If the expression contains unsafe generated variables, report which
+ // expressions are unsafe instead of the variables that are unsafe (since
+ // the latter are not meaningful to the user.)
+ pairs := unsafe.Slice()
+
+ slices.SortFunc(pairs, func(a, b unsafePair) int {
+ return a.Expr.Location.Compare(b.Expr.Location)
+ })
+
+ // Report at most one error per generated variable.
+ seen := NewVarSet()
+
+ for _, expr := range pairs {
+ before := len(seen)
+ for v := range expr.Vars {
+ if v.IsGenerated() {
+ seen.Add(v)
+ }
+ }
+ if len(seen) > before {
+ result = append(result, NewError(UnsafeVarErr, expr.Expr.Location, "expression is unsafe"))
+ }
+ }
+
+ return
+}
+
+func checkUnsafeBuiltins(unsafeBuiltinsMap map[string]struct{}, node any) Errors {
+ var errs Errors
+ WalkExprs(node, func(x *Expr) bool {
+ if x.IsCall() {
+ operator := x.Operator().String()
+ if _, ok := unsafeBuiltinsMap[operator]; ok {
+ errs = append(errs, NewError(TypeErr, x.Loc(), "unsafe built-in function calls in expression: %v", operator))
+ }
+ }
+ return false
+ })
+ return errs
+}
+
+func rewriteVarsInRef(vars ...map[Var]Var) varRewriter {
+ return func(node Ref) Ref {
+ i, _ := TransformVars(node, func(v Var) (Value, error) {
+ for _, m := range vars {
+ if u, ok := m[v]; ok {
+ return u, nil
+ }
+ }
+ return v, nil
+ })
+ return i.(Ref)
+ }
+}
+
+// NOTE(sr): This is duplicated with compile/compile.go; but moving it into another location
+// would cause a circular dependency -- the refSet definition needs ast.Ref. If we make it
+// public in the ast package, the compile package could take it from there, but it would also
+// increase our public interface. Let's reconsider if we need it in a third place.
+type refSet struct {
+ s []Ref
+}
+
+func newRefSet(x ...Ref) *refSet {
+ result := &refSet{}
+ for i := range x {
+ result.AddPrefix(x[i])
+ }
+ return result
+}
+
+// ContainsPrefix returns true if r is prefixed by any of the existing refs in the set.
+func (rs *refSet) ContainsPrefix(r Ref) bool {
+ return slices.ContainsFunc(rs.s, r.HasPrefix)
+}
+
+// AddPrefix inserts r into the set if r is not prefixed by any existing
+// refs in the set. If any existing refs are prefixed by r, those existing
+// refs are removed.
+func (rs *refSet) AddPrefix(r Ref) {
+ if rs.ContainsPrefix(r) {
+ return
+ }
+ cpy := []Ref{r}
+ for i := range rs.s {
+ if !rs.s[i].HasPrefix(r) {
+ cpy = append(cpy, rs.s[i])
+ }
+ }
+ rs.s = cpy
+}
+
+// Sorted returns a sorted slice of terms for refs in the set.
+func (rs *refSet) Sorted() []*Term {
+ terms := make([]*Term, len(rs.s))
+ for i := range rs.s {
+ terms[i] = NewTerm(rs.s[i])
+ }
+ slices.SortFunc(terms, TermValueCompare)
+ return terms
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/compilehelper.go b/vendor/github.com/open-policy-agent/opa/v1/ast/compilehelper.go
new file mode 100644
index 0000000000..7d81d45e6d
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/compilehelper.go
@@ -0,0 +1,62 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+// CompileModules takes a set of Rego modules represented as strings and
+// compiles them for evaluation. The keys of the map are used as filenames.
+func CompileModules(modules map[string]string) (*Compiler, error) {
+ return CompileModulesWithOpt(modules, CompileOpts{})
+}
+
+// CompileOpts defines a set of options for the compiler.
+type CompileOpts struct {
+ EnablePrintStatements bool
+ ParserOptions ParserOptions
+}
+
+// CompileModulesWithOpt takes a set of Rego modules represented as strings and
+// compiles them for evaluation. The keys of the map are used as filenames.
+func CompileModulesWithOpt(modules map[string]string, opts CompileOpts) (*Compiler, error) {
+
+ parsed := make(map[string]*Module, len(modules))
+
+ for f, module := range modules {
+ var pm *Module
+ var err error
+ if pm, err = ParseModuleWithOpts(f, module, opts.ParserOptions); err != nil {
+ return nil, err
+ }
+ parsed[f] = pm
+ }
+
+ compiler := NewCompiler().
+ WithDefaultRegoVersion(opts.ParserOptions.RegoVersion).
+ WithEnablePrintStatements(opts.EnablePrintStatements)
+ compiler.Compile(parsed)
+
+ if compiler.Failed() {
+ return nil, compiler.Errors
+ }
+
+ return compiler, nil
+}
+
+// MustCompileModules compiles a set of Rego modules represented as strings. If
+// the compilation process fails, this function panics.
+func MustCompileModules(modules map[string]string) *Compiler {
+ return MustCompileModulesWithOpts(modules, CompileOpts{})
+}
+
+// MustCompileModulesWithOpts compiles a set of Rego modules represented as strings. If
+// the compilation process fails, this function panics.
+func MustCompileModulesWithOpts(modules map[string]string, opts CompileOpts) *Compiler {
+
+ compiler, err := CompileModulesWithOpt(modules, opts)
+ if err != nil {
+ panic(err)
+ }
+
+ return compiler
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/compilemetrics.go b/vendor/github.com/open-policy-agent/opa/v1/ast/compilemetrics.go
similarity index 100%
rename from vendor/github.com/open-policy-agent/opa/ast/compilemetrics.go
rename to vendor/github.com/open-policy-agent/opa/v1/ast/compilemetrics.go
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/conflicts.go b/vendor/github.com/open-policy-agent/opa/v1/ast/conflicts.go
new file mode 100644
index 0000000000..685cc6b694
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/conflicts.go
@@ -0,0 +1,79 @@
+// Copyright 2019 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "slices"
+ "strings"
+)
+
+// CheckPathConflicts returns a set of errors indicating paths that
+// are in conflict with the result of the provided callable.
+func CheckPathConflicts(c *Compiler, exists func([]string) (bool, error)) Errors {
+ var errs Errors
+
+ root := c.RuleTree.Child(DefaultRootDocument.Value)
+ if root == nil {
+ return nil
+ }
+
+ if len(c.pathConflictCheckRoots) == 0 || slices.Contains(c.pathConflictCheckRoots, "") {
+ for _, child := range root.Children {
+ errs = append(errs, checkDocumentConflicts(child, exists, nil)...)
+ }
+ return errs
+ }
+
+ for _, rootPath := range c.pathConflictCheckRoots {
+ // traverse AST from `path` to go to the new root
+ paths := strings.Split(rootPath, "/")
+ node := root
+ for _, key := range paths {
+ node = node.Child(String(key))
+ if node == nil {
+ break
+ }
+ }
+
+ if node == nil {
+ // could not find the node from the AST (e.g. `path` is from a data file)
+ // then no conflict is possible
+ continue
+ }
+
+ for _, child := range node.Children {
+ errs = append(errs, checkDocumentConflicts(child, exists, paths)...)
+ }
+ }
+
+ return errs
+}
+
+func checkDocumentConflicts(node *TreeNode, exists func([]string) (bool, error), path []string) Errors {
+
+ switch key := node.Key.(type) {
+ case String:
+ path = append(path, string(key))
+ default: // other key types cannot conflict with data
+ return nil
+ }
+
+ if len(node.Values) > 0 {
+ s := strings.Join(path, "/")
+ if ok, err := exists(path); err != nil {
+ return Errors{NewError(CompileErr, node.Values[0].(*Rule).Loc(), "conflict check for data path %v: %v", s, err.Error())}
+ } else if ok {
+ return Errors{NewError(CompileErr, node.Values[0].(*Rule).Loc(), "conflicting rule for data path %v found", s)}
+ }
+ }
+
+ var errs Errors
+
+ for _, child := range node.Children {
+ errs = append(errs, checkDocumentConflicts(child, exists, path)...)
+ }
+
+ return errs
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/default_module_loader.go b/vendor/github.com/open-policy-agent/opa/v1/ast/default_module_loader.go
new file mode 100644
index 0000000000..528c253e16
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/default_module_loader.go
@@ -0,0 +1,14 @@
+// Copyright 2025 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+var defaultModuleLoader ModuleLoader
+
+// DefaultModuleLoader lets you inject an `ast.ModuleLoader` that will
+// always be used. If another one is provided with the ast package,
+// they will both be consulted to enrich the set of modules dynamically.
+func DefaultModuleLoader(ml ModuleLoader) {
+ defaultModuleLoader = ml
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/doc.go b/vendor/github.com/open-policy-agent/opa/v1/ast/doc.go
new file mode 100644
index 0000000000..62b04e301e
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/doc.go
@@ -0,0 +1,36 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package ast declares Rego syntax tree types and also includes a parser and compiler for preparing policies for execution in the policy engine.
+//
+// Rego policies are defined using a relatively small set of types: modules, package and import declarations, rules, expressions, and terms. At their core, policies consist of rules that are defined by one or more expressions over documents available to the policy engine. The expressions are defined by intrinsic values (terms) such as strings, objects, variables, etc.
+//
+// Rego policies are typically defined in text files and then parsed and compiled by the policy engine at runtime. The parsing stage takes the text or string representation of the policy and converts it into an abstract syntax tree (AST) that consists of the types mentioned above. The AST is organized as follows:
+//
+// Module
+// |
+// +--- Package (Reference)
+// |
+// +--- Imports
+// | |
+// | +--- Import (Term)
+// |
+// +--- Rules
+// |
+// +--- Rule
+// |
+// +--- Head
+// | |
+// | +--- Name (Variable)
+// | |
+// | +--- Key (Term)
+// | |
+// | +--- Value (Term)
+// |
+// +--- Body
+// |
+// +--- Expression (Term | Terms | Variable Declaration)
+//
+// At query time, the policy engine expects policies to have been compiled. The compilation stage takes one or more modules and compiles them into a format that the policy engine supports.
+package ast
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/env.go b/vendor/github.com/open-policy-agent/opa/v1/ast/env.go
new file mode 100644
index 0000000000..12d4be8918
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/env.go
@@ -0,0 +1,528 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/open-policy-agent/opa/v1/types"
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+// TypeEnv contains type info for static analysis such as type checking.
+type TypeEnv struct {
+ tree *typeTreeNode
+ next *TypeEnv
+ newChecker func() *typeChecker
+}
+
+// newTypeEnv returns an empty TypeEnv. The constructor is not exported because
+// type environments should only be created by the type checker.
+func newTypeEnv(f func() *typeChecker) *TypeEnv {
+ return &TypeEnv{
+ tree: newTypeTree(),
+ newChecker: f,
+ }
+}
+
+// Get returns the type of x.
+// Deprecated: Use GetByValue or GetByRef instead, as they are more efficient.
+func (env *TypeEnv) Get(x any) types.Type {
+ if term, ok := x.(*Term); ok {
+ x = term.Value
+ }
+
+ if v, ok := x.(Value); ok {
+ return env.GetByValue(v)
+ }
+
+ panic("unreachable")
+}
+
+// GetByValue returns the type of v.
+func (env *TypeEnv) GetByValue(v Value) types.Type {
+ switch x := v.(type) {
+
+ // Scalars.
+ case Null:
+ return types.Nl
+ case Boolean:
+ return types.B
+ case Number:
+ return types.N
+ case String:
+ return types.S
+
+ // Composites.
+ case *Array:
+ static := make([]types.Type, x.Len())
+ for i := range static {
+ tpe := env.GetByValue(x.Elem(i).Value)
+ static[i] = tpe
+ }
+
+ var dynamic types.Type
+ if len(static) == 0 {
+ dynamic = types.A
+ }
+
+ return types.NewArray(static, dynamic)
+
+ case *lazyObj:
+ return env.GetByValue(x.force())
+ case *object:
+ static := []*types.StaticProperty{}
+ var dynamic *types.DynamicProperty
+
+ x.Foreach(func(k, v *Term) {
+ if IsConstant(k.Value) {
+ kjson, err := JSON(k.Value)
+ if err == nil {
+ tpe := env.GetByValue(v.Value)
+ static = append(static, types.NewStaticProperty(kjson, tpe))
+ return
+ }
+ }
+ // Can't handle it as a static property, fallback to dynamic
+ typeK := env.GetByValue(k.Value)
+ typeV := env.GetByValue(v.Value)
+ dynamic = types.NewDynamicProperty(typeK, typeV)
+ })
+
+ if len(static) == 0 && dynamic == nil {
+ dynamic = types.NewDynamicProperty(types.A, types.A)
+ }
+
+ return types.NewObject(static, dynamic)
+
+ case Set:
+ var tpe types.Type
+ x.Foreach(func(elem *Term) {
+ tpe = types.Or(tpe, env.GetByValue(elem.Value))
+ })
+ if tpe == nil {
+ tpe = types.A
+ }
+ return types.NewSet(tpe)
+
+ // Comprehensions.
+ case *ArrayComprehension:
+ cpy, errs := env.newChecker().CheckBody(env, x.Body)
+ if len(errs) == 0 {
+ return types.NewArray(nil, cpy.GetByValue(x.Term.Value))
+ }
+ return nil
+ case *ObjectComprehension:
+ cpy, errs := env.newChecker().CheckBody(env, x.Body)
+ if len(errs) == 0 {
+ return types.NewObject(nil, types.NewDynamicProperty(cpy.GetByValue(x.Key.Value), cpy.GetByValue(x.Value.Value)))
+ }
+ return nil
+ case *SetComprehension:
+ cpy, errs := env.newChecker().CheckBody(env, x.Body)
+ if len(errs) == 0 {
+ return types.NewSet(cpy.GetByValue(x.Term.Value))
+ }
+ return nil
+
+ // Refs.
+ case Ref:
+ return env.GetByRef(x)
+
+ // Vars.
+ case Var:
+ if node := env.tree.Child(v); node != nil {
+ return node.Value()
+ }
+ if env.next != nil {
+ return env.next.GetByValue(v)
+ }
+ return nil
+
+ // Calls.
+ case Call:
+ return nil
+ }
+
+ return env.Get(v)
+}
+
+// GetByRef returns the type of the value referred to by ref.
+func (env *TypeEnv) GetByRef(ref Ref) types.Type {
+ node := env.tree.Child(ref[0].Value)
+ if node == nil {
+ return env.getRefFallback(ref)
+ }
+
+ return env.getRefRec(node, ref, ref[1:])
+}
+
+func (env *TypeEnv) getRefFallback(ref Ref) types.Type {
+
+ if env.next != nil {
+ return env.next.GetByRef(ref)
+ }
+
+ if RootDocumentNames.Contains(ref[0]) {
+ return types.A
+ }
+
+ return nil
+}
+
+func (env *TypeEnv) getRefRec(node *typeTreeNode, ref, tail Ref) types.Type {
+ if len(tail) == 0 {
+ return env.getRefRecExtent(node)
+ }
+
+ if node.Leaf() {
+ if node.children.Len() > 0 {
+ if child := node.Child(tail[0].Value); child != nil {
+ return env.getRefRec(child, ref, tail[1:])
+ }
+ }
+ return selectRef(node.Value(), tail)
+ }
+
+ if !IsConstant(tail[0].Value) {
+ return selectRef(env.getRefRecExtent(node), tail)
+ }
+
+ child := node.Child(tail[0].Value)
+ if child == nil {
+ return env.getRefFallback(ref)
+ }
+
+ return env.getRefRec(child, ref, tail[1:])
+}
+
+func (env *TypeEnv) getRefRecExtent(node *typeTreeNode) types.Type {
+
+ if node.Leaf() {
+ return node.Value()
+ }
+
+ children := []*types.StaticProperty{}
+
+ node.Children().Iter(func(key Value, child *typeTreeNode) bool {
+ tpe := env.getRefRecExtent(child)
+
+ // NOTE(sr): Converting to Golang-native types here is an extension of what we did
+ // before -- only supporting strings. But since we cannot differentiate sets and arrays
+ // that way, we could reconsider.
+ switch key.(type) {
+ case String, Number, Boolean: // skip anything else
+ propKey, err := JSON(key)
+ if err != nil {
+ panic(fmt.Errorf("unreachable, ValueToInterface: %w", err))
+ }
+ children = append(children, types.NewStaticProperty(propKey, tpe))
+ }
+ return false
+ })
+
+ // TODO(tsandall): for now, these objects can have any dynamic properties
+ // because we don't have schema for base docs. Once schemas are supported
+ // we can improve this.
+ return types.NewObject(children, types.NewDynamicProperty(types.S, types.A))
+}
+
+func (env *TypeEnv) wrap() *TypeEnv {
+ cpy := *env
+ cpy.next = env
+ cpy.tree = newTypeTree()
+ return &cpy
+}
+
+// typeTreeNode is used to store type information in a tree.
+type typeTreeNode struct {
+ key Value
+ value types.Type
+ children *util.HasherMap[Value, *typeTreeNode]
+}
+
+func newTypeTree() *typeTreeNode {
+ return &typeTreeNode{
+ key: nil,
+ value: nil,
+ children: util.NewHasherMap[Value, *typeTreeNode](ValueEqual),
+ }
+}
+
+func (n *typeTreeNode) Child(key Value) *typeTreeNode {
+ value, ok := n.children.Get(key)
+ if !ok {
+ return nil
+ }
+ return value
+}
+
+func (n *typeTreeNode) Children() *util.HasherMap[Value, *typeTreeNode] {
+ return n.children
+}
+
+func (n *typeTreeNode) Get(path Ref) types.Type {
+ curr := n
+ for _, term := range path {
+ child, ok := curr.children.Get(term.Value)
+ if !ok {
+ return nil
+ }
+ curr = child
+ }
+ return curr.Value()
+}
+
+func (n *typeTreeNode) Leaf() bool {
+ return n.value != nil
+}
+
+func (n *typeTreeNode) PutOne(key Value, tpe types.Type) {
+ c, ok := n.children.Get(key)
+
+ var child *typeTreeNode
+ if !ok {
+ child = newTypeTree()
+ child.key = key
+ n.children.Put(key, child)
+ } else {
+ child = c
+ }
+
+ child.value = tpe
+}
+
+func (n *typeTreeNode) Put(path Ref, tpe types.Type) {
+ curr := n
+ for _, term := range path {
+ c, ok := curr.children.Get(term.Value)
+
+ var child *typeTreeNode
+ if !ok {
+ child = newTypeTree()
+ child.key = term.Value
+ curr.children.Put(child.key, child)
+ } else {
+ child = c
+ }
+
+ curr = child
+ }
+ curr.value = tpe
+}
+
+// Insert inserts tpe at path in the tree, but also merges the value into any types.Object present along that path.
+// If a types.Object is inserted, any leafs already present further down the tree are merged into the inserted object.
+// path must be ground.
+func (n *typeTreeNode) Insert(path Ref, tpe types.Type, env *TypeEnv) {
+ curr := n
+ for i, term := range path {
+ c, ok := curr.children.Get(term.Value)
+
+ var child *typeTreeNode
+ if !ok {
+ child = newTypeTree()
+ child.key = term.Value
+ curr.children.Put(child.key, child)
+ } else {
+ child = c
+ if child.value != nil && i+1 < len(path) {
+ // If child has an object value, merge the new value into it.
+ if o, ok := child.value.(*types.Object); ok {
+ var err error
+ child.value, err = insertIntoObject(o, path[i+1:], tpe, env)
+ if err != nil {
+ panic(fmt.Errorf("unreachable, insertIntoObject: %w", err))
+ }
+ }
+ }
+ }
+
+ curr = child
+ }
+
+ curr.value = mergeTypes(curr.value, tpe)
+
+ if _, ok := tpe.(*types.Object); ok && curr.children.Len() > 0 {
+ // merge all leafs into the inserted object
+ leafs := curr.Leafs()
+ for p, t := range leafs {
+ var err error
+ curr.value, err = insertIntoObject(curr.value.(*types.Object), *p, t, env)
+ if err != nil {
+ panic(fmt.Errorf("unreachable, insertIntoObject: %w", err))
+ }
+ }
+ }
+}
+
+// mergeTypes merges the types of 'a' and 'b'. If both are sets, their 'of' types are joined with an types.Or.
+// If both are objects, the key types of their dynamic properties are joined with types.Or:s, and their value types
+// are recursively merged (using mergeTypes).
+// If 'a' and 'b' are both objects, and at least one of them have static properties, they are joined
+// with an types.Or, instead of being merged.
+// If 'a' is an Any containing an Object, and 'b' is an Object (or vice versa); AND both objects have no
+// static properties, they are merged.
+// If 'a' and 'b' are different types, they are joined with an types.Or.
+func mergeTypes(a, b types.Type) types.Type {
+ if a == nil {
+ return b
+ }
+
+ if b == nil {
+ return a
+ }
+
+ switch a := a.(type) {
+ case *types.Object:
+ if bObj, ok := b.(*types.Object); ok && len(a.StaticProperties()) == 0 && len(bObj.StaticProperties()) == 0 {
+ if len(a.StaticProperties()) > 0 || len(bObj.StaticProperties()) > 0 {
+ return types.Or(a, bObj)
+ }
+
+ aDynProps := a.DynamicProperties()
+ bDynProps := bObj.DynamicProperties()
+ dynProps := types.NewDynamicProperty(
+ types.Or(aDynProps.Key, bDynProps.Key),
+ mergeTypes(aDynProps.Value, bDynProps.Value))
+ return types.NewObject(nil, dynProps)
+ } else if bAny, ok := b.(types.Any); ok && len(a.StaticProperties()) == 0 {
+ // If a is an object type with no static components ...
+ for _, t := range bAny {
+ if tObj, ok := t.(*types.Object); ok && len(tObj.StaticProperties()) == 0 {
+ // ... and b is a types.Any containing an object with no static components, we merge them.
+ aDynProps := a.DynamicProperties()
+ tDynProps := tObj.DynamicProperties()
+ tDynProps.Key = types.Or(tDynProps.Key, aDynProps.Key)
+ tDynProps.Value = types.Or(tDynProps.Value, aDynProps.Value)
+ return bAny
+ }
+ }
+ }
+ case *types.Set:
+ if bSet, ok := b.(*types.Set); ok {
+ return types.NewSet(types.Or(a.Of(), bSet.Of()))
+ }
+ case types.Any:
+ if _, ok := b.(types.Any); !ok {
+ return mergeTypes(b, a)
+ }
+ }
+
+ return types.Or(a, b)
+}
+
+func (n *typeTreeNode) String() string {
+ b := strings.Builder{}
+
+ if k := n.key; k != nil {
+ b.WriteString(k.String())
+ } else {
+ b.WriteString("-")
+ }
+
+ if v := n.value; v != nil {
+ b.WriteString(": ")
+ b.WriteString(v.String())
+ }
+
+ n.children.Iter(func(_ Value, child *typeTreeNode) bool {
+ b.WriteString("\n\t+ ")
+ s := child.String()
+ s = strings.ReplaceAll(s, "\n", "\n\t")
+ b.WriteString(s)
+
+ return false
+ })
+
+ return b.String()
+}
+
+func insertIntoObject(o *types.Object, path Ref, tpe types.Type, env *TypeEnv) (*types.Object, error) {
+ if len(path) == 0 {
+ return o, nil
+ }
+
+ key := env.GetByValue(path[0].Value)
+
+ if len(path) == 1 {
+ var dynamicProps *types.DynamicProperty
+ if dp := o.DynamicProperties(); dp != nil {
+ dynamicProps = types.NewDynamicProperty(types.Or(o.DynamicProperties().Key, key), types.Or(o.DynamicProperties().Value, tpe))
+ } else {
+ dynamicProps = types.NewDynamicProperty(key, tpe)
+ }
+ return types.NewObject(o.StaticProperties(), dynamicProps), nil
+ }
+
+ child, err := insertIntoObject(types.NewObject(nil, nil), path[1:], tpe, env)
+ if err != nil {
+ return nil, err
+ }
+
+ var dynamicProps *types.DynamicProperty
+ if dp := o.DynamicProperties(); dp != nil {
+ dynamicProps = types.NewDynamicProperty(types.Or(o.DynamicProperties().Key, key), types.Or(o.DynamicProperties().Value, child))
+ } else {
+ dynamicProps = types.NewDynamicProperty(key, child)
+ }
+ return types.NewObject(o.StaticProperties(), dynamicProps), nil
+}
+
+func (n *typeTreeNode) Leafs() map[*Ref]types.Type {
+ leafs := map[*Ref]types.Type{}
+ n.children.Iter(func(_ Value, v *typeTreeNode) bool {
+ collectLeafs(v, nil, leafs)
+ return false
+ })
+ return leafs
+}
+
+func collectLeafs(n *typeTreeNode, path Ref, leafs map[*Ref]types.Type) {
+ nPath := append(path, NewTerm(n.key))
+ if n.Leaf() {
+ leafs[&nPath] = n.Value()
+ return
+ }
+ n.children.Iter(func(_ Value, v *typeTreeNode) bool {
+ collectLeafs(v, nPath, leafs)
+ return false
+ })
+}
+
+func (n *typeTreeNode) Value() types.Type {
+ return n.value
+}
+
+// selectConstant returns the attribute of the type referred to by the term. If
+// the attribute type cannot be determined, nil is returned.
+func selectConstant(tpe types.Type, term *Term) types.Type {
+ x, err := JSON(term.Value)
+ if err == nil {
+ return types.Select(tpe, x)
+ }
+ return nil
+}
+
+// selectRef returns the type of the nested attribute referred to by ref. If
+// the attribute type cannot be determined, nil is returned. If the ref
+// contains vars or refs, then the returned type will be a union of the
+// possible types.
+func selectRef(tpe types.Type, ref Ref) types.Type {
+
+ if tpe == nil || len(ref) == 0 {
+ return tpe
+ }
+
+ head, tail := ref[0], ref[1:]
+
+ switch head.Value.(type) {
+ case Var, Ref, *Array, Object, Set:
+ return selectRef(types.Values(tpe), tail)
+ default:
+ return selectRef(selectConstant(tpe, head), tail)
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/errors.go b/vendor/github.com/open-policy-agent/opa/v1/ast/errors.go
new file mode 100644
index 0000000000..75160afc6e
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/errors.go
@@ -0,0 +1,124 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "fmt"
+ "slices"
+ "strconv"
+ "strings"
+)
+
+// Errors represents a series of errors encountered during parsing, compiling,
+// etc.
+type Errors []*Error
+
+func (e Errors) Error() string {
+
+ if len(e) == 0 {
+ return "no error(s)"
+ }
+
+ if len(e) == 1 {
+ return fmt.Sprintf("1 error occurred: %v", e[0].Error())
+ }
+
+ s := make([]string, len(e))
+ for i, err := range e {
+ s[i] = err.Error()
+ }
+
+ return fmt.Sprintf("%d errors occurred:\n%s", len(e), strings.Join(s, "\n"))
+}
+
+// Sort sorts the error slice by location. If the locations are equal then the
+// error message is compared.
+func (e Errors) Sort() {
+ slices.SortFunc(e, func(a, b *Error) int {
+ if cmp := a.Location.Compare(b.Location); cmp != 0 {
+ return cmp
+ }
+
+ return strings.Compare(a.Error(), b.Error())
+ })
+}
+
+const (
+ // ParseErr indicates an unclassified parse error occurred.
+ ParseErr = "rego_parse_error"
+
+ // CompileErr indicates an unclassified compile error occurred.
+ CompileErr = "rego_compile_error"
+
+ // TypeErr indicates a type error was caught.
+ TypeErr = "rego_type_error"
+
+ // UnsafeVarErr indicates an unsafe variable was found during compilation.
+ UnsafeVarErr = "rego_unsafe_var_error"
+
+ // RecursionErr indicates recursion was found during compilation.
+ RecursionErr = "rego_recursion_error"
+
+ // FormatErr indicates an error occurred during formatting.
+ FormatErr = "rego_format_error"
+)
+
+// IsError returns true if err is an AST error with code.
+func IsError(code string, err error) bool {
+ if err, ok := err.(*Error); ok {
+ return err.Code == code
+ }
+ return false
+}
+
+// ErrorDetails defines the interface for detailed error messages.
+type ErrorDetails interface {
+ Lines() []string
+}
+
+// Error represents a single error caught during parsing, compiling, etc.
+type Error struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+ Location *Location `json:"location,omitempty"`
+ Details ErrorDetails `json:"details,omitempty"`
+}
+
+func (e *Error) Error() string {
+
+ var prefix string
+
+ if e.Location != nil {
+
+ if len(e.Location.File) > 0 {
+ prefix += e.Location.File + ":" + strconv.Itoa(e.Location.Row)
+ } else {
+ prefix += strconv.Itoa(e.Location.Row) + ":" + strconv.Itoa(e.Location.Col)
+ }
+ }
+
+ msg := fmt.Sprintf("%v: %v", e.Code, e.Message)
+
+ if len(prefix) > 0 {
+ msg = prefix + ": " + msg
+ }
+
+ if e.Details != nil {
+ for _, line := range e.Details.Lines() {
+ msg += "\n\t" + line
+ }
+ }
+
+ return msg
+}
+
+// NewError returns a new Error object.
+func NewError(code string, loc *Location, f string, a ...any) *Error {
+ return &Error{
+ Code: code,
+ Location: loc,
+ Message: fmt.Sprintf(f, a...),
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/index.go b/vendor/github.com/open-policy-agent/opa/v1/ast/index.go
new file mode 100644
index 0000000000..845447b6dc
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/index.go
@@ -0,0 +1,980 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "fmt"
+ "slices"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+// RuleIndex defines the interface for rule indices.
+type RuleIndex interface {
+
+ // Build tries to construct an index for the given rules. If the index was
+ // constructed, it returns true, otherwise false.
+ Build(rules []*Rule) bool
+
+ // Lookup searches the index for rules that will match the provided
+ // resolver. If the resolver returns an error, it is returned via err.
+ Lookup(resolver ValueResolver) (*IndexResult, error)
+
+ // AllRules traverses the index and returns all rules that will match
+ // the provided resolver without any optimizations (effectively with
+ // indexing disabled). If the resolver returns an error, it is returned
+ // via err.
+ AllRules(resolver ValueResolver) (*IndexResult, error)
+}
+
+// IndexResult contains the result of an index lookup.
+type IndexResult struct {
+ Rules []*Rule
+ Else map[*Rule][]*Rule
+ Default *Rule
+ Kind RuleKind
+ EarlyExit bool
+ OnlyGroundRefs bool
+}
+
+// NewIndexResult returns a new IndexResult object.
+func NewIndexResult(kind RuleKind) *IndexResult {
+ return &IndexResult{
+ Kind: kind,
+ }
+}
+
+// Empty returns true if there are no rules to evaluate.
+func (ir *IndexResult) Empty() bool {
+ return len(ir.Rules) == 0 && ir.Default == nil
+}
+
+type baseDocEqIndex struct {
+ isVirtual func(Ref) bool
+ root *trieNode
+ defaultRule *Rule
+ kind RuleKind
+ onlyGroundRefs bool
+}
+
+var (
+ equalityRef = Equality.Ref()
+ equalRef = Equal.Ref()
+ globMatchRef = GlobMatch.Ref()
+ internalPrintRef = InternalPrint.Ref()
+ internalTestCaseRef = InternalTestCase.Ref()
+
+ skipIndexing = NewSet(NewTerm(internalPrintRef), NewTerm(internalTestCaseRef))
+)
+
+func newBaseDocEqIndex(isVirtual func(Ref) bool) *baseDocEqIndex {
+ return &baseDocEqIndex{
+ isVirtual: isVirtual,
+ root: newTrieNodeImpl(),
+ onlyGroundRefs: true,
+ }
+}
+
+func (i *baseDocEqIndex) Build(rules []*Rule) bool {
+ if len(rules) == 0 {
+ return false
+ }
+
+ i.kind = rules[0].Head.RuleKind()
+ indices := newrefindices(i.isVirtual)
+
+ // build indices for each rule.
+ for idx := range rules {
+ WalkRules(rules[idx], func(rule *Rule) bool {
+ if rule.Default {
+ i.defaultRule = rule
+ return false
+ }
+ if i.onlyGroundRefs {
+ i.onlyGroundRefs = rule.Head.Reference.IsGround()
+ }
+ var skip bool
+ for i := range rule.Body {
+ if op := rule.Body[i].OperatorTerm(); op != nil && skipIndexing.Contains(op) {
+ skip = true
+ break
+ }
+ }
+ if !skip {
+ for i := range rule.Body {
+ indices.Update(rule, rule.Body[i])
+ }
+ }
+ return false
+ })
+ }
+
+ // build trie out of indices.
+ for idx := range rules {
+ var prio int
+ WalkRules(rules[idx], func(rule *Rule) bool {
+ if rule.Default {
+ return false
+ }
+ node := i.root
+ if indices.Indexed(rule) {
+ for _, ref := range indices.Sorted() {
+ node = node.Insert(ref, indices.Value(rule, ref), indices.Mapper(rule, ref))
+ }
+ }
+ // Insert rule into trie with (insertion order, priority order)
+ // tuple. Retaining the insertion order allows us to return rules
+ // in the order they were passed to this function.
+ node.append([...]int{idx, prio}, rule)
+ prio++
+ return false
+ })
+ }
+ return true
+}
+
+func (i *baseDocEqIndex) Lookup(resolver ValueResolver) (*IndexResult, error) {
+ tr := ttrPool.Get().(*trieTraversalResult)
+
+ defer func() {
+ clear(tr.unordered)
+ tr.ordering = tr.ordering[:0]
+ tr.multiple = false
+ tr.exist = nil
+
+ ttrPool.Put(tr)
+ }()
+
+ err := i.root.Traverse(resolver, tr)
+ if err != nil {
+ return nil, err
+ }
+
+ result := IndexResultPool.Get()
+
+ result.Kind = i.kind
+ result.Default = i.defaultRule
+ result.OnlyGroundRefs = i.onlyGroundRefs
+
+ if result.Rules == nil {
+ result.Rules = make([]*Rule, 0, len(tr.ordering))
+ } else {
+ result.Rules = result.Rules[:0]
+ }
+
+ clear(result.Else)
+
+ for _, pos := range tr.ordering {
+ slices.SortFunc(tr.unordered[pos], func(a, b *ruleNode) int {
+ return a.prio[1] - b.prio[1]
+ })
+ nodes := tr.unordered[pos]
+ root := nodes[0].rule
+
+ result.Rules = append(result.Rules, root)
+ if len(nodes) > 1 {
+ if result.Else == nil {
+ result.Else = map[*Rule][]*Rule{}
+ }
+
+ result.Else[root] = make([]*Rule, len(nodes)-1)
+ for i := 1; i < len(nodes); i++ {
+ result.Else[root][i-1] = nodes[i].rule
+ }
+ }
+ }
+
+ if !tr.multiple {
+ // even when the indexer hasn't seen multiple values, the rule itself could be one
+ // where early exit shouldn't be applied.
+ var lastValue Value
+ for i := range result.Rules {
+ if result.Rules[i].Head.DocKind() != CompleteDoc {
+ tr.multiple = true
+ break
+ }
+ if result.Rules[i].Head.Value != nil {
+ if lastValue != nil && !ValueEqual(lastValue, result.Rules[i].Head.Value.Value) {
+ tr.multiple = true
+ break
+ }
+ lastValue = result.Rules[i].Head.Value.Value
+ }
+ }
+ }
+
+ result.EarlyExit = !tr.multiple
+
+ return result, nil
+}
+
+func (i *baseDocEqIndex) AllRules(ValueResolver) (*IndexResult, error) {
+ tr := newTrieTraversalResult()
+
+ // Walk over the rule trie and accumulate _all_ rules
+ rw := &ruleWalker{result: tr}
+ i.root.Do(rw)
+
+ result := NewIndexResult(i.kind)
+ result.Default = i.defaultRule
+ result.OnlyGroundRefs = i.onlyGroundRefs
+ result.Rules = make([]*Rule, 0, len(tr.ordering))
+
+ for _, pos := range tr.ordering {
+ slices.SortFunc(tr.unordered[pos], func(a, b *ruleNode) int {
+ return a.prio[1] - b.prio[1]
+ })
+ nodes := tr.unordered[pos]
+ root := nodes[0].rule
+ result.Rules = append(result.Rules, root)
+ if len(nodes) > 1 {
+ if result.Else == nil {
+ result.Else = map[*Rule][]*Rule{}
+ }
+
+ result.Else[root] = make([]*Rule, len(nodes)-1)
+ for i := 1; i < len(nodes); i++ {
+ result.Else[root][i-1] = nodes[i].rule
+ }
+ }
+ }
+
+ result.EarlyExit = !tr.multiple
+
+ return result, nil
+}
+
+type ruleWalker struct {
+ result *trieTraversalResult
+}
+
+func (r *ruleWalker) Do(x any) trieWalker {
+ tn := x.(*trieNode)
+ r.result.Add(tn)
+ return r
+}
+
+type valueMapper struct {
+ Key string
+ MapValue func(Value) Value
+}
+
+type refindex struct {
+ Ref Ref
+ Value Value
+ Mapper *valueMapper
+}
+
+type refindices struct {
+ isVirtual func(Ref) bool
+ rules map[*Rule][]*refindex
+ frequency *util.HasherMap[Ref, int]
+ sorted []Ref
+}
+
+func newrefindices(isVirtual func(Ref) bool) *refindices {
+ return &refindices{
+ isVirtual: isVirtual,
+ rules: map[*Rule][]*refindex{},
+ frequency: util.NewHasherMap[Ref, int](RefEqual),
+ }
+}
+
+// anyValue is a fake variable we used to put "naked ref" expressions
+// into the rule index
+var anyValue = Var("__any__")
+
+// Update attempts to update the refindices for the given expression in the
+// given rule. If the expression cannot be indexed the update does not affect
+// the indices.
+func (i *refindices) Update(rule *Rule, expr *Expr) {
+
+ if len(expr.With) > 0 {
+ // NOTE(tsandall): In the future, we may need to consider expressions
+ // that have with statements applied to them.
+ return
+ }
+
+ if expr.Negated {
+ // NOTE(sr): We could try to cover simple expressions, like
+ // not input.funky => input.funky == false or undefined (two refindex?)
+ return
+ }
+
+ op := expr.Operator()
+ if op == nil {
+ if ts, ok := expr.Terms.(*Term); ok {
+ // NOTE(sr): If we wanted to cover function args, we'd need to also
+ // check for type "Var" here. But since it's impossible to call a
+ // function with a undefined argument, there's no point to recording
+ // "needs to be anything" for function args
+ if ref, ok := ts.Value.(Ref); ok { // "naked ref"
+ i.updateEq(rule, ref, anyValue)
+ }
+ }
+ }
+
+ a, b := expr.Operand(0), expr.Operand(1)
+ switch {
+ case op.Equal(equalityRef):
+ i.updateEq(rule, a.Value, b.Value)
+
+ case op.Equal(equalRef) && len(expr.Operands()) == 2:
+ // NOTE(tsandall): if equal() is called with more than two arguments the
+ // output value is being captured in which case the indexer cannot
+ // exclude the rule if the equal() call would return false (because the
+ // false value must still be produced.)
+ i.updateEq(rule, a.Value, b.Value)
+
+ case op.Equal(globMatchRef) && len(expr.Operands()) == 3:
+ // NOTE(sr): Same as with equal() above -- 4 operands means the output
+ // of `glob.match` is captured and the rule can thus not be excluded.
+ i.updateGlobMatch(rule, expr)
+ }
+}
+
+// Sorted returns a sorted list of references that the indices were built from.
+// References that appear more frequently in the indexed rules are ordered
+// before less frequently appearing references.
+func (i *refindices) Sorted() []Ref {
+
+ if i.sorted == nil {
+ counts := make([]int, 0, i.frequency.Len())
+ i.sorted = make([]Ref, 0, i.frequency.Len())
+
+ i.frequency.Iter(func(k Ref, v int) bool {
+ counts = append(counts, v)
+ i.sorted = append(i.sorted, k)
+ return false
+ })
+
+ sort.Slice(i.sorted, func(a, b int) bool {
+ if counts[a] > counts[b] {
+ return true
+ } else if counts[b] > counts[a] {
+ return false
+ }
+ return i.sorted[a][0].Loc().Compare(i.sorted[b][0].Loc()) < 0
+ })
+ }
+
+ return i.sorted
+}
+
+func (i *refindices) Indexed(rule *Rule) bool {
+ return len(i.rules[rule]) > 0
+}
+
+func (i *refindices) Value(rule *Rule, ref Ref) Value {
+ if index := i.index(rule, ref); index != nil {
+ return index.Value
+ }
+ return nil
+}
+
+func (i *refindices) Mapper(rule *Rule, ref Ref) *valueMapper {
+ if index := i.index(rule, ref); index != nil {
+ return index.Mapper
+ }
+ return nil
+}
+
+func (i *refindices) updateEq(rule *Rule, a, b Value) {
+ args := rule.Head.Args
+ if idx, ok := eqOperandsToRefAndValue(i.isVirtual, args, a, b); ok {
+ i.insert(rule, idx)
+ return
+ }
+ if idx, ok := eqOperandsToRefAndValue(i.isVirtual, args, b, a); ok {
+ i.insert(rule, idx)
+ return
+ }
+}
+
+func (i *refindices) updateGlobMatch(rule *Rule, expr *Expr) {
+ args := rule.Head.Args
+
+ delim, ok := globDelimiterToString(expr.Operand(1))
+ if !ok {
+ return
+ }
+
+ if arr := globPatternToArray(expr.Operand(0), delim); arr != nil {
+ // The 3rd operand of glob.match is the value to match. We assume the
+ // 3rd operand was a reference that has been rewritten and bound to a
+ // variable earlier in the query OR a function argument variable.
+ match := expr.Operand(2)
+ if _, ok := match.Value.(Var); ok {
+ var ref Ref
+ for _, other := range i.rules[rule] {
+ if _, ok := other.Value.(Var); ok && other.Value.Compare(match.Value) == 0 {
+ ref = other.Ref
+ }
+ }
+ if ref == nil {
+ for j, arg := range args {
+ if arg.Equal(match) {
+ ref = Ref{FunctionArgRootDocument, InternedTerm(j)}
+ }
+ }
+ }
+ if ref != nil {
+ i.insert(rule, &refindex{
+ Ref: ref,
+ Value: arr.Value,
+ Mapper: &valueMapper{
+ Key: delim,
+ MapValue: func(v Value) Value {
+ if s, ok := v.(String); ok {
+ return stringSliceToArray(splitStringEscaped(string(s), delim))
+ }
+ return v
+ },
+ },
+ })
+ }
+ }
+ }
+}
+
+func (i *refindices) insert(rule *Rule, index *refindex) {
+ count, _ := i.frequency.Get(index.Ref)
+ i.frequency.Put(index.Ref, count+1)
+
+ for pos, other := range i.rules[rule] {
+ if other.Ref.Equal(index.Ref) {
+ i.rules[rule][pos] = index
+ return
+ }
+ }
+
+ i.rules[rule] = append(i.rules[rule], index)
+}
+
+func (i *refindices) index(rule *Rule, ref Ref) *refindex {
+ for _, index := range i.rules[rule] {
+ if index.Ref.Equal(ref) {
+ return index
+ }
+ }
+ return nil
+}
+
+type trieWalker interface {
+ Do(any) trieWalker
+}
+
+type trieTraversalResult struct {
+ unordered map[int][]*ruleNode
+ ordering []int
+ exist *Term
+ multiple bool
+}
+
+var ttrPool = sync.Pool{
+ New: func() any {
+ return newTrieTraversalResult()
+ },
+}
+
+func newTrieTraversalResult() *trieTraversalResult {
+ return &trieTraversalResult{
+ unordered: map[int][]*ruleNode{},
+ }
+}
+
+func (tr *trieTraversalResult) Add(t *trieNode) {
+ for _, node := range t.rules {
+ root := node.prio[0]
+ nodes, ok := tr.unordered[root]
+ if !ok {
+ tr.ordering = append(tr.ordering, root)
+ }
+ tr.unordered[root] = append(nodes, node)
+ }
+ if t.multiple {
+ tr.multiple = true
+ }
+ if tr.multiple || t.value == nil {
+ return
+ }
+ if t.value.IsGround() && tr.exist == nil || tr.exist.Equal(t.value) {
+ tr.exist = t.value
+ return
+ }
+ tr.multiple = true
+}
+
+type trieNode struct {
+ ref Ref
+ mappers []*valueMapper
+ next *trieNode
+ any *trieNode
+ undefined *trieNode
+ scalars *util.HasherMap[Value, *trieNode]
+ array *trieNode
+ rules []*ruleNode
+ value *Term
+ multiple bool
+}
+
+func (node *trieNode) String() string {
+ var flags []string
+ flags = append(flags, fmt.Sprintf("self:%p", node))
+ if len(node.ref) > 0 {
+ flags = append(flags, node.ref.String())
+ }
+ if node.next != nil {
+ flags = append(flags, fmt.Sprintf("next:%p", node.next))
+ }
+ if node.any != nil {
+ flags = append(flags, fmt.Sprintf("any:%p", node.any))
+ }
+ if node.undefined != nil {
+ flags = append(flags, fmt.Sprintf("undefined:%p", node.undefined))
+ }
+ if node.array != nil {
+ flags = append(flags, fmt.Sprintf("array:%p", node.array))
+ }
+ if node.scalars.Len() > 0 {
+ buf := make([]string, 0, node.scalars.Len())
+ node.scalars.Iter(func(key Value, val *trieNode) bool {
+ buf = append(buf, fmt.Sprintf("scalar(%v):%p", key, val))
+ return false
+ })
+ sort.Strings(buf)
+ flags = append(flags, strings.Join(buf, " "))
+ }
+ if len(node.rules) > 0 {
+ flags = append(flags, fmt.Sprintf("%d rule(s)", len(node.rules)))
+ }
+ if len(node.mappers) > 0 {
+ flags = append(flags, fmt.Sprintf("%d mapper(s)", len(node.mappers)))
+ }
+ if node.value != nil {
+ flags = append(flags, "value exists")
+ }
+ return strings.Join(flags, " ")
+}
+
+func (node *trieNode) append(prio [2]int, rule *Rule) {
+ node.rules = append(node.rules, &ruleNode{prio, rule})
+
+ if node.value != nil && rule.Head.Value != nil && !node.value.Equal(rule.Head.Value) {
+ node.multiple = true
+ }
+
+ if node.value == nil && rule.Head.DocKind() == CompleteDoc {
+ node.value = rule.Head.Value
+ }
+}
+
+type ruleNode struct {
+ prio [2]int
+ rule *Rule
+}
+
+func newTrieNodeImpl() *trieNode {
+ return &trieNode{
+ scalars: util.NewHasherMap[Value, *trieNode](ValueEqual),
+ }
+}
+
+func (node *trieNode) Do(walker trieWalker) {
+ next := walker.Do(node)
+ if next == nil {
+ return
+ }
+ if node.any != nil {
+ node.any.Do(next)
+ }
+ if node.undefined != nil {
+ node.undefined.Do(next)
+ }
+
+ node.scalars.Iter(func(_ Value, child *trieNode) bool {
+ child.Do(next)
+ return false
+ })
+
+ if node.array != nil {
+ node.array.Do(next)
+ }
+ if node.next != nil {
+ node.next.Do(next)
+ }
+}
+
+func (node *trieNode) Insert(ref Ref, value Value, mapper *valueMapper) *trieNode {
+
+ if node.next == nil {
+ node.next = newTrieNodeImpl()
+ node.next.ref = ref
+ }
+
+ if mapper != nil {
+ node.next.addMapper(mapper)
+ }
+
+ return node.next.insertValue(value)
+}
+
+func (node *trieNode) Traverse(resolver ValueResolver, tr *trieTraversalResult) error {
+
+ if node == nil {
+ return nil
+ }
+
+ tr.Add(node)
+
+ return node.next.traverse(resolver, tr)
+}
+
+func (node *trieNode) addMapper(mapper *valueMapper) {
+ for i := range node.mappers {
+ if node.mappers[i].Key == mapper.Key {
+ return
+ }
+ }
+ node.mappers = append(node.mappers, mapper)
+}
+
+func (node *trieNode) insertValue(value Value) *trieNode {
+
+ switch value := value.(type) {
+ case nil:
+ if node.undefined == nil {
+ node.undefined = newTrieNodeImpl()
+ }
+ return node.undefined
+ case Var:
+ if node.any == nil {
+ node.any = newTrieNodeImpl()
+ }
+ return node.any
+ case Null, Boolean, Number, String:
+ child, ok := node.scalars.Get(value)
+ if !ok {
+ child = newTrieNodeImpl()
+ node.scalars.Put(value, child)
+ }
+ return child
+ case *Array:
+ if node.array == nil {
+ node.array = newTrieNodeImpl()
+ }
+ return node.array.insertArray(value)
+ }
+
+ panic("illegal value")
+}
+
+func (node *trieNode) insertArray(arr *Array) *trieNode {
+
+ if arr.Len() == 0 {
+ return node
+ }
+
+ switch head := arr.Elem(0).Value.(type) {
+ case Var:
+ if node.any == nil {
+ node.any = newTrieNodeImpl()
+ }
+ return node.any.insertArray(arr.Slice(1, -1))
+ case Null, Boolean, Number, String:
+ child, ok := node.scalars.Get(head)
+ if !ok {
+ child = newTrieNodeImpl()
+ node.scalars.Put(head, child)
+ }
+ return child.insertArray(arr.Slice(1, -1))
+ }
+
+ panic("illegal value")
+}
+
+func (node *trieNode) traverse(resolver ValueResolver, tr *trieTraversalResult) error {
+
+ if node == nil {
+ return nil
+ }
+
+ v, err := resolver.Resolve(node.ref)
+ if err != nil {
+ if IsUnknownValueErr(err) {
+ return node.traverseUnknown(resolver, tr)
+ }
+ return err
+ }
+
+ if node.undefined != nil {
+ err = node.undefined.Traverse(resolver, tr)
+ if err != nil {
+ return err
+ }
+ }
+
+ if v == nil {
+ return nil
+ }
+
+ if node.any != nil {
+ err = node.any.Traverse(resolver, tr)
+ if err != nil {
+ return err
+ }
+ }
+
+ if err := node.traverseValue(resolver, tr, v); err != nil {
+ return err
+ }
+
+ for i := range node.mappers {
+ if err := node.traverseValue(resolver, tr, node.mappers[i].MapValue(v)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (node *trieNode) traverseValue(resolver ValueResolver, tr *trieTraversalResult, value Value) error {
+
+ switch value := value.(type) {
+ case *Array:
+ if node.array == nil {
+ return nil
+ }
+ return node.array.traverseArray(resolver, tr, value)
+
+ case Null, Boolean, Number, String:
+ child, ok := node.scalars.Get(value)
+ if !ok {
+ return nil
+ }
+ return child.Traverse(resolver, tr)
+ }
+
+ return nil
+}
+
+func (node *trieNode) traverseArray(resolver ValueResolver, tr *trieTraversalResult, arr *Array) error {
+
+ if arr.Len() == 0 {
+ return node.Traverse(resolver, tr)
+ }
+
+ if node.any != nil {
+ err := node.any.traverseArray(resolver, tr, arr.Slice(1, -1))
+ if err != nil {
+ return err
+ }
+ }
+
+ head := arr.Elem(0).Value
+
+ if !IsScalar(head) {
+ return nil
+ }
+
+ switch head := head.(type) {
+ case Null, Boolean, Number, String:
+ child, ok := node.scalars.Get(head)
+ if !ok {
+ return nil
+ }
+ return child.traverseArray(resolver, tr, arr.Slice(1, -1))
+ }
+
+ panic("illegal value")
+}
+
+func (node *trieNode) traverseUnknown(resolver ValueResolver, tr *trieTraversalResult) error {
+
+ if node == nil {
+ return nil
+ }
+
+ if err := node.Traverse(resolver, tr); err != nil {
+ return err
+ }
+
+ if err := node.undefined.traverseUnknown(resolver, tr); err != nil {
+ return err
+ }
+
+ if err := node.any.traverseUnknown(resolver, tr); err != nil {
+ return err
+ }
+
+ if err := node.array.traverseUnknown(resolver, tr); err != nil {
+ return err
+ }
+
+ var iterErr error
+ node.scalars.Iter(func(_ Value, child *trieNode) bool {
+ return child.traverseUnknown(resolver, tr) != nil
+ })
+
+ return iterErr
+}
+
+// If term `a` is one of the function's operands, we store a Ref: `args[0]`
+// for the argument number. So for `f(x, y) { x = 10; y = 12 }`, we'll
+// bind `args[0]` and `args[1]` to this rule when called for (x=10) and
+// (y=12) respectively.
+func eqOperandsToRefAndValue(isVirtual func(Ref) bool, args []*Term, a, b Value) (*refindex, bool) {
+ switch v := a.(type) {
+ case Var:
+ for i, arg := range args {
+ if arg.Value.Compare(a) == 0 {
+ if bval, ok := indexValue(b); ok {
+ return &refindex{Ref: Ref{FunctionArgRootDocument, InternedTerm(i)}, Value: bval}, true
+ }
+ }
+ }
+ case Ref:
+ if !RootDocumentNames.Contains(v[0]) {
+ return nil, false
+ }
+ if isVirtual(v) {
+ return nil, false
+ }
+ if v.IsNested() || !v.IsGround() {
+ return nil, false
+ }
+ if bval, ok := indexValue(b); ok {
+ return &refindex{Ref: v, Value: bval}, true
+ }
+ }
+ return nil, false
+}
+
+func indexValue(b Value) (Value, bool) {
+ switch b := b.(type) {
+ case Null, Boolean, Number, String, Var:
+ return b, true
+ case *Array:
+ stop := false
+ first := true
+ vis := NewGenericVisitor(func(x any) bool {
+ if first {
+ first = false
+ return false
+ }
+ switch x.(type) {
+ // No nested structures or values that require evaluation (other than var).
+ case *Array, Object, Set, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Ref:
+ stop = true
+ }
+ return stop
+ })
+ vis.Walk(b)
+ if !stop {
+ return b, true
+ }
+ }
+
+ return nil, false
+}
+
+func globDelimiterToString(delim *Term) (string, bool) {
+
+ arr, ok := delim.Value.(*Array)
+ if !ok {
+ return "", false
+ }
+
+ var result string
+
+ if arr.Len() == 0 {
+ result = "."
+ } else {
+ for i := range arr.Len() {
+ term := arr.Elem(i)
+ s, ok := term.Value.(String)
+ if !ok {
+ return "", false
+ }
+ result += string(s)
+ }
+ }
+
+ return result, true
+}
+
+var globwildcard = VarTerm("$globwildcard")
+
+func globPatternToArray(pattern *Term, delim string) *Term {
+
+ s, ok := pattern.Value.(String)
+ if !ok {
+ return nil
+ }
+
+ parts := splitStringEscaped(string(s), delim)
+ arr := make([]*Term, len(parts))
+
+ for i := range parts {
+ if parts[i] == "*" {
+ arr[i] = globwildcard
+ } else {
+ var escaped bool
+ for _, c := range parts[i] {
+ if c == '\\' {
+ escaped = !escaped
+ continue
+ }
+ if !escaped {
+ switch c {
+ case '[', '?', '{', '*':
+ // TODO(tsandall): super glob and character pattern
+ // matching not supported yet.
+ return nil
+ }
+ }
+ escaped = false
+ }
+ arr[i] = StringTerm(parts[i])
+ }
+ }
+
+ return ArrayTerm(arr...)
+}
+
+// splits s on characters in delim except if delim characters have been escaped
+// with reverse solidus.
+func splitStringEscaped(s string, delim string) []string {
+
+ var last, curr int
+ var escaped bool
+ var result []string
+
+ for ; curr < len(s); curr++ {
+ if s[curr] == '\\' || escaped {
+ escaped = !escaped
+ continue
+ }
+ if strings.ContainsRune(delim, rune(s[curr])) {
+ result = append(result, s[last:curr])
+ last = curr + 1
+ }
+ }
+
+ result = append(result, s[last:])
+
+ return result
+}
+
+func stringSliceToArray(s []string) *Array {
+ arr := make([]*Term, len(s))
+ for i, v := range s {
+ arr[i] = StringTerm(v)
+ }
+ return NewArray(arr...)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/internal/scanner/scanner.go b/vendor/github.com/open-policy-agent/opa/v1/ast/internal/scanner/scanner.go
similarity index 90%
rename from vendor/github.com/open-policy-agent/opa/ast/internal/scanner/scanner.go
rename to vendor/github.com/open-policy-agent/opa/v1/ast/internal/scanner/scanner.go
index a0200ac18d..3741d37188 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/internal/scanner/scanner.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/internal/scanner/scanner.go
@@ -10,7 +10,8 @@ import (
"unicode"
"unicode/utf8"
- "github.com/open-policy-agent/opa/ast/internal/tokens"
+ "github.com/open-policy-agent/opa/v1/ast/internal/tokens"
+ "github.com/open-policy-agent/opa/v1/util"
)
const bom = 0xFEFF
@@ -18,31 +19,31 @@ const bom = 0xFEFF
// Scanner is used to tokenize an input stream of
// Rego source code.
type Scanner struct {
+ keywords map[string]tokens.Token
+ bs []byte
+ errors []Error
+ tabs []int
offset int
row int
col int
- bs []byte
- curr rune
width int
- errors []Error
- keywords map[string]tokens.Token
- tabs []int
+ curr rune
regoV1Compatible bool
}
// Error represents a scanner error.
type Error struct {
- Pos Position
Message string
+ Pos Position
}
// Position represents a point in the scanned source code.
type Position struct {
+ Tabs []int // positions of any tabs preceding Col
Offset int // start offset in bytes
End int // end offset in bytes
Row int // line number computed in bytes
Col int // column number computed in bytes
- Tabs []int // positions of any tabs preceding Col
}
// New returns an initialized scanner that will scan
@@ -100,8 +101,8 @@ func (s *Scanner) Keyword(lit string) tokens.Token {
func (s *Scanner) AddKeyword(kw string, tok tokens.Token) {
s.keywords[kw] = tok
- switch tok {
- case tokens.Every: // importing 'every' means also importing 'in'
+ if tok == tokens.Every {
+ // importing 'every' means also importing 'in'
s.keywords["in"] = tokens.In
}
}
@@ -115,6 +116,11 @@ func (s *Scanner) HasKeyword(keywords map[string]tokens.Token) bool {
return false
}
+func (s *Scanner) IsKeyword(str string) bool {
+ _, ok := s.keywords[str]
+ return ok
+}
+
func (s *Scanner) SetRegoV1Compatible() {
s.regoV1Compatible = true
}
@@ -164,7 +170,21 @@ func (s *Scanner) Scan() (tokens.Token, Position, string, []Error) {
var lit string
if s.isWhitespace() {
- lit = string(s.curr)
+ // string(rune) is an unnecessary heap allocation in this case as we know all
+ // the possible whitespace values, and can simply translate to string ourselves
+ switch s.curr {
+ case ' ':
+ lit = " "
+ case '\t':
+ lit = "\t"
+ case '\n':
+ lit = "\n"
+ case '\r':
+ lit = "\r"
+ default:
+ // unreachable unless isWhitespace changes
+ lit = string(s.curr)
+ }
s.next()
tok = tokens.Whitespace
} else if isLetter(s.curr) {
@@ -270,7 +290,8 @@ func (s *Scanner) scanIdentifier() string {
for isLetter(s.curr) || isDigit(s.curr) {
s.next()
}
- return string(s.bs[start : s.offset-1])
+
+ return util.ByteSliceToString(s.bs[start : s.offset-1])
}
func (s *Scanner) scanNumber() string {
@@ -321,7 +342,7 @@ func (s *Scanner) scanNumber() string {
}
}
- return string(s.bs[start : s.offset-1])
+ return util.ByteSliceToString(s.bs[start : s.offset-1])
}
func (s *Scanner) scanString() string {
@@ -355,7 +376,7 @@ func (s *Scanner) scanString() string {
}
}
- return string(s.bs[start : s.offset-1])
+ return util.ByteSliceToString(s.bs[start : s.offset-1])
}
func (s *Scanner) scanRawString() string {
@@ -370,7 +391,8 @@ func (s *Scanner) scanRawString() string {
break
}
}
- return string(s.bs[start : s.offset-1])
+
+ return util.ByteSliceToString(s.bs[start : s.offset-1])
}
func (s *Scanner) scanComment() string {
@@ -381,9 +403,10 @@ func (s *Scanner) scanComment() string {
end := s.offset - 1
// Trim carriage returns that precede the newline
if s.offset > 1 && s.bs[s.offset-2] == '\r' {
- end = end - 1
+ end -= 1
}
- return string(s.bs[start:end])
+
+ return util.ByteSliceToString(s.bs[start:end])
}
func (s *Scanner) next() {
@@ -413,7 +436,7 @@ func (s *Scanner) next() {
if s.curr == '\n' {
s.row++
s.col = 0
- s.tabs = []int{}
+ s.tabs = s.tabs[:0]
} else {
s.col++
if s.curr == '\t' {
diff --git a/vendor/github.com/open-policy-agent/opa/ast/internal/tokens/tokens.go b/vendor/github.com/open-policy-agent/opa/v1/ast/internal/tokens/tokens.go
similarity index 93%
rename from vendor/github.com/open-policy-agent/opa/ast/internal/tokens/tokens.go
rename to vendor/github.com/open-policy-agent/opa/v1/ast/internal/tokens/tokens.go
index 623ed7ed21..4033ba81ae 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/internal/tokens/tokens.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/internal/tokens/tokens.go
@@ -4,12 +4,14 @@
package tokens
+import "maps"
+
// Token represents a single Rego source code token
// for use by the Parser.
-type Token int
+type Token uint8
func (t Token) String() string {
- if t < 0 || int(t) >= len(strings) {
+ if int(t) >= len(strings) {
return "unknown"
}
return strings[t]
@@ -137,11 +139,7 @@ var keywords = map[string]Token{
// Keywords returns a copy of the default string -> Token keyword map.
func Keywords() map[string]Token {
- cpy := make(map[string]Token, len(keywords))
- for k, v := range keywords {
- cpy[k] = v
- }
- return cpy
+ return maps.Clone(keywords)
}
// IsKeyword returns if a token is a keyword
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/interning.go b/vendor/github.com/open-policy-agent/opa/v1/ast/interning.go
new file mode 100644
index 0000000000..fc5a89f69a
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/interning.go
@@ -0,0 +1,1838 @@
+// Copyright 2024 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "strconv"
+)
+
+type internable interface {
+ bool | string | int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64
+}
+
+// NOTE! Great care must be taken **not** to modify the terms returned
+// from these functions, as they are shared across all callers.
+// This package is currently considered experimental, and may change
+// at any time without notice.
+
+var (
+ InternedNullValue Value = Null{}
+ InternedNullTerm = &Term{Value: InternedNullValue}
+
+ InternedBooleanTrueValue Value = Boolean(true)
+ InternedBooleanFalseValue Value = Boolean(false)
+ InternedBooleanTrueTerm = &Term{Value: InternedBooleanTrueValue}
+ InternedBooleanFalseTerm = &Term{Value: InternedBooleanFalseValue}
+
+ InternedEmptyString = StringTerm("")
+ InternedEmptyObject = ObjectTerm()
+ InternedEmptyArray = ArrayTerm()
+ InternedEmptySet = SetTerm()
+
+ InternedEmptyArrayValue = NewArray()
+
+ // since this is by far the most common negative number
+ minusOneValue Value = Number("-1")
+ minusOneTerm = &Term{Value: minusOneValue}
+
+ internedStringTerms = map[string]*Term{
+ "": InternedEmptyString,
+ }
+)
+
+// InternStringTerm interns the given strings as terms. Note that Interning is
+// considered experimental and should not be relied upon by external code.
+// WARNING: This must **only** be called at initialization time, as the
+// interned terms are shared globally, and the underlying map is not thread-safe.
+func InternStringTerm(str ...string) {
+ for _, s := range str {
+ if _, ok := internedStringTerms[s]; ok {
+ continue
+ }
+
+ internedStringTerms[s] = StringTerm(s)
+ }
+}
+
+// HasInternedValue returns true if the given value is interned, otherwise false.
+func HasInternedValue[T internable](v T) bool {
+ switch value := any(v).(type) {
+ case bool:
+ return true
+ case int:
+ return HasInternedIntNumberTerm(value)
+ case int8:
+ return HasInternedIntNumberTerm(int(value))
+ case int16:
+ return HasInternedIntNumberTerm(int(value))
+ case int32:
+ return HasInternedIntNumberTerm(int(value))
+ case int64:
+ return HasInternedIntNumberTerm(int(value))
+ case uint:
+ return HasInternedIntNumberTerm(int(value))
+ case uint8:
+ return HasInternedIntNumberTerm(int(value))
+ case uint16:
+ return HasInternedIntNumberTerm(int(value))
+ case uint32:
+ return HasInternedIntNumberTerm(int(value))
+ case uint64:
+ return HasInternedIntNumberTerm(int(value))
+ case string:
+ _, ok := internedStringTerms[value]
+ return ok
+ }
+ return false
+}
+
+// InternedValue returns an interned Value for scalar v, if the value is
+// interned. If the value is not interned, a new Value is returned.
+func InternedValue[T internable](v T) Value {
+ return InternedValueOr(v, internedTermValue)
+}
+
+// InternedValueOr returns an interned Value for scalar v. Calls supplier
+// to produce a Value if the value is not interned.
+func InternedValueOr[T internable](v T, supplier func(T) Value) Value {
+ switch value := any(v).(type) {
+ case bool:
+ return internedBooleanValue(value)
+ case int:
+ return internedIntNumberValue(value)
+ case int8:
+ return internedIntNumberValue(int(value))
+ case int16:
+ return internedIntNumberValue(int(value))
+ case int32:
+ return internedIntNumberValue(int(value))
+ case int64:
+ return internedIntNumberValue(int(value))
+ case uint:
+ return internedIntNumberValue(int(value))
+ case uint8:
+ return internedIntNumberValue(int(value))
+ case uint16:
+ return internedIntNumberValue(int(value))
+ case uint32:
+ return internedIntNumberValue(int(value))
+ case uint64:
+ return internedIntNumberValue(int(value))
+ }
+ return supplier(v)
+}
+
+// Interned returns a possibly interned term for the given scalar value.
+// If the value is not interned, a new term is created for that value.
+func InternedTerm[T internable](v T) *Term {
+ switch value := any(v).(type) {
+ case bool:
+ return internedBooleanTerm(value)
+ case string:
+ return internedStringTerm(value)
+ case int:
+ return internedIntNumberTerm(value)
+ case int8:
+ return internedIntNumberTerm(int(value))
+ case int16:
+ return internedIntNumberTerm(int(value))
+ case int32:
+ return internedIntNumberTerm(int(value))
+ case int64:
+ return internedIntNumberTerm(int(value))
+ case uint:
+ return internedIntNumberTerm(int(value))
+ case uint8:
+ return internedIntNumberTerm(int(value))
+ case uint16:
+ return internedIntNumberTerm(int(value))
+ case uint32:
+ return internedIntNumberTerm(int(value))
+ case uint64:
+ return internedIntNumberTerm(int(value))
+ default:
+ panic("unreachable")
+ }
+}
+
+// InternedIntFromString returns a term with the given integer value if the string
+// maps to an interned term. If the string does not map to an interned term, nil is
+// returned.
+func InternedIntNumberTermFromString(s string) *Term {
+ if term, ok := stringToIntNumberTermMap[s]; ok {
+ return term
+ }
+
+ return nil
+}
+
+// HasInternedIntNumberTerm returns true if the given integer value maps to an interned
+// term, otherwise false.
+func HasInternedIntNumberTerm(i int) bool {
+ return i >= -1 && i < len(intNumberTerms)
+}
+
+// Returns an interned string term representing the integer value i, if
+// interned. If not, creates a new StringTerm for the integer value.
+func InternedIntegerString(i int) *Term {
+ // Cheapest option - we don't need to call strconv.Itoa
+ if HasInternedIntNumberTerm(i) {
+ if interned, ok := internedStringTerms[IntNumberTerm(i).String()]; ok {
+ return interned
+ }
+ }
+
+ // Next cheapest option — the string could still be interned if the store
+ // has been extended with more terms than we cucrrently intern.
+ s := strconv.Itoa(i)
+ if interned, ok := internedStringTerms[s]; ok {
+ return interned
+ }
+
+ // Nope, create a new term
+ return StringTerm(s)
+}
+
+func internedBooleanValue(b bool) Value {
+ if b {
+ return InternedBooleanTrueValue
+ }
+
+ return InternedBooleanFalseValue
+}
+
+// InternedBooleanTerm returns an interned term with the given boolean value.
+func internedBooleanTerm(b bool) *Term {
+ if b {
+ return InternedBooleanTrueTerm
+ }
+
+ return InternedBooleanFalseTerm
+}
+
+func internedIntNumberValue(i int) Value {
+ if i >= 0 && i < len(intNumberTerms) {
+ return intNumberValues[i]
+ }
+
+ if i == -1 {
+ return minusOneValue
+ }
+
+ return Number(strconv.Itoa(i))
+}
+
+// InternedIntNumberTerm returns a term with the given integer value. The term is
+// cached between -1 to 512, and for values outside of that range, this function
+// is equivalent to IntNumberTerm.
+func internedIntNumberTerm(i int) *Term {
+ if i >= 0 && i < len(intNumberTerms) {
+ return intNumberTerms[i]
+ }
+
+ if i == -1 {
+ return minusOneTerm
+ }
+
+ return &Term{Value: Number(strconv.Itoa(i))}
+}
+
+// InternedStringTerm returns an interned term with the given string value. If the
+// provided string is not interned, a new term is created for that value. It does *not*
+// modify the global interned terms map.
+func internedStringTerm(s string) *Term {
+ if term, ok := internedStringTerms[s]; ok {
+ return term
+ }
+
+ return StringTerm(s)
+}
+
+func internedTermValue[T internable](v T) Value {
+ return InternedTerm(v).Value
+}
+
+func init() {
+ InternStringTerm(
+ // Numbers
+ "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10",
+ "11", "12", "13", "14", "15", "16", "17", "18", "19", "20",
+ "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36", "37", "38",
+ "39", "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "50", "51", "52", "53", "54", "55", "56",
+ "57", "58", "59", "60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "70", "71", "72", "73", "74",
+ "75", "76", "77", "78", "79", "80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "90", "91", "92",
+ "93", "94", "95", "96", "97", "98", "99", "100",
+ // Types
+ "null", "boolean", "number", "string", "array", "object", "set", "var", "ref", "true", "false",
+ // Runtime
+ "config", "env", "version", "commit", "authorization_enabled", "skip_known_schema_check",
+ // Annotations
+ "annotations", "scope", "title", "entrypoint", "description", "organizations", "authors", "related_resources",
+ "schemas", "custom", "name", "email", "schema", "definition", "document", "package", "rule", "subpackages",
+ // Debug
+ "text", "value", "bindings", "expressions",
+ // Various
+ "data", "input", "result", "keywords", "path", "v1", "error", "partial",
+ // HTTP
+ "code", "message", "status_code", "method", "url", "uri",
+ // JWT
+ "enc", "cty", "iss", "exp", "nbf", "aud", "secret", "cert",
+ // Decisions
+ "revision", "labels", "decision_id", "bundles", "query", "mapped_result", "nd_builtin_cache",
+ "erased", "masked", "requested_by", "timestamp", "metrics", "req_id",
+
+ // Whitespace
+ " ", "\n", "\t",
+ )
+}
+
+var stringToIntNumberTermMap = map[string]*Term{
+ "-1": minusOneTerm,
+ "0": intNumberTerms[0],
+ "1": intNumberTerms[1],
+ "2": intNumberTerms[2],
+ "3": intNumberTerms[3],
+ "4": intNumberTerms[4],
+ "5": intNumberTerms[5],
+ "6": intNumberTerms[6],
+ "7": intNumberTerms[7],
+ "8": intNumberTerms[8],
+ "9": intNumberTerms[9],
+ "10": intNumberTerms[10],
+ "11": intNumberTerms[11],
+ "12": intNumberTerms[12],
+ "13": intNumberTerms[13],
+ "14": intNumberTerms[14],
+ "15": intNumberTerms[15],
+ "16": intNumberTerms[16],
+ "17": intNumberTerms[17],
+ "18": intNumberTerms[18],
+ "19": intNumberTerms[19],
+ "20": intNumberTerms[20],
+ "21": intNumberTerms[21],
+ "22": intNumberTerms[22],
+ "23": intNumberTerms[23],
+ "24": intNumberTerms[24],
+ "25": intNumberTerms[25],
+ "26": intNumberTerms[26],
+ "27": intNumberTerms[27],
+ "28": intNumberTerms[28],
+ "29": intNumberTerms[29],
+ "30": intNumberTerms[30],
+ "31": intNumberTerms[31],
+ "32": intNumberTerms[32],
+ "33": intNumberTerms[33],
+ "34": intNumberTerms[34],
+ "35": intNumberTerms[35],
+ "36": intNumberTerms[36],
+ "37": intNumberTerms[37],
+ "38": intNumberTerms[38],
+ "39": intNumberTerms[39],
+ "40": intNumberTerms[40],
+ "41": intNumberTerms[41],
+ "42": intNumberTerms[42],
+ "43": intNumberTerms[43],
+ "44": intNumberTerms[44],
+ "45": intNumberTerms[45],
+ "46": intNumberTerms[46],
+ "47": intNumberTerms[47],
+ "48": intNumberTerms[48],
+ "49": intNumberTerms[49],
+ "50": intNumberTerms[50],
+ "51": intNumberTerms[51],
+ "52": intNumberTerms[52],
+ "53": intNumberTerms[53],
+ "54": intNumberTerms[54],
+ "55": intNumberTerms[55],
+ "56": intNumberTerms[56],
+ "57": intNumberTerms[57],
+ "58": intNumberTerms[58],
+ "59": intNumberTerms[59],
+ "60": intNumberTerms[60],
+ "61": intNumberTerms[61],
+ "62": intNumberTerms[62],
+ "63": intNumberTerms[63],
+ "64": intNumberTerms[64],
+ "65": intNumberTerms[65],
+ "66": intNumberTerms[66],
+ "67": intNumberTerms[67],
+ "68": intNumberTerms[68],
+ "69": intNumberTerms[69],
+ "70": intNumberTerms[70],
+ "71": intNumberTerms[71],
+ "72": intNumberTerms[72],
+ "73": intNumberTerms[73],
+ "74": intNumberTerms[74],
+ "75": intNumberTerms[75],
+ "76": intNumberTerms[76],
+ "77": intNumberTerms[77],
+ "78": intNumberTerms[78],
+ "79": intNumberTerms[79],
+ "80": intNumberTerms[80],
+ "81": intNumberTerms[81],
+ "82": intNumberTerms[82],
+ "83": intNumberTerms[83],
+ "84": intNumberTerms[84],
+ "85": intNumberTerms[85],
+ "86": intNumberTerms[86],
+ "87": intNumberTerms[87],
+ "88": intNumberTerms[88],
+ "89": intNumberTerms[89],
+ "90": intNumberTerms[90],
+ "91": intNumberTerms[91],
+ "92": intNumberTerms[92],
+ "93": intNumberTerms[93],
+ "94": intNumberTerms[94],
+ "95": intNumberTerms[95],
+ "96": intNumberTerms[96],
+ "97": intNumberTerms[97],
+ "98": intNumberTerms[98],
+ "99": intNumberTerms[99],
+ "100": intNumberTerms[100],
+ "101": intNumberTerms[101],
+ "102": intNumberTerms[102],
+ "103": intNumberTerms[103],
+ "104": intNumberTerms[104],
+ "105": intNumberTerms[105],
+ "106": intNumberTerms[106],
+ "107": intNumberTerms[107],
+ "108": intNumberTerms[108],
+ "109": intNumberTerms[109],
+ "110": intNumberTerms[110],
+ "111": intNumberTerms[111],
+ "112": intNumberTerms[112],
+ "113": intNumberTerms[113],
+ "114": intNumberTerms[114],
+ "115": intNumberTerms[115],
+ "116": intNumberTerms[116],
+ "117": intNumberTerms[117],
+ "118": intNumberTerms[118],
+ "119": intNumberTerms[119],
+ "120": intNumberTerms[120],
+ "121": intNumberTerms[121],
+ "122": intNumberTerms[122],
+ "123": intNumberTerms[123],
+ "124": intNumberTerms[124],
+ "125": intNumberTerms[125],
+ "126": intNumberTerms[126],
+ "127": intNumberTerms[127],
+ "128": intNumberTerms[128],
+ "129": intNumberTerms[129],
+ "130": intNumberTerms[130],
+ "131": intNumberTerms[131],
+ "132": intNumberTerms[132],
+ "133": intNumberTerms[133],
+ "134": intNumberTerms[134],
+ "135": intNumberTerms[135],
+ "136": intNumberTerms[136],
+ "137": intNumberTerms[137],
+ "138": intNumberTerms[138],
+ "139": intNumberTerms[139],
+ "140": intNumberTerms[140],
+ "141": intNumberTerms[141],
+ "142": intNumberTerms[142],
+ "143": intNumberTerms[143],
+ "144": intNumberTerms[144],
+ "145": intNumberTerms[145],
+ "146": intNumberTerms[146],
+ "147": intNumberTerms[147],
+ "148": intNumberTerms[148],
+ "149": intNumberTerms[149],
+ "150": intNumberTerms[150],
+ "151": intNumberTerms[151],
+ "152": intNumberTerms[152],
+ "153": intNumberTerms[153],
+ "154": intNumberTerms[154],
+ "155": intNumberTerms[155],
+ "156": intNumberTerms[156],
+ "157": intNumberTerms[157],
+ "158": intNumberTerms[158],
+ "159": intNumberTerms[159],
+ "160": intNumberTerms[160],
+ "161": intNumberTerms[161],
+ "162": intNumberTerms[162],
+ "163": intNumberTerms[163],
+ "164": intNumberTerms[164],
+ "165": intNumberTerms[165],
+ "166": intNumberTerms[166],
+ "167": intNumberTerms[167],
+ "168": intNumberTerms[168],
+ "169": intNumberTerms[169],
+ "170": intNumberTerms[170],
+ "171": intNumberTerms[171],
+ "172": intNumberTerms[172],
+ "173": intNumberTerms[173],
+ "174": intNumberTerms[174],
+ "175": intNumberTerms[175],
+ "176": intNumberTerms[176],
+ "177": intNumberTerms[177],
+ "178": intNumberTerms[178],
+ "179": intNumberTerms[179],
+ "180": intNumberTerms[180],
+ "181": intNumberTerms[181],
+ "182": intNumberTerms[182],
+ "183": intNumberTerms[183],
+ "184": intNumberTerms[184],
+ "185": intNumberTerms[185],
+ "186": intNumberTerms[186],
+ "187": intNumberTerms[187],
+ "188": intNumberTerms[188],
+ "189": intNumberTerms[189],
+ "190": intNumberTerms[190],
+ "191": intNumberTerms[191],
+ "192": intNumberTerms[192],
+ "193": intNumberTerms[193],
+ "194": intNumberTerms[194],
+ "195": intNumberTerms[195],
+ "196": intNumberTerms[196],
+ "197": intNumberTerms[197],
+ "198": intNumberTerms[198],
+ "199": intNumberTerms[199],
+ "200": intNumberTerms[200],
+ "201": intNumberTerms[201],
+ "202": intNumberTerms[202],
+ "203": intNumberTerms[203],
+ "204": intNumberTerms[204],
+ "205": intNumberTerms[205],
+ "206": intNumberTerms[206],
+ "207": intNumberTerms[207],
+ "208": intNumberTerms[208],
+ "209": intNumberTerms[209],
+ "210": intNumberTerms[210],
+ "211": intNumberTerms[211],
+ "212": intNumberTerms[212],
+ "213": intNumberTerms[213],
+ "214": intNumberTerms[214],
+ "215": intNumberTerms[215],
+ "216": intNumberTerms[216],
+ "217": intNumberTerms[217],
+ "218": intNumberTerms[218],
+ "219": intNumberTerms[219],
+ "220": intNumberTerms[220],
+ "221": intNumberTerms[221],
+ "222": intNumberTerms[222],
+ "223": intNumberTerms[223],
+ "224": intNumberTerms[224],
+ "225": intNumberTerms[225],
+ "226": intNumberTerms[226],
+ "227": intNumberTerms[227],
+ "228": intNumberTerms[228],
+ "229": intNumberTerms[229],
+ "230": intNumberTerms[230],
+ "231": intNumberTerms[231],
+ "232": intNumberTerms[232],
+ "233": intNumberTerms[233],
+ "234": intNumberTerms[234],
+ "235": intNumberTerms[235],
+ "236": intNumberTerms[236],
+ "237": intNumberTerms[237],
+ "238": intNumberTerms[238],
+ "239": intNumberTerms[239],
+ "240": intNumberTerms[240],
+ "241": intNumberTerms[241],
+ "242": intNumberTerms[242],
+ "243": intNumberTerms[243],
+ "244": intNumberTerms[244],
+ "245": intNumberTerms[245],
+ "246": intNumberTerms[246],
+ "247": intNumberTerms[247],
+ "248": intNumberTerms[248],
+ "249": intNumberTerms[249],
+ "250": intNumberTerms[250],
+ "251": intNumberTerms[251],
+ "252": intNumberTerms[252],
+ "253": intNumberTerms[253],
+ "254": intNumberTerms[254],
+ "255": intNumberTerms[255],
+ "256": intNumberTerms[256],
+ "257": intNumberTerms[257],
+ "258": intNumberTerms[258],
+ "259": intNumberTerms[259],
+ "260": intNumberTerms[260],
+ "261": intNumberTerms[261],
+ "262": intNumberTerms[262],
+ "263": intNumberTerms[263],
+ "264": intNumberTerms[264],
+ "265": intNumberTerms[265],
+ "266": intNumberTerms[266],
+ "267": intNumberTerms[267],
+ "268": intNumberTerms[268],
+ "269": intNumberTerms[269],
+ "270": intNumberTerms[270],
+ "271": intNumberTerms[271],
+ "272": intNumberTerms[272],
+ "273": intNumberTerms[273],
+ "274": intNumberTerms[274],
+ "275": intNumberTerms[275],
+ "276": intNumberTerms[276],
+ "277": intNumberTerms[277],
+ "278": intNumberTerms[278],
+ "279": intNumberTerms[279],
+ "280": intNumberTerms[280],
+ "281": intNumberTerms[281],
+ "282": intNumberTerms[282],
+ "283": intNumberTerms[283],
+ "284": intNumberTerms[284],
+ "285": intNumberTerms[285],
+ "286": intNumberTerms[286],
+ "287": intNumberTerms[287],
+ "288": intNumberTerms[288],
+ "289": intNumberTerms[289],
+ "290": intNumberTerms[290],
+ "291": intNumberTerms[291],
+ "292": intNumberTerms[292],
+ "293": intNumberTerms[293],
+ "294": intNumberTerms[294],
+ "295": intNumberTerms[295],
+ "296": intNumberTerms[296],
+ "297": intNumberTerms[297],
+ "298": intNumberTerms[298],
+ "299": intNumberTerms[299],
+ "300": intNumberTerms[300],
+ "301": intNumberTerms[301],
+ "302": intNumberTerms[302],
+ "303": intNumberTerms[303],
+ "304": intNumberTerms[304],
+ "305": intNumberTerms[305],
+ "306": intNumberTerms[306],
+ "307": intNumberTerms[307],
+ "308": intNumberTerms[308],
+ "309": intNumberTerms[309],
+ "310": intNumberTerms[310],
+ "311": intNumberTerms[311],
+ "312": intNumberTerms[312],
+ "313": intNumberTerms[313],
+ "314": intNumberTerms[314],
+ "315": intNumberTerms[315],
+ "316": intNumberTerms[316],
+ "317": intNumberTerms[317],
+ "318": intNumberTerms[318],
+ "319": intNumberTerms[319],
+ "320": intNumberTerms[320],
+ "321": intNumberTerms[321],
+ "322": intNumberTerms[322],
+ "323": intNumberTerms[323],
+ "324": intNumberTerms[324],
+ "325": intNumberTerms[325],
+ "326": intNumberTerms[326],
+ "327": intNumberTerms[327],
+ "328": intNumberTerms[328],
+ "329": intNumberTerms[329],
+ "330": intNumberTerms[330],
+ "331": intNumberTerms[331],
+ "332": intNumberTerms[332],
+ "333": intNumberTerms[333],
+ "334": intNumberTerms[334],
+ "335": intNumberTerms[335],
+ "336": intNumberTerms[336],
+ "337": intNumberTerms[337],
+ "338": intNumberTerms[338],
+ "339": intNumberTerms[339],
+ "340": intNumberTerms[340],
+ "341": intNumberTerms[341],
+ "342": intNumberTerms[342],
+ "343": intNumberTerms[343],
+ "344": intNumberTerms[344],
+ "345": intNumberTerms[345],
+ "346": intNumberTerms[346],
+ "347": intNumberTerms[347],
+ "348": intNumberTerms[348],
+ "349": intNumberTerms[349],
+ "350": intNumberTerms[350],
+ "351": intNumberTerms[351],
+ "352": intNumberTerms[352],
+ "353": intNumberTerms[353],
+ "354": intNumberTerms[354],
+ "355": intNumberTerms[355],
+ "356": intNumberTerms[356],
+ "357": intNumberTerms[357],
+ "358": intNumberTerms[358],
+ "359": intNumberTerms[359],
+ "360": intNumberTerms[360],
+ "361": intNumberTerms[361],
+ "362": intNumberTerms[362],
+ "363": intNumberTerms[363],
+ "364": intNumberTerms[364],
+ "365": intNumberTerms[365],
+ "366": intNumberTerms[366],
+ "367": intNumberTerms[367],
+ "368": intNumberTerms[368],
+ "369": intNumberTerms[369],
+ "370": intNumberTerms[370],
+ "371": intNumberTerms[371],
+ "372": intNumberTerms[372],
+ "373": intNumberTerms[373],
+ "374": intNumberTerms[374],
+ "375": intNumberTerms[375],
+ "376": intNumberTerms[376],
+ "377": intNumberTerms[377],
+ "378": intNumberTerms[378],
+ "379": intNumberTerms[379],
+ "380": intNumberTerms[380],
+ "381": intNumberTerms[381],
+ "382": intNumberTerms[382],
+ "383": intNumberTerms[383],
+ "384": intNumberTerms[384],
+ "385": intNumberTerms[385],
+ "386": intNumberTerms[386],
+ "387": intNumberTerms[387],
+ "388": intNumberTerms[388],
+ "389": intNumberTerms[389],
+ "390": intNumberTerms[390],
+ "391": intNumberTerms[391],
+ "392": intNumberTerms[392],
+ "393": intNumberTerms[393],
+ "394": intNumberTerms[394],
+ "395": intNumberTerms[395],
+ "396": intNumberTerms[396],
+ "397": intNumberTerms[397],
+ "398": intNumberTerms[398],
+ "399": intNumberTerms[399],
+ "400": intNumberTerms[400],
+ "401": intNumberTerms[401],
+ "402": intNumberTerms[402],
+ "403": intNumberTerms[403],
+ "404": intNumberTerms[404],
+ "405": intNumberTerms[405],
+ "406": intNumberTerms[406],
+ "407": intNumberTerms[407],
+ "408": intNumberTerms[408],
+ "409": intNumberTerms[409],
+ "410": intNumberTerms[410],
+ "411": intNumberTerms[411],
+ "412": intNumberTerms[412],
+ "413": intNumberTerms[413],
+ "414": intNumberTerms[414],
+ "415": intNumberTerms[415],
+ "416": intNumberTerms[416],
+ "417": intNumberTerms[417],
+ "418": intNumberTerms[418],
+ "419": intNumberTerms[419],
+ "420": intNumberTerms[420],
+ "421": intNumberTerms[421],
+ "422": intNumberTerms[422],
+ "423": intNumberTerms[423],
+ "424": intNumberTerms[424],
+ "425": intNumberTerms[425],
+ "426": intNumberTerms[426],
+ "427": intNumberTerms[427],
+ "428": intNumberTerms[428],
+ "429": intNumberTerms[429],
+ "430": intNumberTerms[430],
+ "431": intNumberTerms[431],
+ "432": intNumberTerms[432],
+ "433": intNumberTerms[433],
+ "434": intNumberTerms[434],
+ "435": intNumberTerms[435],
+ "436": intNumberTerms[436],
+ "437": intNumberTerms[437],
+ "438": intNumberTerms[438],
+ "439": intNumberTerms[439],
+ "440": intNumberTerms[440],
+ "441": intNumberTerms[441],
+ "442": intNumberTerms[442],
+ "443": intNumberTerms[443],
+ "444": intNumberTerms[444],
+ "445": intNumberTerms[445],
+ "446": intNumberTerms[446],
+ "447": intNumberTerms[447],
+ "448": intNumberTerms[448],
+ "449": intNumberTerms[449],
+ "450": intNumberTerms[450],
+ "451": intNumberTerms[451],
+ "452": intNumberTerms[452],
+ "453": intNumberTerms[453],
+ "454": intNumberTerms[454],
+ "455": intNumberTerms[455],
+ "456": intNumberTerms[456],
+ "457": intNumberTerms[457],
+ "458": intNumberTerms[458],
+ "459": intNumberTerms[459],
+ "460": intNumberTerms[460],
+ "461": intNumberTerms[461],
+ "462": intNumberTerms[462],
+ "463": intNumberTerms[463],
+ "464": intNumberTerms[464],
+ "465": intNumberTerms[465],
+ "466": intNumberTerms[466],
+ "467": intNumberTerms[467],
+ "468": intNumberTerms[468],
+ "469": intNumberTerms[469],
+ "470": intNumberTerms[470],
+ "471": intNumberTerms[471],
+ "472": intNumberTerms[472],
+ "473": intNumberTerms[473],
+ "474": intNumberTerms[474],
+ "475": intNumberTerms[475],
+ "476": intNumberTerms[476],
+ "477": intNumberTerms[477],
+ "478": intNumberTerms[478],
+ "479": intNumberTerms[479],
+ "480": intNumberTerms[480],
+ "481": intNumberTerms[481],
+ "482": intNumberTerms[482],
+ "483": intNumberTerms[483],
+ "484": intNumberTerms[484],
+ "485": intNumberTerms[485],
+ "486": intNumberTerms[486],
+ "487": intNumberTerms[487],
+ "488": intNumberTerms[488],
+ "489": intNumberTerms[489],
+ "490": intNumberTerms[490],
+ "491": intNumberTerms[491],
+ "492": intNumberTerms[492],
+ "493": intNumberTerms[493],
+ "494": intNumberTerms[494],
+ "495": intNumberTerms[495],
+ "496": intNumberTerms[496],
+ "497": intNumberTerms[497],
+ "498": intNumberTerms[498],
+ "499": intNumberTerms[499],
+ "500": intNumberTerms[500],
+ "501": intNumberTerms[501],
+ "502": intNumberTerms[502],
+ "503": intNumberTerms[503],
+ "504": intNumberTerms[504],
+ "505": intNumberTerms[505],
+ "506": intNumberTerms[506],
+ "507": intNumberTerms[507],
+ "508": intNumberTerms[508],
+ "509": intNumberTerms[509],
+ "510": intNumberTerms[510],
+ "511": intNumberTerms[511],
+ "512": intNumberTerms[512],
+}
+
+var intNumberValues = [...]Value{
+ Number("0"),
+ Number("1"),
+ Number("2"),
+ Number("3"),
+ Number("4"),
+ Number("5"),
+ Number("6"),
+ Number("7"),
+ Number("8"),
+ Number("9"),
+ Number("10"),
+ Number("11"),
+ Number("12"),
+ Number("13"),
+ Number("14"),
+ Number("15"),
+ Number("16"),
+ Number("17"),
+ Number("18"),
+ Number("19"),
+ Number("20"),
+ Number("21"),
+ Number("22"),
+ Number("23"),
+ Number("24"),
+ Number("25"),
+ Number("26"),
+ Number("27"),
+ Number("28"),
+ Number("29"),
+ Number("30"),
+ Number("31"),
+ Number("32"),
+ Number("33"),
+ Number("34"),
+ Number("35"),
+ Number("36"),
+ Number("37"),
+ Number("38"),
+ Number("39"),
+ Number("40"),
+ Number("41"),
+ Number("42"),
+ Number("43"),
+ Number("44"),
+ Number("45"),
+ Number("46"),
+ Number("47"),
+ Number("48"),
+ Number("49"),
+ Number("50"),
+ Number("51"),
+ Number("52"),
+ Number("53"),
+ Number("54"),
+ Number("55"),
+ Number("56"),
+ Number("57"),
+ Number("58"),
+ Number("59"),
+ Number("60"),
+ Number("61"),
+ Number("62"),
+ Number("63"),
+ Number("64"),
+ Number("65"),
+ Number("66"),
+ Number("67"),
+ Number("68"),
+ Number("69"),
+ Number("70"),
+ Number("71"),
+ Number("72"),
+ Number("73"),
+ Number("74"),
+ Number("75"),
+ Number("76"),
+ Number("77"),
+ Number("78"),
+ Number("79"),
+ Number("80"),
+ Number("81"),
+ Number("82"),
+ Number("83"),
+ Number("84"),
+ Number("85"),
+ Number("86"),
+ Number("87"),
+ Number("88"),
+ Number("89"),
+ Number("90"),
+ Number("91"),
+ Number("92"),
+ Number("93"),
+ Number("94"),
+ Number("95"),
+ Number("96"),
+ Number("97"),
+ Number("98"),
+ Number("99"),
+ Number("100"),
+ Number("101"),
+ Number("102"),
+ Number("103"),
+ Number("104"),
+ Number("105"),
+ Number("106"),
+ Number("107"),
+ Number("108"),
+ Number("109"),
+ Number("110"),
+ Number("111"),
+ Number("112"),
+ Number("113"),
+ Number("114"),
+ Number("115"),
+ Number("116"),
+ Number("117"),
+ Number("118"),
+ Number("119"),
+ Number("120"),
+ Number("121"),
+ Number("122"),
+ Number("123"),
+ Number("124"),
+ Number("125"),
+ Number("126"),
+ Number("127"),
+ Number("128"),
+ Number("129"),
+ Number("130"),
+ Number("131"),
+ Number("132"),
+ Number("133"),
+ Number("134"),
+ Number("135"),
+ Number("136"),
+ Number("137"),
+ Number("138"),
+ Number("139"),
+ Number("140"),
+ Number("141"),
+ Number("142"),
+ Number("143"),
+ Number("144"),
+ Number("145"),
+ Number("146"),
+ Number("147"),
+ Number("148"),
+ Number("149"),
+ Number("150"),
+ Number("151"),
+ Number("152"),
+ Number("153"),
+ Number("154"),
+ Number("155"),
+ Number("156"),
+ Number("157"),
+ Number("158"),
+ Number("159"),
+ Number("160"),
+ Number("161"),
+ Number("162"),
+ Number("163"),
+ Number("164"),
+ Number("165"),
+ Number("166"),
+ Number("167"),
+ Number("168"),
+ Number("169"),
+ Number("170"),
+ Number("171"),
+ Number("172"),
+ Number("173"),
+ Number("174"),
+ Number("175"),
+ Number("176"),
+ Number("177"),
+ Number("178"),
+ Number("179"),
+ Number("180"),
+ Number("181"),
+ Number("182"),
+ Number("183"),
+ Number("184"),
+ Number("185"),
+ Number("186"),
+ Number("187"),
+ Number("188"),
+ Number("189"),
+ Number("190"),
+ Number("191"),
+ Number("192"),
+ Number("193"),
+ Number("194"),
+ Number("195"),
+ Number("196"),
+ Number("197"),
+ Number("198"),
+ Number("199"),
+ Number("200"),
+ Number("201"),
+ Number("202"),
+ Number("203"),
+ Number("204"),
+ Number("205"),
+ Number("206"),
+ Number("207"),
+ Number("208"),
+ Number("209"),
+ Number("210"),
+ Number("211"),
+ Number("212"),
+ Number("213"),
+ Number("214"),
+ Number("215"),
+ Number("216"),
+ Number("217"),
+ Number("218"),
+ Number("219"),
+ Number("220"),
+ Number("221"),
+ Number("222"),
+ Number("223"),
+ Number("224"),
+ Number("225"),
+ Number("226"),
+ Number("227"),
+ Number("228"),
+ Number("229"),
+ Number("230"),
+ Number("231"),
+ Number("232"),
+ Number("233"),
+ Number("234"),
+ Number("235"),
+ Number("236"),
+ Number("237"),
+ Number("238"),
+ Number("239"),
+ Number("240"),
+ Number("241"),
+ Number("242"),
+ Number("243"),
+ Number("244"),
+ Number("245"),
+ Number("246"),
+ Number("247"),
+ Number("248"),
+ Number("249"),
+ Number("250"),
+ Number("251"),
+ Number("252"),
+ Number("253"),
+ Number("254"),
+ Number("255"),
+ Number("256"),
+ Number("257"),
+ Number("258"),
+ Number("259"),
+ Number("260"),
+ Number("261"),
+ Number("262"),
+ Number("263"),
+ Number("264"),
+ Number("265"),
+ Number("266"),
+ Number("267"),
+ Number("268"),
+ Number("269"),
+ Number("270"),
+ Number("271"),
+ Number("272"),
+ Number("273"),
+ Number("274"),
+ Number("275"),
+ Number("276"),
+ Number("277"),
+ Number("278"),
+ Number("279"),
+ Number("280"),
+ Number("281"),
+ Number("282"),
+ Number("283"),
+ Number("284"),
+ Number("285"),
+ Number("286"),
+ Number("287"),
+ Number("288"),
+ Number("289"),
+ Number("290"),
+ Number("291"),
+ Number("292"),
+ Number("293"),
+ Number("294"),
+ Number("295"),
+ Number("296"),
+ Number("297"),
+ Number("298"),
+ Number("299"),
+ Number("300"),
+ Number("301"),
+ Number("302"),
+ Number("303"),
+ Number("304"),
+ Number("305"),
+ Number("306"),
+ Number("307"),
+ Number("308"),
+ Number("309"),
+ Number("310"),
+ Number("311"),
+ Number("312"),
+ Number("313"),
+ Number("314"),
+ Number("315"),
+ Number("316"),
+ Number("317"),
+ Number("318"),
+ Number("319"),
+ Number("320"),
+ Number("321"),
+ Number("322"),
+ Number("323"),
+ Number("324"),
+ Number("325"),
+ Number("326"),
+ Number("327"),
+ Number("328"),
+ Number("329"),
+ Number("330"),
+ Number("331"),
+ Number("332"),
+ Number("333"),
+ Number("334"),
+ Number("335"),
+ Number("336"),
+ Number("337"),
+ Number("338"),
+ Number("339"),
+ Number("340"),
+ Number("341"),
+ Number("342"),
+ Number("343"),
+ Number("344"),
+ Number("345"),
+ Number("346"),
+ Number("347"),
+ Number("348"),
+ Number("349"),
+ Number("350"),
+ Number("351"),
+ Number("352"),
+ Number("353"),
+ Number("354"),
+ Number("355"),
+ Number("356"),
+ Number("357"),
+ Number("358"),
+ Number("359"),
+ Number("360"),
+ Number("361"),
+ Number("362"),
+ Number("363"),
+ Number("364"),
+ Number("365"),
+ Number("366"),
+ Number("367"),
+ Number("368"),
+ Number("369"),
+ Number("370"),
+ Number("371"),
+ Number("372"),
+ Number("373"),
+ Number("374"),
+ Number("375"),
+ Number("376"),
+ Number("377"),
+ Number("378"),
+ Number("379"),
+ Number("380"),
+ Number("381"),
+ Number("382"),
+ Number("383"),
+ Number("384"),
+ Number("385"),
+ Number("386"),
+ Number("387"),
+ Number("388"),
+ Number("389"),
+ Number("390"),
+ Number("391"),
+ Number("392"),
+ Number("393"),
+ Number("394"),
+ Number("395"),
+ Number("396"),
+ Number("397"),
+ Number("398"),
+ Number("399"),
+ Number("400"),
+ Number("401"),
+ Number("402"),
+ Number("403"),
+ Number("404"),
+ Number("405"),
+ Number("406"),
+ Number("407"),
+ Number("408"),
+ Number("409"),
+ Number("410"),
+ Number("411"),
+ Number("412"),
+ Number("413"),
+ Number("414"),
+ Number("415"),
+ Number("416"),
+ Number("417"),
+ Number("418"),
+ Number("419"),
+ Number("420"),
+ Number("421"),
+ Number("422"),
+ Number("423"),
+ Number("424"),
+ Number("425"),
+ Number("426"),
+ Number("427"),
+ Number("428"),
+ Number("429"),
+ Number("430"),
+ Number("431"),
+ Number("432"),
+ Number("433"),
+ Number("434"),
+ Number("435"),
+ Number("436"),
+ Number("437"),
+ Number("438"),
+ Number("439"),
+ Number("440"),
+ Number("441"),
+ Number("442"),
+ Number("443"),
+ Number("444"),
+ Number("445"),
+ Number("446"),
+ Number("447"),
+ Number("448"),
+ Number("449"),
+ Number("450"),
+ Number("451"),
+ Number("452"),
+ Number("453"),
+ Number("454"),
+ Number("455"),
+ Number("456"),
+ Number("457"),
+ Number("458"),
+ Number("459"),
+ Number("460"),
+ Number("461"),
+ Number("462"),
+ Number("463"),
+ Number("464"),
+ Number("465"),
+ Number("466"),
+ Number("467"),
+ Number("468"),
+ Number("469"),
+ Number("470"),
+ Number("471"),
+ Number("472"),
+ Number("473"),
+ Number("474"),
+ Number("475"),
+ Number("476"),
+ Number("477"),
+ Number("478"),
+ Number("479"),
+ Number("480"),
+ Number("481"),
+ Number("482"),
+ Number("483"),
+ Number("484"),
+ Number("485"),
+ Number("486"),
+ Number("487"),
+ Number("488"),
+ Number("489"),
+ Number("490"),
+ Number("491"),
+ Number("492"),
+ Number("493"),
+ Number("494"),
+ Number("495"),
+ Number("496"),
+ Number("497"),
+ Number("498"),
+ Number("499"),
+ Number("500"),
+ Number("501"),
+ Number("502"),
+ Number("503"),
+ Number("504"),
+ Number("505"),
+ Number("506"),
+ Number("507"),
+ Number("508"),
+ Number("509"),
+ Number("510"),
+ Number("511"),
+ Number("512"),
+}
+
+var intNumberTerms = [...]*Term{
+ {Value: intNumberValues[0]},
+ {Value: intNumberValues[1]},
+ {Value: intNumberValues[2]},
+ {Value: intNumberValues[3]},
+ {Value: intNumberValues[4]},
+ {Value: intNumberValues[5]},
+ {Value: intNumberValues[6]},
+ {Value: intNumberValues[7]},
+ {Value: intNumberValues[8]},
+ {Value: intNumberValues[9]},
+ {Value: intNumberValues[10]},
+ {Value: intNumberValues[11]},
+ {Value: intNumberValues[12]},
+ {Value: intNumberValues[13]},
+ {Value: intNumberValues[14]},
+ {Value: intNumberValues[15]},
+ {Value: intNumberValues[16]},
+ {Value: intNumberValues[17]},
+ {Value: intNumberValues[18]},
+ {Value: intNumberValues[19]},
+ {Value: intNumberValues[20]},
+ {Value: intNumberValues[21]},
+ {Value: intNumberValues[22]},
+ {Value: intNumberValues[23]},
+ {Value: intNumberValues[24]},
+ {Value: intNumberValues[25]},
+ {Value: intNumberValues[26]},
+ {Value: intNumberValues[27]},
+ {Value: intNumberValues[28]},
+ {Value: intNumberValues[29]},
+ {Value: intNumberValues[30]},
+ {Value: intNumberValues[31]},
+ {Value: intNumberValues[32]},
+ {Value: intNumberValues[33]},
+ {Value: intNumberValues[34]},
+ {Value: intNumberValues[35]},
+ {Value: intNumberValues[36]},
+ {Value: intNumberValues[37]},
+ {Value: intNumberValues[38]},
+ {Value: intNumberValues[39]},
+ {Value: intNumberValues[40]},
+ {Value: intNumberValues[41]},
+ {Value: intNumberValues[42]},
+ {Value: intNumberValues[43]},
+ {Value: intNumberValues[44]},
+ {Value: intNumberValues[45]},
+ {Value: intNumberValues[46]},
+ {Value: intNumberValues[47]},
+ {Value: intNumberValues[48]},
+ {Value: intNumberValues[49]},
+ {Value: intNumberValues[50]},
+ {Value: intNumberValues[51]},
+ {Value: intNumberValues[52]},
+ {Value: intNumberValues[53]},
+ {Value: intNumberValues[54]},
+ {Value: intNumberValues[55]},
+ {Value: intNumberValues[56]},
+ {Value: intNumberValues[57]},
+ {Value: intNumberValues[58]},
+ {Value: intNumberValues[59]},
+ {Value: intNumberValues[60]},
+ {Value: intNumberValues[61]},
+ {Value: intNumberValues[62]},
+ {Value: intNumberValues[63]},
+ {Value: intNumberValues[64]},
+ {Value: intNumberValues[65]},
+ {Value: intNumberValues[66]},
+ {Value: intNumberValues[67]},
+ {Value: intNumberValues[68]},
+ {Value: intNumberValues[69]},
+ {Value: intNumberValues[70]},
+ {Value: intNumberValues[71]},
+ {Value: intNumberValues[72]},
+ {Value: intNumberValues[73]},
+ {Value: intNumberValues[74]},
+ {Value: intNumberValues[75]},
+ {Value: intNumberValues[76]},
+ {Value: intNumberValues[77]},
+ {Value: intNumberValues[78]},
+ {Value: intNumberValues[79]},
+ {Value: intNumberValues[80]},
+ {Value: intNumberValues[81]},
+ {Value: intNumberValues[82]},
+ {Value: intNumberValues[83]},
+ {Value: intNumberValues[84]},
+ {Value: intNumberValues[85]},
+ {Value: intNumberValues[86]},
+ {Value: intNumberValues[87]},
+ {Value: intNumberValues[88]},
+ {Value: intNumberValues[89]},
+ {Value: intNumberValues[90]},
+ {Value: intNumberValues[91]},
+ {Value: intNumberValues[92]},
+ {Value: intNumberValues[93]},
+ {Value: intNumberValues[94]},
+ {Value: intNumberValues[95]},
+ {Value: intNumberValues[96]},
+ {Value: intNumberValues[97]},
+ {Value: intNumberValues[98]},
+ {Value: intNumberValues[99]},
+ {Value: intNumberValues[100]},
+ {Value: intNumberValues[101]},
+ {Value: intNumberValues[102]},
+ {Value: intNumberValues[103]},
+ {Value: intNumberValues[104]},
+ {Value: intNumberValues[105]},
+ {Value: intNumberValues[106]},
+ {Value: intNumberValues[107]},
+ {Value: intNumberValues[108]},
+ {Value: intNumberValues[109]},
+ {Value: intNumberValues[110]},
+ {Value: intNumberValues[111]},
+ {Value: intNumberValues[112]},
+ {Value: intNumberValues[113]},
+ {Value: intNumberValues[114]},
+ {Value: intNumberValues[115]},
+ {Value: intNumberValues[116]},
+ {Value: intNumberValues[117]},
+ {Value: intNumberValues[118]},
+ {Value: intNumberValues[119]},
+ {Value: intNumberValues[120]},
+ {Value: intNumberValues[121]},
+ {Value: intNumberValues[122]},
+ {Value: intNumberValues[123]},
+ {Value: intNumberValues[124]},
+ {Value: intNumberValues[125]},
+ {Value: intNumberValues[126]},
+ {Value: intNumberValues[127]},
+ {Value: intNumberValues[128]},
+ {Value: intNumberValues[129]},
+ {Value: intNumberValues[130]},
+ {Value: intNumberValues[131]},
+ {Value: intNumberValues[132]},
+ {Value: intNumberValues[133]},
+ {Value: intNumberValues[134]},
+ {Value: intNumberValues[135]},
+ {Value: intNumberValues[136]},
+ {Value: intNumberValues[137]},
+ {Value: intNumberValues[138]},
+ {Value: intNumberValues[139]},
+ {Value: intNumberValues[140]},
+ {Value: intNumberValues[141]},
+ {Value: intNumberValues[142]},
+ {Value: intNumberValues[143]},
+ {Value: intNumberValues[144]},
+ {Value: intNumberValues[145]},
+ {Value: intNumberValues[146]},
+ {Value: intNumberValues[147]},
+ {Value: intNumberValues[148]},
+ {Value: intNumberValues[149]},
+ {Value: intNumberValues[150]},
+ {Value: intNumberValues[151]},
+ {Value: intNumberValues[152]},
+ {Value: intNumberValues[153]},
+ {Value: intNumberValues[154]},
+ {Value: intNumberValues[155]},
+ {Value: intNumberValues[156]},
+ {Value: intNumberValues[157]},
+ {Value: intNumberValues[158]},
+ {Value: intNumberValues[159]},
+ {Value: intNumberValues[160]},
+ {Value: intNumberValues[161]},
+ {Value: intNumberValues[162]},
+ {Value: intNumberValues[163]},
+ {Value: intNumberValues[164]},
+ {Value: intNumberValues[165]},
+ {Value: intNumberValues[166]},
+ {Value: intNumberValues[167]},
+ {Value: intNumberValues[168]},
+ {Value: intNumberValues[169]},
+ {Value: intNumberValues[170]},
+ {Value: intNumberValues[171]},
+ {Value: intNumberValues[172]},
+ {Value: intNumberValues[173]},
+ {Value: intNumberValues[174]},
+ {Value: intNumberValues[175]},
+ {Value: intNumberValues[176]},
+ {Value: intNumberValues[177]},
+ {Value: intNumberValues[178]},
+ {Value: intNumberValues[179]},
+ {Value: intNumberValues[180]},
+ {Value: intNumberValues[181]},
+ {Value: intNumberValues[182]},
+ {Value: intNumberValues[183]},
+ {Value: intNumberValues[184]},
+ {Value: intNumberValues[185]},
+ {Value: intNumberValues[186]},
+ {Value: intNumberValues[187]},
+ {Value: intNumberValues[188]},
+ {Value: intNumberValues[189]},
+ {Value: intNumberValues[190]},
+ {Value: intNumberValues[191]},
+ {Value: intNumberValues[192]},
+ {Value: intNumberValues[193]},
+ {Value: intNumberValues[194]},
+ {Value: intNumberValues[195]},
+ {Value: intNumberValues[196]},
+ {Value: intNumberValues[197]},
+ {Value: intNumberValues[198]},
+ {Value: intNumberValues[199]},
+ {Value: intNumberValues[200]},
+ {Value: intNumberValues[201]},
+ {Value: intNumberValues[202]},
+ {Value: intNumberValues[203]},
+ {Value: intNumberValues[204]},
+ {Value: intNumberValues[205]},
+ {Value: intNumberValues[206]},
+ {Value: intNumberValues[207]},
+ {Value: intNumberValues[208]},
+ {Value: intNumberValues[209]},
+ {Value: intNumberValues[210]},
+ {Value: intNumberValues[211]},
+ {Value: intNumberValues[212]},
+ {Value: intNumberValues[213]},
+ {Value: intNumberValues[214]},
+ {Value: intNumberValues[215]},
+ {Value: intNumberValues[216]},
+ {Value: intNumberValues[217]},
+ {Value: intNumberValues[218]},
+ {Value: intNumberValues[219]},
+ {Value: intNumberValues[220]},
+ {Value: intNumberValues[221]},
+ {Value: intNumberValues[222]},
+ {Value: intNumberValues[223]},
+ {Value: intNumberValues[224]},
+ {Value: intNumberValues[225]},
+ {Value: intNumberValues[226]},
+ {Value: intNumberValues[227]},
+ {Value: intNumberValues[228]},
+ {Value: intNumberValues[229]},
+ {Value: intNumberValues[230]},
+ {Value: intNumberValues[231]},
+ {Value: intNumberValues[232]},
+ {Value: intNumberValues[233]},
+ {Value: intNumberValues[234]},
+ {Value: intNumberValues[235]},
+ {Value: intNumberValues[236]},
+ {Value: intNumberValues[237]},
+ {Value: intNumberValues[238]},
+ {Value: intNumberValues[239]},
+ {Value: intNumberValues[240]},
+ {Value: intNumberValues[241]},
+ {Value: intNumberValues[242]},
+ {Value: intNumberValues[243]},
+ {Value: intNumberValues[244]},
+ {Value: intNumberValues[245]},
+ {Value: intNumberValues[246]},
+ {Value: intNumberValues[247]},
+ {Value: intNumberValues[248]},
+ {Value: intNumberValues[249]},
+ {Value: intNumberValues[250]},
+ {Value: intNumberValues[251]},
+ {Value: intNumberValues[252]},
+ {Value: intNumberValues[253]},
+ {Value: intNumberValues[254]},
+ {Value: intNumberValues[255]},
+ {Value: intNumberValues[256]},
+ {Value: intNumberValues[257]},
+ {Value: intNumberValues[258]},
+ {Value: intNumberValues[259]},
+ {Value: intNumberValues[260]},
+ {Value: intNumberValues[261]},
+ {Value: intNumberValues[262]},
+ {Value: intNumberValues[263]},
+ {Value: intNumberValues[264]},
+ {Value: intNumberValues[265]},
+ {Value: intNumberValues[266]},
+ {Value: intNumberValues[267]},
+ {Value: intNumberValues[268]},
+ {Value: intNumberValues[269]},
+ {Value: intNumberValues[270]},
+ {Value: intNumberValues[271]},
+ {Value: intNumberValues[272]},
+ {Value: intNumberValues[273]},
+ {Value: intNumberValues[274]},
+ {Value: intNumberValues[275]},
+ {Value: intNumberValues[276]},
+ {Value: intNumberValues[277]},
+ {Value: intNumberValues[278]},
+ {Value: intNumberValues[279]},
+ {Value: intNumberValues[280]},
+ {Value: intNumberValues[281]},
+ {Value: intNumberValues[282]},
+ {Value: intNumberValues[283]},
+ {Value: intNumberValues[284]},
+ {Value: intNumberValues[285]},
+ {Value: intNumberValues[286]},
+ {Value: intNumberValues[287]},
+ {Value: intNumberValues[288]},
+ {Value: intNumberValues[289]},
+ {Value: intNumberValues[290]},
+ {Value: intNumberValues[291]},
+ {Value: intNumberValues[292]},
+ {Value: intNumberValues[293]},
+ {Value: intNumberValues[294]},
+ {Value: intNumberValues[295]},
+ {Value: intNumberValues[296]},
+ {Value: intNumberValues[297]},
+ {Value: intNumberValues[298]},
+ {Value: intNumberValues[299]},
+ {Value: intNumberValues[300]},
+ {Value: intNumberValues[301]},
+ {Value: intNumberValues[302]},
+ {Value: intNumberValues[303]},
+ {Value: intNumberValues[304]},
+ {Value: intNumberValues[305]},
+ {Value: intNumberValues[306]},
+ {Value: intNumberValues[307]},
+ {Value: intNumberValues[308]},
+ {Value: intNumberValues[309]},
+ {Value: intNumberValues[310]},
+ {Value: intNumberValues[311]},
+ {Value: intNumberValues[312]},
+ {Value: intNumberValues[313]},
+ {Value: intNumberValues[314]},
+ {Value: intNumberValues[315]},
+ {Value: intNumberValues[316]},
+ {Value: intNumberValues[317]},
+ {Value: intNumberValues[318]},
+ {Value: intNumberValues[319]},
+ {Value: intNumberValues[320]},
+ {Value: intNumberValues[321]},
+ {Value: intNumberValues[322]},
+ {Value: intNumberValues[323]},
+ {Value: intNumberValues[324]},
+ {Value: intNumberValues[325]},
+ {Value: intNumberValues[326]},
+ {Value: intNumberValues[327]},
+ {Value: intNumberValues[328]},
+ {Value: intNumberValues[329]},
+ {Value: intNumberValues[330]},
+ {Value: intNumberValues[331]},
+ {Value: intNumberValues[332]},
+ {Value: intNumberValues[333]},
+ {Value: intNumberValues[334]},
+ {Value: intNumberValues[335]},
+ {Value: intNumberValues[336]},
+ {Value: intNumberValues[337]},
+ {Value: intNumberValues[338]},
+ {Value: intNumberValues[339]},
+ {Value: intNumberValues[340]},
+ {Value: intNumberValues[341]},
+ {Value: intNumberValues[342]},
+ {Value: intNumberValues[343]},
+ {Value: intNumberValues[344]},
+ {Value: intNumberValues[345]},
+ {Value: intNumberValues[346]},
+ {Value: intNumberValues[347]},
+ {Value: intNumberValues[348]},
+ {Value: intNumberValues[349]},
+ {Value: intNumberValues[350]},
+ {Value: intNumberValues[351]},
+ {Value: intNumberValues[352]},
+ {Value: intNumberValues[353]},
+ {Value: intNumberValues[354]},
+ {Value: intNumberValues[355]},
+ {Value: intNumberValues[356]},
+ {Value: intNumberValues[357]},
+ {Value: intNumberValues[358]},
+ {Value: intNumberValues[359]},
+ {Value: intNumberValues[360]},
+ {Value: intNumberValues[361]},
+ {Value: intNumberValues[362]},
+ {Value: intNumberValues[363]},
+ {Value: intNumberValues[364]},
+ {Value: intNumberValues[365]},
+ {Value: intNumberValues[366]},
+ {Value: intNumberValues[367]},
+ {Value: intNumberValues[368]},
+ {Value: intNumberValues[369]},
+ {Value: intNumberValues[370]},
+ {Value: intNumberValues[371]},
+ {Value: intNumberValues[372]},
+ {Value: intNumberValues[373]},
+ {Value: intNumberValues[374]},
+ {Value: intNumberValues[375]},
+ {Value: intNumberValues[376]},
+ {Value: intNumberValues[377]},
+ {Value: intNumberValues[378]},
+ {Value: intNumberValues[379]},
+ {Value: intNumberValues[380]},
+ {Value: intNumberValues[381]},
+ {Value: intNumberValues[382]},
+ {Value: intNumberValues[383]},
+ {Value: intNumberValues[384]},
+ {Value: intNumberValues[385]},
+ {Value: intNumberValues[386]},
+ {Value: intNumberValues[387]},
+ {Value: intNumberValues[388]},
+ {Value: intNumberValues[389]},
+ {Value: intNumberValues[390]},
+ {Value: intNumberValues[391]},
+ {Value: intNumberValues[392]},
+ {Value: intNumberValues[393]},
+ {Value: intNumberValues[394]},
+ {Value: intNumberValues[395]},
+ {Value: intNumberValues[396]},
+ {Value: intNumberValues[397]},
+ {Value: intNumberValues[398]},
+ {Value: intNumberValues[399]},
+ {Value: intNumberValues[400]},
+ {Value: intNumberValues[401]},
+ {Value: intNumberValues[402]},
+ {Value: intNumberValues[403]},
+ {Value: intNumberValues[404]},
+ {Value: intNumberValues[405]},
+ {Value: intNumberValues[406]},
+ {Value: intNumberValues[407]},
+ {Value: intNumberValues[408]},
+ {Value: intNumberValues[409]},
+ {Value: intNumberValues[410]},
+ {Value: intNumberValues[411]},
+ {Value: intNumberValues[412]},
+ {Value: intNumberValues[413]},
+ {Value: intNumberValues[414]},
+ {Value: intNumberValues[415]},
+ {Value: intNumberValues[416]},
+ {Value: intNumberValues[417]},
+ {Value: intNumberValues[418]},
+ {Value: intNumberValues[419]},
+ {Value: intNumberValues[420]},
+ {Value: intNumberValues[421]},
+ {Value: intNumberValues[422]},
+ {Value: intNumberValues[423]},
+ {Value: intNumberValues[424]},
+ {Value: intNumberValues[425]},
+ {Value: intNumberValues[426]},
+ {Value: intNumberValues[427]},
+ {Value: intNumberValues[428]},
+ {Value: intNumberValues[429]},
+ {Value: intNumberValues[430]},
+ {Value: intNumberValues[431]},
+ {Value: intNumberValues[432]},
+ {Value: intNumberValues[433]},
+ {Value: intNumberValues[434]},
+ {Value: intNumberValues[435]},
+ {Value: intNumberValues[436]},
+ {Value: intNumberValues[437]},
+ {Value: intNumberValues[438]},
+ {Value: intNumberValues[439]},
+ {Value: intNumberValues[440]},
+ {Value: intNumberValues[441]},
+ {Value: intNumberValues[442]},
+ {Value: intNumberValues[443]},
+ {Value: intNumberValues[444]},
+ {Value: intNumberValues[445]},
+ {Value: intNumberValues[446]},
+ {Value: intNumberValues[447]},
+ {Value: intNumberValues[448]},
+ {Value: intNumberValues[449]},
+ {Value: intNumberValues[450]},
+ {Value: intNumberValues[451]},
+ {Value: intNumberValues[452]},
+ {Value: intNumberValues[453]},
+ {Value: intNumberValues[454]},
+ {Value: intNumberValues[455]},
+ {Value: intNumberValues[456]},
+ {Value: intNumberValues[457]},
+ {Value: intNumberValues[458]},
+ {Value: intNumberValues[459]},
+ {Value: intNumberValues[460]},
+ {Value: intNumberValues[461]},
+ {Value: intNumberValues[462]},
+ {Value: intNumberValues[463]},
+ {Value: intNumberValues[464]},
+ {Value: intNumberValues[465]},
+ {Value: intNumberValues[466]},
+ {Value: intNumberValues[467]},
+ {Value: intNumberValues[468]},
+ {Value: intNumberValues[469]},
+ {Value: intNumberValues[470]},
+ {Value: intNumberValues[471]},
+ {Value: intNumberValues[472]},
+ {Value: intNumberValues[473]},
+ {Value: intNumberValues[474]},
+ {Value: intNumberValues[475]},
+ {Value: intNumberValues[476]},
+ {Value: intNumberValues[477]},
+ {Value: intNumberValues[478]},
+ {Value: intNumberValues[479]},
+ {Value: intNumberValues[480]},
+ {Value: intNumberValues[481]},
+ {Value: intNumberValues[482]},
+ {Value: intNumberValues[483]},
+ {Value: intNumberValues[484]},
+ {Value: intNumberValues[485]},
+ {Value: intNumberValues[486]},
+ {Value: intNumberValues[487]},
+ {Value: intNumberValues[488]},
+ {Value: intNumberValues[489]},
+ {Value: intNumberValues[490]},
+ {Value: intNumberValues[491]},
+ {Value: intNumberValues[492]},
+ {Value: intNumberValues[493]},
+ {Value: intNumberValues[494]},
+ {Value: intNumberValues[495]},
+ {Value: intNumberValues[496]},
+ {Value: intNumberValues[497]},
+ {Value: intNumberValues[498]},
+ {Value: intNumberValues[499]},
+ {Value: intNumberValues[500]},
+ {Value: intNumberValues[501]},
+ {Value: intNumberValues[502]},
+ {Value: intNumberValues[503]},
+ {Value: intNumberValues[504]},
+ {Value: intNumberValues[505]},
+ {Value: intNumberValues[506]},
+ {Value: intNumberValues[507]},
+ {Value: intNumberValues[508]},
+ {Value: intNumberValues[509]},
+ {Value: intNumberValues[510]},
+ {Value: intNumberValues[511]},
+ {Value: intNumberValues[512]},
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/json/json.go b/vendor/github.com/open-policy-agent/opa/v1/ast/json/json.go
new file mode 100644
index 0000000000..9081fe7039
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/json/json.go
@@ -0,0 +1,106 @@
+// Copyright 2023 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// This package provides options for JSON marshalling of AST nodes, and location
+// data in particular. Since location data occupies a significant portion of the
+// AST when included, it is excluded by default. The options provided here allow
+// changing that behavior — either for all nodes or for specific types. Since
+// JSONMarshaller implementations have access only to the node being marshaled,
+// our options are to either attach these settings to *all* nodes in the AST, or
+// to provide them via global state. The former is perhaps a little more elegant,
+// and is what we went with initially. The cost of attaching these settings to
+// every node however turned out to be non-negligible, and given that the number
+// of users who have an interest in AST serialization are likely to be few, we
+// have since switched to using global state, as provided here. Note that this
+// is mostly to provide an equivalent feature to what we had before, should
+// anyone depend on that. Users who need fine-grained control over AST
+// serialization are recommended to use external libraries for that purpose,
+// such as `github.com/json-iterator/go`.
+package json
+
+import "sync"
+
+// Options defines the options for JSON operations,
+// currently only marshaling can be configured
+type Options struct {
+ MarshalOptions MarshalOptions
+}
+
+// MarshalOptions defines the options for JSON marshaling,
+// currently only toggling the marshaling of location information is supported
+type MarshalOptions struct {
+ // IncludeLocation toggles the marshaling of location information
+ IncludeLocation NodeToggle
+ // IncludeLocationText additionally/optionally includes the text of the location
+ IncludeLocationText bool
+ // ExcludeLocationFile additionally/optionally excludes the file of the location
+ // Note that this is inverted (i.e. not "include" as the default needs to remain false)
+ ExcludeLocationFile bool
+}
+
+// NodeToggle is a generic struct to allow the toggling of
+// settings for different ast node types
+type NodeToggle struct {
+ Term bool
+ Package bool
+ Comment bool
+ Import bool
+ Rule bool
+ Head bool
+ Expr bool
+ SomeDecl bool
+ Every bool
+ With bool
+ Annotations bool
+ AnnotationsRef bool
+}
+
+// configuredJSONOptions synchronizes access to the global JSON options
+type configuredJSONOptions struct {
+ options Options
+ lock sync.RWMutex
+}
+
+var options = &configuredJSONOptions{
+ options: Defaults(),
+}
+
+// SetOptions sets the global options for marshalling AST nodes to JSON
+func SetOptions(opts Options) {
+ options.lock.Lock()
+ defer options.lock.Unlock()
+ options.options = opts
+}
+
+// GetOptions returns (a copy of) the global options for marshalling AST nodes to JSON
+func GetOptions() Options {
+ options.lock.RLock()
+ defer options.lock.RUnlock()
+ return options.options
+}
+
+// Defaults returns the default JSON options, which is to exclude location
+// information in serialized JSON AST.
+func Defaults() Options {
+ return Options{
+ MarshalOptions: MarshalOptions{
+ IncludeLocation: NodeToggle{
+ Term: false,
+ Package: false,
+ Comment: false,
+ Import: false,
+ Rule: false,
+ Head: false,
+ Expr: false,
+ SomeDecl: false,
+ Every: false,
+ With: false,
+ Annotations: false,
+ AnnotationsRef: false,
+ },
+ IncludeLocationText: false,
+ ExcludeLocationFile: false,
+ },
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/location/location.go b/vendor/github.com/open-policy-agent/opa/v1/ast/location/location.go
similarity index 85%
rename from vendor/github.com/open-policy-agent/opa/ast/location/location.go
rename to vendor/github.com/open-policy-agent/opa/v1/ast/location/location.go
index 92226df3f0..6d1b16cdfc 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/location/location.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/location/location.go
@@ -7,7 +7,7 @@ import (
"errors"
"fmt"
- astJSON "github.com/open-policy-agent/opa/ast/json"
+ astJSON "github.com/open-policy-agent/opa/v1/ast/json"
)
// Location records a position in source code
@@ -18,9 +18,6 @@ type Location struct {
Col int `json:"col"` // The column in the row.
Offset int `json:"-"` // The byte offset for the location in the source.
- // JSONOptions specifies options for marshaling and unmarshalling of locations
- JSONOptions astJSON.Options
-
Tabs []int `json:"-"` // The column offsets of tabs in the source.
}
@@ -39,18 +36,18 @@ func (loc *Location) Equal(other *Location) bool {
// Errorf returns a new error value with a message formatted to include the location
// info (e.g., line, column, filename, etc.)
-func (loc *Location) Errorf(f string, a ...interface{}) error {
+func (loc *Location) Errorf(f string, a ...any) error {
return errors.New(loc.Format(f, a...))
}
// Wrapf returns a new error value that wraps an existing error with a message formatted
// to include the location info (e.g., line, column, filename, etc.)
-func (loc *Location) Wrapf(err error, f string, a ...interface{}) error {
+func (loc *Location) Wrapf(err error, f string, a ...any) error {
return fmt.Errorf(loc.Format(f, a...)+": %w", err)
}
// Format returns a formatted string prefixed with the location information.
-func (loc *Location) Format(f string, a ...interface{}) string {
+func (loc *Location) Format(f string, a ...any) string {
if len(loc.File) > 0 {
f = fmt.Sprintf("%v:%v: %v", loc.File, loc.Row, f)
} else {
@@ -98,7 +95,8 @@ func (loc *Location) Compare(other *Location) int {
func (loc *Location) MarshalJSON() ([]byte, error) {
// structs are used here to preserve the field ordering of the original Location struct
- if loc.JSONOptions.MarshalOptions.ExcludeLocationFile {
+ jsonOptions := astJSON.GetOptions().MarshalOptions
+ if jsonOptions.ExcludeLocationFile {
data := struct {
Row int `json:"row"`
Col int `json:"col"`
@@ -108,7 +106,7 @@ func (loc *Location) MarshalJSON() ([]byte, error) {
Col: loc.Col,
}
- if loc.JSONOptions.MarshalOptions.IncludeLocationText {
+ if jsonOptions.IncludeLocationText {
data.Text = loc.Text
}
@@ -126,7 +124,7 @@ func (loc *Location) MarshalJSON() ([]byte, error) {
File: loc.File,
}
- if loc.JSONOptions.MarshalOptions.IncludeLocationText {
+ if jsonOptions.IncludeLocationText {
data.Text = loc.Text
}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/map.go b/vendor/github.com/open-policy-agent/opa/v1/ast/map.go
new file mode 100644
index 0000000000..31cad4d611
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/map.go
@@ -0,0 +1,108 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "encoding/json"
+
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+// ValueMap represents a key/value map between AST term values. Any type of term
+// can be used as a key in the map.
+type ValueMap struct {
+ hashMap *util.TypedHashMap[Value, Value]
+}
+
+// NewValueMap returns a new ValueMap.
+func NewValueMap() *ValueMap {
+ return &ValueMap{
+ hashMap: util.NewTypedHashMap(ValueEqual, ValueEqual, Value.Hash, Value.Hash, nil),
+ }
+}
+
+// MarshalJSON provides a custom marshaller for the ValueMap which
+// will include the key, value, and value type.
+func (vs *ValueMap) MarshalJSON() ([]byte, error) {
+ var tmp []map[string]any
+ vs.Iter(func(k Value, v Value) bool {
+ tmp = append(tmp, map[string]any{
+ "name": k.String(),
+ "type": ValueName(v),
+ "value": v,
+ })
+ return false
+ })
+ return json.Marshal(tmp)
+}
+
+// Equal returns true if this ValueMap equals the other.
+func (vs *ValueMap) Equal(other *ValueMap) bool {
+ if vs == nil {
+ return other == nil || other.Len() == 0
+ }
+ if other == nil {
+ return vs.Len() == 0
+ }
+ return vs.hashMap.Equal(other.hashMap)
+}
+
+// Len returns the number of elements in the map.
+func (vs *ValueMap) Len() int {
+ if vs == nil {
+ return 0
+ }
+ return vs.hashMap.Len()
+}
+
+// Get returns the value in the map for k.
+func (vs *ValueMap) Get(k Value) Value {
+ if vs != nil {
+ if v, ok := vs.hashMap.Get(k); ok {
+ return v
+ }
+ }
+ return nil
+}
+
+// Hash returns a hash code for this ValueMap.
+func (vs *ValueMap) Hash() int {
+ if vs == nil {
+ return 0
+ }
+ return vs.hashMap.Hash()
+}
+
+// Iter calls the iter function for each key/value pair in the map. If the iter
+// function returns true, iteration stops.
+func (vs *ValueMap) Iter(iter func(Value, Value) bool) bool {
+ if vs == nil {
+ return false
+ }
+ return vs.hashMap.Iter(iter)
+}
+
+// Put inserts a key k into the map with value v.
+func (vs *ValueMap) Put(k, v Value) {
+ if vs == nil {
+ panic("put on nil value map")
+ }
+ vs.hashMap.Put(k, v)
+}
+
+// Delete removes a key k from the map.
+func (vs *ValueMap) Delete(k Value) {
+ if vs == nil {
+ return
+ }
+ vs.hashMap.Delete(k)
+}
+
+func (vs *ValueMap) String() string {
+ if vs == nil {
+ return "{}"
+ }
+ return vs.hashMap.String()
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/parser.go b/vendor/github.com/open-policy-agent/opa/v1/ast/parser.go
new file mode 100644
index 0000000000..8355186cb9
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/parser.go
@@ -0,0 +1,3088 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "maps"
+ "math/big"
+ "net/url"
+ "regexp"
+ "slices"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+
+ "gopkg.in/yaml.v3"
+
+ "github.com/open-policy-agent/opa/v1/ast/internal/scanner"
+ "github.com/open-policy-agent/opa/v1/ast/internal/tokens"
+ astJSON "github.com/open-policy-agent/opa/v1/ast/json"
+ "github.com/open-policy-agent/opa/v1/ast/location"
+)
+
+// DefaultMaxParsingRecursionDepth is the default maximum recursion
+// depth for the parser
+const DefaultMaxParsingRecursionDepth = 100000
+
+// ErrMaxParsingRecursionDepthExceeded is returned when the parser
+// recursion exceeds the maximum allowed depth
+var ErrMaxParsingRecursionDepthExceeded = errors.New("max parsing recursion depth exceeded")
+
+var RegoV1CompatibleRef = Ref{VarTerm("rego"), InternedTerm("v1")}
+
+// RegoVersion defines the Rego syntax requirements for a module.
+type RegoVersion int
+
+const DefaultRegoVersion = RegoV1
+
+const (
+ RegoUndefined RegoVersion = iota
+ // RegoV0 is the default, original Rego syntax.
+ RegoV0
+ // RegoV0CompatV1 requires modules to comply with both the RegoV0 and RegoV1 syntax (as when 'rego.v1' is imported in a module).
+ // Shortly, RegoV1 compatibility is required, but 'rego.v1' or 'future.keywords' must also be imported.
+ RegoV0CompatV1
+ // RegoV1 is the Rego syntax enforced by OPA 1.0; e.g.:
+ // future.keywords part of default keyword set, and don't require imports;
+ // 'if' and 'contains' required in rule heads;
+ // (some) strict checks on by default.
+ RegoV1
+)
+
+func (v RegoVersion) Int() int {
+ if v == RegoV1 {
+ return 1
+ }
+ return 0
+}
+
+func (v RegoVersion) String() string {
+ switch v {
+ case RegoV0:
+ return "v0"
+ case RegoV1:
+ return "v1"
+ case RegoV0CompatV1:
+ return "v0v1"
+ default:
+ return "unknown"
+ }
+}
+
+func RegoVersionFromInt(i int) RegoVersion {
+ if i == 1 {
+ return RegoV1
+ }
+ return RegoV0
+}
+
+// Note: This state is kept isolated from the parser so that we
+// can do efficient shallow copies of these values when doing a
+// save() and restore().
+type state struct {
+ s *scanner.Scanner
+ lastEnd int
+ skippedNL bool
+ tok tokens.Token
+ tokEnd int
+ lit string
+ loc Location
+ errors Errors
+ hints []string
+ comments []*Comment
+ wildcard int
+}
+
+func (s *state) String() string {
+ return fmt.Sprintf("", s.s, s.tok, s.lit, s.loc, len(s.errors), len(s.comments))
+}
+
+func (s *state) Loc() *location.Location {
+ cpy := s.loc
+ return &cpy
+}
+
+func (s *state) Text(offset, end int) []byte {
+ bs := s.s.Bytes()
+ if offset >= 0 && offset < len(bs) {
+ if end >= offset && end <= len(bs) {
+ return bs[offset:end]
+ }
+ }
+ return nil
+}
+
+// Parser is used to parse Rego statements.
+type Parser struct {
+ r io.Reader
+ s *state
+ po ParserOptions
+ cache parsedTermCache
+ recursionDepth int
+ maxRecursionDepth int
+}
+
+type parsedTermCacheItem struct {
+ t *Term
+ post *state // post is the post-state that's restored on a cache-hit
+ offset int
+ next *parsedTermCacheItem
+}
+
+type parsedTermCache struct {
+ m *parsedTermCacheItem
+}
+
+func (c parsedTermCache) String() string {
+ s := strings.Builder{}
+ s.WriteRune('{')
+ var e *parsedTermCacheItem
+ for e = c.m; e != nil; e = e.next {
+ s.WriteString(e.String())
+ }
+ s.WriteRune('}')
+ return s.String()
+}
+
+func (e *parsedTermCacheItem) String() string {
+ return fmt.Sprintf("<%d:%v>", e.offset, e.t)
+}
+
+// ParserOptions defines the options for parsing Rego statements.
+type ParserOptions struct {
+ Capabilities *Capabilities
+ ProcessAnnotation bool
+ AllFutureKeywords bool
+ FutureKeywords []string
+ SkipRules bool
+ // RegoVersion is the version of Rego to parse for.
+ RegoVersion RegoVersion
+ unreleasedKeywords bool // TODO(sr): cleanup
+}
+
+// EffectiveRegoVersion returns the effective RegoVersion to use for parsing.
+func (po *ParserOptions) EffectiveRegoVersion() RegoVersion {
+ if po.RegoVersion == RegoUndefined {
+ return DefaultRegoVersion
+ }
+ return po.RegoVersion
+}
+
+// NewParser creates and initializes a Parser.
+func NewParser() *Parser {
+ p := &Parser{
+ s: &state{},
+ po: ParserOptions{},
+ maxRecursionDepth: DefaultMaxParsingRecursionDepth,
+ }
+ return p
+}
+
+// WithMaxRecursionDepth sets the maximum recursion depth for the parser.
+func (p *Parser) WithMaxRecursionDepth(depth int) *Parser {
+ p.maxRecursionDepth = depth
+ return p
+}
+
+// WithFilename provides the filename for Location details
+// on parsed statements.
+func (p *Parser) WithFilename(filename string) *Parser {
+ p.s.loc.File = filename
+ return p
+}
+
+// WithReader provides the io.Reader that the parser will
+// use as its source.
+func (p *Parser) WithReader(r io.Reader) *Parser {
+ p.r = r
+ return p
+}
+
+// WithProcessAnnotation enables or disables the processing of
+// annotations by the Parser
+func (p *Parser) WithProcessAnnotation(processAnnotation bool) *Parser {
+ p.po.ProcessAnnotation = processAnnotation
+ return p
+}
+
+// WithFutureKeywords enables "future" keywords, i.e., keywords that can
+// be imported via
+//
+// import future.keywords.kw
+// import future.keywords.other
+//
+// but in a more direct way. The equivalent of this import would be
+//
+// WithFutureKeywords("kw", "other")
+func (p *Parser) WithFutureKeywords(kws ...string) *Parser {
+ p.po.FutureKeywords = kws
+ return p
+}
+
+// WithAllFutureKeywords enables all "future" keywords, i.e., the
+// ParserOption equivalent of
+//
+// import future.keywords
+func (p *Parser) WithAllFutureKeywords(yes bool) *Parser {
+ p.po.AllFutureKeywords = yes
+ return p
+}
+
+// withUnreleasedKeywords allows using keywords that haven't surfaced
+// as future keywords (see above) yet, but have tests that require
+// them to be parsed
+func (p *Parser) withUnreleasedKeywords(yes bool) *Parser {
+ p.po.unreleasedKeywords = yes
+ return p
+}
+
+// WithCapabilities sets the capabilities structure on the parser.
+func (p *Parser) WithCapabilities(c *Capabilities) *Parser {
+ p.po.Capabilities = c
+ return p
+}
+
+// WithSkipRules instructs the parser not to attempt to parse Rule statements.
+func (p *Parser) WithSkipRules(skip bool) *Parser {
+ p.po.SkipRules = skip
+ return p
+}
+
+// WithJSONOptions sets the JSON options on the parser (now a no-op).
+//
+// Deprecated: Use SetOptions in the json package instead, where a longer description
+// of why this is deprecated also can be found.
+func (p *Parser) WithJSONOptions(_ *astJSON.Options) *Parser {
+ return p
+}
+
+func (p *Parser) WithRegoVersion(version RegoVersion) *Parser {
+ p.po.RegoVersion = version
+ return p
+}
+
+func (p *Parser) parsedTermCacheLookup() (*Term, *state) {
+ l := p.s.loc.Offset
+ // stop comparing once the cached offsets are lower than l
+ for h := p.cache.m; h != nil && h.offset >= l; h = h.next {
+ if h.offset == l {
+ return h.t, h.post
+ }
+ }
+ return nil, nil
+}
+
+func (p *Parser) parsedTermCachePush(t *Term, s0 *state) {
+ s1 := p.save()
+ o0 := s0.loc.Offset
+ entry := parsedTermCacheItem{t: t, post: s1, offset: o0}
+
+ // find the first one whose offset is smaller than ours
+ var e *parsedTermCacheItem
+ for e = p.cache.m; e != nil; e = e.next {
+ if e.offset < o0 {
+ break
+ }
+ }
+ entry.next = e
+ p.cache.m = &entry
+}
+
+// futureParser returns a shallow copy of `p` with an empty
+// cache, and a scanner that knows all future keywords.
+// It's used to present hints in errors, when statements would
+// only parse successfully if some future keyword is enabled.
+func (p *Parser) futureParser() *Parser {
+ q := *p
+ q.s = p.save()
+ q.s.s = p.s.s.WithKeywords(allFutureKeywords)
+ q.cache = parsedTermCache{}
+ return &q
+}
+
+// presentParser returns a shallow copy of `p` with an empty
+// cache, and a scanner that knows none of the future keywords.
+// It is used to successfully parse keyword imports, like
+//
+// import future.keywords.in
+//
+// even when the parser has already been informed about the
+// future keyword "in". This parser won't error out because
+// "in" is an identifier.
+func (p *Parser) presentParser() (*Parser, map[string]tokens.Token) {
+ var cpy map[string]tokens.Token
+ q := *p
+ q.s = p.save()
+ q.s.s, cpy = p.s.s.WithoutKeywords(allFutureKeywords)
+ q.cache = parsedTermCache{}
+ return &q, cpy
+}
+
+// Parse will read the Rego source and parse statements and
+// comments as they are found. Any errors encountered while
+// parsing will be accumulated and returned as a list of Errors.
+func (p *Parser) Parse() ([]Statement, []*Comment, Errors) {
+
+ if p.po.Capabilities == nil {
+ p.po.Capabilities = CapabilitiesForThisVersion(CapabilitiesRegoVersion(p.po.RegoVersion))
+ }
+
+ allowedFutureKeywords := map[string]tokens.Token{}
+
+ if p.po.EffectiveRegoVersion() == RegoV1 {
+ if !p.po.Capabilities.ContainsFeature(FeatureRegoV1) {
+ return nil, nil, Errors{
+ &Error{
+ Code: ParseErr,
+ Message: "illegal capabilities: rego_v1 feature required for parsing v1 Rego",
+ Location: nil,
+ },
+ }
+ }
+
+ // rego-v1 includes all v0 future keywords in the default language definition
+ maps.Copy(allowedFutureKeywords, futureKeywordsV0)
+
+ for _, kw := range p.po.Capabilities.FutureKeywords {
+ if tok, ok := futureKeywords[kw]; ok {
+ allowedFutureKeywords[kw] = tok
+ } else {
+ // For sake of error reporting, we still need to check that keywords in capabilities are known in v0
+ if _, ok := futureKeywordsV0[kw]; !ok {
+ return nil, nil, Errors{
+ &Error{
+ Code: ParseErr,
+ Message: fmt.Sprintf("illegal capabilities: unknown keyword: %v", kw),
+ Location: nil,
+ },
+ }
+ }
+ }
+ }
+
+ // Check that explicitly requested future keywords are known.
+ for _, kw := range p.po.FutureKeywords {
+ if _, ok := allowedFutureKeywords[kw]; !ok {
+ return nil, nil, Errors{
+ &Error{
+ Code: ParseErr,
+ Message: fmt.Sprintf("unknown future keyword: %v", kw),
+ Location: nil,
+ },
+ }
+ }
+ }
+ } else {
+ for _, kw := range p.po.Capabilities.FutureKeywords {
+ var ok bool
+ allowedFutureKeywords[kw], ok = allFutureKeywords[kw]
+ if !ok {
+ return nil, nil, Errors{
+ &Error{
+ Code: ParseErr,
+ Message: fmt.Sprintf("illegal capabilities: unknown keyword: %v", kw),
+ Location: nil,
+ },
+ }
+ }
+ }
+
+ if p.po.Capabilities.ContainsFeature(FeatureRegoV1) {
+ // rego-v1 includes all v0 future keywords in the default language definition
+ maps.Copy(allowedFutureKeywords, futureKeywordsV0)
+ }
+ }
+
+ var err error
+ p.s.s, err = scanner.New(p.r)
+ if err != nil {
+ return nil, nil, Errors{
+ &Error{
+ Code: ParseErr,
+ Message: err.Error(),
+ Location: nil,
+ },
+ }
+ }
+
+ selected := map[string]tokens.Token{}
+ if p.po.AllFutureKeywords || p.po.EffectiveRegoVersion() == RegoV1 {
+ maps.Copy(selected, allowedFutureKeywords)
+ } else {
+ for _, kw := range p.po.FutureKeywords {
+ tok, ok := allowedFutureKeywords[kw]
+ if !ok {
+ return nil, nil, Errors{
+ &Error{
+ Code: ParseErr,
+ Message: fmt.Sprintf("unknown future keyword: %v", kw),
+ Location: nil,
+ },
+ }
+ }
+ selected[kw] = tok
+ }
+ }
+ p.s.s = p.s.s.WithKeywords(selected)
+
+ if p.po.EffectiveRegoVersion() == RegoV1 {
+ for kw, tok := range allowedFutureKeywords {
+ p.s.s.AddKeyword(kw, tok)
+ }
+ }
+
+ // read the first token to initialize the parser
+ p.scan()
+
+ var stmts []Statement
+
+ // Read from the scanner until the last token is reached or no statements
+ // can be parsed. Attempt to parse package statements, import statements,
+ // rule statements, and then body/query statements (in that order). If a
+ // statement cannot be parsed, restore the parser state before trying the
+ // next type of statement. If a statement can be parsed, continue from that
+ // point trying to parse packages, imports, etc. in the same order.
+ for p.s.tok != tokens.EOF {
+
+ s := p.save()
+
+ if pkg := p.parsePackage(); pkg != nil {
+ stmts = append(stmts, pkg)
+ continue
+ } else if len(p.s.errors) > 0 {
+ break
+ }
+
+ p.restore(s)
+ s = p.save()
+
+ if imp := p.parseImport(); imp != nil {
+ if RegoRootDocument.Equal(imp.Path.Value.(Ref)[0]) {
+ p.regoV1Import(imp)
+ }
+
+ if FutureRootDocument.Equal(imp.Path.Value.(Ref)[0]) {
+ p.futureImport(imp, allowedFutureKeywords)
+ }
+
+ stmts = append(stmts, imp)
+ continue
+ } else if len(p.s.errors) > 0 {
+ break
+ }
+
+ p.restore(s)
+
+ if !p.po.SkipRules {
+ s = p.save()
+
+ if rules := p.parseRules(); rules != nil {
+ for i := range rules {
+ stmts = append(stmts, rules[i])
+ }
+ continue
+ } else if len(p.s.errors) > 0 {
+ break
+ }
+
+ p.restore(s)
+ }
+
+ if body := p.parseQuery(true, tokens.EOF); body != nil {
+ stmts = append(stmts, body)
+ continue
+ }
+
+ break
+ }
+
+ if p.po.ProcessAnnotation {
+ stmts = p.parseAnnotations(stmts)
+ }
+
+ return stmts, p.s.comments, p.s.errors
+}
+
+func (p *Parser) parseAnnotations(stmts []Statement) []Statement {
+
+ annotStmts, errs := parseAnnotations(p.s.comments)
+ for _, err := range errs {
+ p.error(err.Location, err.Message)
+ }
+
+ for _, annotStmt := range annotStmts {
+ stmts = append(stmts, annotStmt)
+ }
+
+ return stmts
+}
+
+func parseAnnotations(comments []*Comment) ([]*Annotations, Errors) {
+
+ var hint = []byte("METADATA")
+ var curr *metadataParser
+ var blocks []*metadataParser
+
+ for i := range comments {
+ if curr != nil {
+ if comments[i].Location.Row == comments[i-1].Location.Row+1 && comments[i].Location.Col == 1 {
+ curr.Append(comments[i])
+ continue
+ }
+ curr = nil
+ }
+ if bytes.HasPrefix(bytes.TrimSpace(comments[i].Text), hint) {
+ curr = newMetadataParser(comments[i].Location)
+ blocks = append(blocks, curr)
+ }
+ }
+
+ var stmts []*Annotations
+ var errs Errors
+ for _, b := range blocks {
+ a, err := b.Parse()
+ if err != nil {
+ errs = append(errs, &Error{
+ Code: ParseErr,
+ Message: err.Error(),
+ Location: b.loc,
+ })
+ } else {
+ stmts = append(stmts, a)
+ }
+ }
+
+ return stmts, errs
+}
+
+func (p *Parser) parsePackage() *Package {
+
+ var pkg Package
+ pkg.SetLoc(p.s.Loc())
+
+ if p.s.tok != tokens.Package {
+ return nil
+ }
+
+ p.scanWS()
+
+ // Make sure we allow the first term of refs to be the 'package' keyword.
+ if p.s.tok == tokens.Dot || p.s.tok == tokens.LBrack {
+ // This is a ref, not a package declaration.
+ return nil
+ }
+
+ if p.s.tok == tokens.Whitespace {
+ p.scan()
+ }
+
+ if !isIdentOrAllowedRefKeyword(p) {
+ p.illegalToken()
+ return nil
+ }
+
+ term := p.parseTerm()
+
+ if term != nil {
+ switch v := term.Value.(type) {
+ case Var:
+ pkg.Path = Ref{
+ DefaultRootDocument.Copy().SetLocation(term.Location),
+ StringTerm(string(v)).SetLocation(term.Location),
+ }
+ case Ref:
+ pkg.Path = make(Ref, len(v)+1)
+ pkg.Path[0] = DefaultRootDocument.Copy().SetLocation(v[0].Location)
+ first, ok := v[0].Value.(Var)
+ if !ok {
+ p.errorf(v[0].Location, "unexpected %v token: expecting var", ValueName(v[0].Value))
+ return nil
+ }
+ pkg.Path[1] = StringTerm(string(first)).SetLocation(v[0].Location)
+ for i := 2; i < len(pkg.Path); i++ {
+ switch v[i-1].Value.(type) {
+ case String:
+ pkg.Path[i] = v[i-1]
+ default:
+ p.errorf(v[i-1].Location, "unexpected %v token: expecting string", ValueName(v[i-1].Value))
+ return nil
+ }
+ }
+ default:
+ p.illegalToken()
+ return nil
+ }
+ }
+
+ if pkg.Path == nil {
+ if len(p.s.errors) == 0 {
+ p.error(p.s.Loc(), "expected path")
+ }
+ return nil
+ }
+
+ return &pkg
+}
+
+func (p *Parser) parseImport() *Import {
+
+ var imp Import
+ imp.SetLoc(p.s.Loc())
+
+ if p.s.tok != tokens.Import {
+ return nil
+ }
+
+ p.scanWS()
+
+ // Make sure we allow the first term of refs to be the 'import' keyword.
+ if p.s.tok == tokens.Dot || p.s.tok == tokens.LBrack {
+ // This is a ref, not an import declaration.
+ return nil
+ }
+
+ if p.s.tok == tokens.Whitespace {
+ p.scan()
+ }
+
+ if !isIdentOrAllowedRefKeyword(p) {
+ p.illegalToken()
+ return nil
+ }
+
+ q, prev := p.presentParser()
+ term := q.parseTerm()
+ if term != nil {
+ switch v := term.Value.(type) {
+ case Var:
+ imp.Path = RefTerm(term).SetLocation(term.Location)
+ case Ref:
+ for i := 1; i < len(v); i++ {
+ if _, ok := v[i].Value.(String); !ok {
+ p.errorf(v[i].Location, "unexpected %v token: expecting string", ValueName(v[i].Value))
+ return nil
+ }
+ }
+ imp.Path = term
+ }
+ }
+ // keep advanced parser state, reset known keywords
+ p.s = q.s
+ p.s.s = q.s.s.WithKeywords(prev)
+
+ if imp.Path == nil {
+ p.error(p.s.Loc(), "expected path")
+ return nil
+ }
+
+ path := imp.Path.Value.(Ref)
+
+ switch {
+ case RootDocumentNames.Contains(path[0]):
+ case FutureRootDocument.Equal(path[0]):
+ case RegoRootDocument.Equal(path[0]):
+ default:
+ p.hint("if this is unexpected, try updating OPA")
+ p.errorf(imp.Path.Location, "unexpected import path, must begin with one of: %v, got: %v",
+ RootDocumentNames.Union(NewSet(FutureRootDocument, RegoRootDocument)),
+ path[0])
+ return nil
+ }
+
+ if p.s.tok == tokens.As {
+ p.scan()
+
+ if p.s.tok != tokens.Ident {
+ p.illegal("expected var")
+ return nil
+ }
+
+ if alias := p.parseTerm(); alias != nil {
+ v, ok := alias.Value.(Var)
+ if ok {
+ imp.Alias = v
+ return &imp
+ }
+ }
+ p.illegal("expected var")
+ return nil
+ }
+
+ if imp.Alias != "" {
+ // Unreachable: parsing the alias var should already have generated an error.
+ name := imp.Alias.String()
+ if IsKeywordInRegoVersion(name, p.po.EffectiveRegoVersion()) {
+ p.errorf(imp.Location, "unexpected import alias, must not be a keyword, got: %s", name)
+ }
+ return &imp
+ }
+
+ r := imp.Path.Value.(Ref)
+
+ // Don't allow keywords in the tail path term unless it's a future import
+ if len(r) == 1 {
+ t := r[0]
+ name := string(t.Value.(Var))
+ if IsKeywordInRegoVersion(name, p.po.EffectiveRegoVersion()) {
+ p.errorf(t.Location, "unexpected import path, must not end with a keyword, got: %s", name)
+ p.hint("import a different path or use an alias")
+ }
+ } else if !FutureRootDocument.Equal(r[0]) {
+ t := r[len(r)-1]
+ name := string(t.Value.(String))
+ if IsKeywordInRegoVersion(name, p.po.EffectiveRegoVersion()) {
+ p.errorf(t.Location, "unexpected import path, must not end with a keyword, got: %s", name)
+ p.hint("import a different path or use an alias")
+ }
+ }
+
+ return &imp
+}
+
+// isIdentOrAllowedRefKeyword checks if the current token is an Ident or a keyword in the active rego-version.
+// If a keyword, sets p.s.token to token.Ident
+func isIdentOrAllowedRefKeyword(p *Parser) bool {
+ if p.s.tok == tokens.Ident {
+ return true
+ }
+
+ if p.isAllowedRefKeyword(p.s.tok) {
+ p.s.tok = tokens.Ident
+ return true
+ }
+
+ return false
+}
+
+func scanAheadRef(p *Parser) bool {
+ if p.isAllowedRefKeyword(p.s.tok) {
+ // scan ahead to check if we're parsing a ref
+ s := p.save()
+ p.scanWS()
+ tok := p.s.tok
+ p.restore(s)
+
+ if tok == tokens.Dot || tok == tokens.LBrack {
+ p.s.tok = tokens.Ident
+ return true
+ }
+ }
+
+ return false
+}
+
+func (p *Parser) parseRules() []*Rule {
+
+ var rule Rule
+ rule.SetLoc(p.s.Loc())
+
+ // This allows keywords in the first var term of the ref
+ _ = scanAheadRef(p)
+
+ if p.s.tok == tokens.Default {
+ p.scan()
+ rule.Default = true
+ _ = scanAheadRef(p)
+ }
+
+ if p.s.tok != tokens.Ident {
+ return nil
+ }
+
+ usesContains := false
+ if rule.Head, usesContains = p.parseHead(rule.Default); rule.Head == nil {
+ return nil
+ }
+
+ if usesContains {
+ rule.Head.keywords = append(rule.Head.keywords, tokens.Contains)
+ }
+
+ if rule.Default {
+ if !p.validateDefaultRuleValue(&rule) {
+ return nil
+ }
+
+ if len(rule.Head.Args) > 0 {
+ if !p.validateDefaultRuleArgs(&rule) {
+ return nil
+ }
+ }
+
+ rule.Body = NewBody(NewExpr(BooleanTerm(true).SetLocation(rule.Location)).SetLocation(rule.Location))
+ return []*Rule{&rule}
+ }
+
+ // back-compat with `p[x] { ... }``
+ hasIf := p.s.tok == tokens.If
+
+ // p[x] if ... becomes a single-value rule p[x]
+ if hasIf && !usesContains && len(rule.Head.Ref()) == 2 {
+ v := rule.Head.Ref()[1]
+ _, isRef := v.Value.(Ref)
+ if (!v.IsGround() || isRef) && len(rule.Head.Args) == 0 {
+ rule.Head.Key = rule.Head.Ref()[1]
+ }
+
+ if rule.Head.Value == nil {
+ rule.Head.generatedValue = true
+ rule.Head.Value = BooleanTerm(true).SetLocation(rule.Head.Location)
+ } else {
+ // p[x] = y if becomes a single-value rule p[x] with value y, but needs name for compat
+ v, ok := rule.Head.Ref()[0].Value.(Var)
+ if !ok {
+ return nil
+ }
+ rule.Head.Name = v
+ }
+ }
+
+ // p[x] becomes a multi-value rule p
+ if !hasIf && !usesContains &&
+ len(rule.Head.Args) == 0 && // not a function
+ len(rule.Head.Ref()) == 2 { // ref like 'p[x]'
+ v, ok := rule.Head.Ref()[0].Value.(Var)
+ if !ok {
+ return nil
+ }
+ rule.Head.Name = v
+ rule.Head.Key = rule.Head.Ref()[1]
+ if rule.Head.Value == nil {
+ rule.Head.SetRef(rule.Head.Ref()[:len(rule.Head.Ref())-1])
+ }
+ }
+
+ switch {
+ case hasIf:
+ rule.Head.keywords = append(rule.Head.keywords, tokens.If)
+ p.scan()
+ s := p.save()
+ if expr := p.parseLiteral(); expr != nil {
+ // NOTE(sr): set literals are never false or undefined, so parsing this as
+ // p if { true }
+ // ^^^^^^^^ set of one element, `true`
+ // isn't valid.
+ isSetLiteral := false
+ if t, ok := expr.Terms.(*Term); ok {
+ _, isSetLiteral = t.Value.(Set)
+ }
+ // expr.Term is []*Term or Every
+ if !isSetLiteral {
+ rule.Body.Append(expr)
+ break
+ }
+ }
+
+ // parsing as literal didn't work out, expect '{ BODY }'
+ p.restore(s)
+ fallthrough
+
+ case p.s.tok == tokens.LBrace:
+ p.scan()
+ if rule.Body = p.parseBody(tokens.RBrace); rule.Body == nil {
+ return nil
+ }
+ p.scan()
+
+ case usesContains:
+ rule.Body = NewBody(NewExpr(BooleanTerm(true).SetLocation(rule.Location)).SetLocation(rule.Location))
+ rule.generatedBody = true
+ rule.Location = rule.Head.Location
+
+ return []*Rule{&rule}
+
+ default:
+ return nil
+ }
+
+ if p.s.tok == tokens.Else {
+ // This might just be a refhead rule with a leading 'else' term.
+ if !scanAheadRef(p) {
+ if r := rule.Head.Ref(); len(r) > 1 && !r.IsGround() {
+ p.error(p.s.Loc(), "else keyword cannot be used on rules with variables in head")
+ return nil
+ }
+ if rule.Head.Key != nil {
+ p.error(p.s.Loc(), "else keyword cannot be used on multi-value rules")
+ return nil
+ }
+
+ if rule.Else = p.parseElse(rule.Head); rule.Else == nil {
+ return nil
+ }
+ }
+ }
+
+ rule.Location.Text = p.s.Text(rule.Location.Offset, p.s.lastEnd)
+
+ rules := []*Rule{&rule}
+
+ for p.s.tok == tokens.LBrace {
+
+ if rule.Else != nil {
+ p.error(p.s.Loc(), "expected else keyword")
+ return nil
+ }
+
+ loc := p.s.Loc()
+
+ p.scan()
+ var next Rule
+
+ if next.Body = p.parseBody(tokens.RBrace); next.Body == nil {
+ return nil
+ }
+ p.scan()
+
+ loc.Text = p.s.Text(loc.Offset, p.s.lastEnd)
+ next.SetLoc(loc)
+
+ // Chained rule head's keep the original
+ // rule's head AST but have their location
+ // set to the rule body.
+ next.Head = rule.Head.Copy()
+ next.Head.keywords = rule.Head.keywords
+ for i := range next.Head.Args {
+ if v, ok := next.Head.Args[i].Value.(Var); ok && v.IsWildcard() {
+ next.Head.Args[i].Value = Var(p.genwildcard())
+ }
+ }
+ setLocRecursive(next.Head, loc)
+
+ rules = append(rules, &next)
+ }
+
+ return rules
+}
+
+func (p *Parser) parseElse(head *Head) *Rule {
+
+ var rule Rule
+ rule.SetLoc(p.s.Loc())
+
+ rule.Head = head.Copy()
+ rule.Head.generatedValue = false
+ for i := range rule.Head.Args {
+ if v, ok := rule.Head.Args[i].Value.(Var); ok && v.IsWildcard() {
+ rule.Head.Args[i].Value = Var(p.genwildcard())
+ }
+ }
+ rule.Head.SetLoc(p.s.Loc())
+
+ defer func() {
+ rule.Location.Text = p.s.Text(rule.Location.Offset, p.s.lastEnd)
+ }()
+
+ p.scan()
+
+ switch p.s.tok {
+ case tokens.LBrace, tokens.If: // no value, but a body follows directly
+ rule.Head.generatedValue = true
+ rule.Head.Value = BooleanTerm(true)
+ case tokens.Assign, tokens.Unify:
+ rule.Head.Assign = tokens.Assign == p.s.tok
+ p.scan()
+ rule.Head.Value = p.parseTermInfixCall()
+ if rule.Head.Value == nil {
+ return nil
+ }
+ rule.Head.Location.Text = p.s.Text(rule.Head.Location.Offset, p.s.lastEnd)
+ default:
+ p.illegal("expected else value term or rule body")
+ return nil
+ }
+
+ hasIf := p.s.tok == tokens.If
+ hasLBrace := p.s.tok == tokens.LBrace
+
+ if !hasIf && !hasLBrace {
+ rule.Body = NewBody(NewExpr(BooleanTerm(true)))
+ rule.generatedBody = true
+ setLocRecursive(rule.Body, rule.Location)
+ return &rule
+ }
+
+ if hasIf {
+ rule.Head.keywords = append(rule.Head.keywords, tokens.If)
+ p.scan()
+ }
+
+ if p.s.tok == tokens.LBrace {
+ p.scan()
+ if rule.Body = p.parseBody(tokens.RBrace); rule.Body == nil {
+ return nil
+ }
+ p.scan()
+ } else if p.s.tok != tokens.EOF {
+ expr := p.parseLiteral()
+ if expr == nil {
+ return nil
+ }
+ rule.Body.Append(expr)
+ setLocRecursive(rule.Body, rule.Location)
+ } else {
+ p.illegal("rule body expected")
+ return nil
+ }
+
+ if p.s.tok == tokens.Else {
+ if rule.Else = p.parseElse(head); rule.Else == nil {
+ return nil
+ }
+ }
+ return &rule
+}
+
+func (p *Parser) parseHead(defaultRule bool) (*Head, bool) {
+ head := &Head{}
+ loc := p.s.Loc()
+ defer func() {
+ if head != nil {
+ head.SetLoc(loc)
+ head.Location.Text = p.s.Text(head.Location.Offset, p.s.lastEnd)
+ }
+ }()
+
+ term := p.parseVar()
+ if term == nil {
+ return nil, false
+ }
+
+ ref := p.parseHeadFinish(term, true)
+ if ref == nil {
+ p.illegal("expected rule head name")
+ return nil, false
+ }
+
+ switch x := ref.Value.(type) {
+ case Var:
+ // TODO
+ head = VarHead(x, ref.Location, nil)
+ case Ref:
+ head = RefHead(x)
+ case Call:
+ op, args := x[0], x[1:]
+ var ref Ref
+ switch y := op.Value.(type) {
+ case Var:
+ ref = Ref{op}
+ case Ref:
+ if _, ok := y[0].Value.(Var); !ok {
+ p.illegal("rule head ref %v invalid", y)
+ return nil, false
+ }
+ ref = y
+ }
+ head = RefHead(ref)
+ head.Args = slices.Clone[[]*Term](args)
+
+ default:
+ return nil, false
+ }
+
+ name := head.Ref().String()
+
+ switch p.s.tok {
+ case tokens.Contains: // NOTE: no Value for `contains` heads, we return here
+ // Catch error case of using 'contains' with a function definition rule head.
+ if head.Args != nil {
+ p.illegal("the contains keyword can only be used with multi-value rule definitions (e.g., %s contains { ... })", name)
+ }
+ p.scan()
+ head.Key = p.parseTermInfixCall()
+ if head.Key == nil {
+ p.illegal("expected rule key term (e.g., %s contains { ... })", name)
+ }
+ return head, true
+
+ case tokens.Unify:
+ p.scan()
+ head.Value = p.parseTermInfixCall()
+ if head.Value == nil {
+ // FIX HEAD.String()
+ p.illegal("expected rule value term (e.g., %s[%s] = { ... })", name, head.Key)
+ }
+ case tokens.Assign:
+ p.scan()
+ head.Assign = true
+ head.Value = p.parseTermInfixCall()
+ if head.Value == nil {
+ switch {
+ case len(head.Args) > 0:
+ p.illegal("expected function value term (e.g., %s(...) := { ... })", name)
+ case head.Key != nil:
+ p.illegal("expected partial rule value term (e.g., %s[...] := { ... })", name)
+ case defaultRule:
+ p.illegal("expected default rule value term (e.g., default %s := )", name)
+ default:
+ p.illegal("expected rule value term (e.g., %s := { ... })", name)
+ }
+ }
+ }
+
+ if head.Value == nil && head.Key == nil {
+ if len(head.Ref()) != 2 || len(head.Args) > 0 {
+ head.generatedValue = true
+ head.Value = BooleanTerm(true).SetLocation(head.Location)
+ }
+ }
+ return head, false
+}
+
+func (p *Parser) parseBody(end tokens.Token) Body {
+ if !p.enter() {
+ return nil
+ }
+ defer p.leave()
+ return p.parseQuery(false, end)
+}
+
+func (p *Parser) parseQuery(requireSemi bool, end tokens.Token) Body {
+ body := Body{}
+
+ if p.s.tok == end {
+ p.error(p.s.Loc(), "found empty body")
+ return nil
+ }
+
+ for {
+ expr := p.parseLiteral()
+ if expr == nil {
+ return nil
+ }
+
+ body.Append(expr)
+
+ if p.s.tok == tokens.Semicolon {
+ p.scan()
+ continue
+ }
+
+ if p.s.tok == end || requireSemi {
+ return body
+ }
+
+ if !p.s.skippedNL {
+ // If there was already an error then don't pile this one on
+ if len(p.s.errors) == 0 {
+ p.illegal(`expected \n or %s or %s`, tokens.Semicolon, end)
+ }
+ return nil
+ }
+ }
+}
+
+func (p *Parser) parseLiteral() (expr *Expr) {
+
+ offset := p.s.loc.Offset
+ loc := p.s.Loc()
+
+ defer func() {
+ if expr != nil {
+ loc.Text = p.s.Text(offset, p.s.lastEnd)
+ expr.SetLoc(loc)
+ }
+ }()
+
+ // Check that we're not parsing a ref
+ if p.isAllowedRefKeyword(p.s.tok) {
+ // Scan ahead
+ s := p.save()
+ p.scanWS()
+ tok := p.s.tok
+ p.restore(s)
+
+ if tok == tokens.Dot || tok == tokens.LBrack {
+ p.s.tok = tokens.Ident
+ return p.parseLiteralExpr(false)
+ }
+ }
+
+ var negated bool
+ if p.s.tok == tokens.Not {
+ s := p.save()
+ p.scanWS()
+ tok := p.s.tok
+ p.restore(s)
+
+ if tok != tokens.Dot && tok != tokens.LBrack {
+ p.scan()
+ negated = true
+ }
+ }
+
+ switch p.s.tok {
+ case tokens.Some:
+ if negated {
+ p.illegal("illegal negation of 'some'")
+ return nil
+ }
+ return p.parseSome()
+ case tokens.Every:
+ if negated {
+ p.illegal("illegal negation of 'every'")
+ return nil
+ }
+ return p.parseEvery()
+ default:
+ return p.parseLiteralExpr(negated)
+ }
+}
+
+func (p *Parser) isAllowedRefKeyword(t tokens.Token) bool {
+ return p.isAllowedRefKeywordStr(t.String())
+}
+
+func (p *Parser) isAllowedRefKeywordStr(s string) bool {
+ if p.po.Capabilities.ContainsFeature(FeatureKeywordsInRefs) {
+ return IsKeywordInRegoVersion(s, p.po.EffectiveRegoVersion()) || p.s.s.IsKeyword(s)
+ }
+
+ return false
+}
+
+func (p *Parser) parseLiteralExpr(negated bool) *Expr {
+ s := p.save()
+ expr := p.parseExpr()
+ if expr != nil {
+ expr.Negated = negated
+ if p.s.tok == tokens.With {
+ if expr.With = p.parseWith(); expr.With == nil {
+ return nil
+ }
+ }
+
+ if p.isFutureKeyword("every") {
+ // If we find a plain `every` identifier, attempt to parse an every expression,
+ // add hint if it succeeds.
+ if term, ok := expr.Terms.(*Term); ok && Var("every").Equal(term.Value) {
+ var hint bool
+ t := p.save()
+ p.restore(s)
+ if expr := p.futureParser().parseEvery(); expr != nil {
+ _, hint = expr.Terms.(*Every)
+ }
+ p.restore(t)
+ if hint {
+ p.hint("`import future.keywords.every` for `every x in xs { ... }` expressions")
+ }
+ }
+ }
+ }
+ return expr
+}
+
+func (p *Parser) parseWith() []*With {
+
+ withs := []*With{}
+
+ for {
+
+ with := With{
+ Location: p.s.Loc(),
+ }
+ p.scan()
+
+ if p.s.tok != tokens.Ident {
+ p.illegal("expected ident")
+ return nil
+ }
+
+ with.Target = p.parseTerm()
+ if with.Target == nil {
+ return nil
+ }
+
+ switch with.Target.Value.(type) {
+ case Ref, Var:
+ break
+ default:
+ p.illegal("expected with target path")
+ }
+
+ if p.s.tok != tokens.As {
+ p.illegal("expected as keyword")
+ return nil
+ }
+
+ p.scan()
+
+ if with.Value = p.parseTermInfixCall(); with.Value == nil {
+ return nil
+ }
+
+ with.Location.Text = p.s.Text(with.Location.Offset, p.s.lastEnd)
+
+ withs = append(withs, &with)
+
+ if p.s.tok != tokens.With {
+ break
+ }
+ }
+
+ return withs
+}
+
+func (p *Parser) parseSome() *Expr {
+
+ decl := &SomeDecl{}
+ decl.SetLoc(p.s.Loc())
+
+ // Attempt to parse "some x in xs", which will end up in
+ // SomeDecl{Symbols: ["member(x, xs)"]}
+ s := p.save()
+ p.scan()
+ if term := p.parseTermInfixCall(); term != nil {
+ if call, ok := term.Value.(Call); ok {
+ switch call[0].String() {
+ case Member.Name:
+ if len(call) != 3 {
+ p.illegal("illegal domain")
+ return nil
+ }
+ case MemberWithKey.Name:
+ if len(call) != 4 {
+ p.illegal("illegal domain")
+ return nil
+ }
+ default:
+ p.illegal("expected `x in xs` or `x, y in xs` expression")
+ return nil
+ }
+
+ decl.Symbols = []*Term{term}
+ expr := NewExpr(decl).SetLocation(decl.Location)
+ if p.s.tok == tokens.With {
+ if expr.With = p.parseWith(); expr.With == nil {
+ return nil
+ }
+ }
+ return expr
+ }
+ }
+
+ p.restore(s)
+
+ if p.isFutureKeyword("in") {
+ s = p.save() // new copy for later
+ var hint bool
+ p.scan()
+ if term := p.futureParser().parseTermInfixCall(); term != nil {
+ if call, ok := term.Value.(Call); ok {
+ switch call[0].String() {
+ case Member.Name, MemberWithKey.Name:
+ hint = true
+ }
+ }
+ }
+
+ // go on as before, it's `some x[...]` or illegal
+ p.restore(s)
+ if hint {
+ p.hint("`import future.keywords.in` for `some x in xs` expressions")
+ }
+ }
+
+ for { // collecting var args
+ p.scan()
+
+ if p.s.tok != tokens.Ident {
+ p.illegal("expected var")
+ return nil
+ }
+
+ decl.Symbols = append(decl.Symbols, p.parseVar())
+
+ p.scan()
+
+ if p.s.tok != tokens.Comma {
+ break
+ }
+ }
+
+ return NewExpr(decl).SetLocation(decl.Location)
+}
+
+func (p *Parser) parseEvery() *Expr {
+ qb := &Every{}
+ qb.SetLoc(p.s.Loc())
+
+ // TODO(sr): We'd get more accurate error messages if we didn't rely on
+ // parseTermInfixCall here, but parsed "var [, var] in term" manually.
+ p.scan()
+ term := p.parseTermInfixCall()
+ if term == nil {
+ return nil
+ }
+ call, ok := term.Value.(Call)
+ if !ok {
+ p.illegal("expected `x[, y] in xs { ... }` expression")
+ return nil
+ }
+ switch call[0].String() {
+ case Member.Name: // x in xs
+ if len(call) != 3 {
+ p.illegal("illegal domain")
+ return nil
+ }
+ qb.Value = call[1]
+ qb.Domain = call[2]
+ case MemberWithKey.Name: // k, v in xs
+ if len(call) != 4 {
+ p.illegal("illegal domain")
+ return nil
+ }
+ qb.Key = call[1]
+ qb.Value = call[2]
+ qb.Domain = call[3]
+ if _, ok := qb.Key.Value.(Var); !ok {
+ p.illegal("expected key to be a variable")
+ return nil
+ }
+ default:
+ p.illegal("expected `x[, y] in xs { ... }` expression")
+ return nil
+ }
+ if _, ok := qb.Value.Value.(Var); !ok {
+ p.illegal("expected value to be a variable")
+ return nil
+ }
+ if p.s.tok == tokens.LBrace { // every x in xs { ... }
+ p.scan()
+ body := p.parseBody(tokens.RBrace)
+ if body == nil {
+ return nil
+ }
+ p.scan()
+ qb.Body = body
+ expr := NewExpr(qb).SetLocation(qb.Location)
+
+ if p.s.tok == tokens.With {
+ if expr.With = p.parseWith(); expr.With == nil {
+ return nil
+ }
+ }
+ return expr
+ }
+
+ p.illegal("missing body")
+ return nil
+}
+
+func (p *Parser) parseExpr() *Expr {
+
+ lhs := p.parseTermInfixCall()
+ if lhs == nil {
+ return nil
+ }
+
+ if op := p.parseTermOp(tokens.Assign, tokens.Unify); op != nil {
+ if rhs := p.parseTermInfixCall(); rhs != nil {
+ return NewExpr([]*Term{op, lhs, rhs})
+ }
+ return nil
+ }
+
+ // NOTE(tsandall): the top-level call term is converted to an expr because
+ // the evaluator does not support the call term type (nested calls are
+ // rewritten by the compiler.)
+ if call, ok := lhs.Value.(Call); ok {
+ return NewExpr([]*Term(call))
+ }
+
+ return NewExpr(lhs)
+}
+
+// parseTermInfixCall consumes the next term from the input and returns it. If a
+// term cannot be parsed the return value is nil and error will be recorded. The
+// scanner will be advanced to the next token before returning.
+// By starting out with infix relations (==, !=, <, etc) and further calling the
+// other binary operators (|, &, arithmetics), it constitutes the binding
+// precedence.
+func (p *Parser) parseTermInfixCall() *Term {
+ if !p.enter() {
+ return nil
+ }
+ defer p.leave()
+
+ return p.parseTermIn(nil, true, p.s.loc.Offset)
+}
+
+func (p *Parser) parseTermInfixCallInList() *Term {
+ if !p.enter() {
+ return nil
+ }
+ defer p.leave()
+
+ return p.parseTermIn(nil, false, p.s.loc.Offset)
+}
+
+// use static references to avoid allocations, and
+// copy them to the call term only when needed
+var memberWithKeyRef = MemberWithKey.Ref()
+var memberRef = Member.Ref()
+
+func (p *Parser) parseTermIn(lhs *Term, keyVal bool, offset int) *Term {
+ if !p.enter() {
+ return nil
+ }
+ defer p.leave()
+
+ // NOTE(sr): `in` is a bit special: besides `lhs in rhs`, it also
+ // supports `key, val in rhs`, so it can have an optional second lhs.
+ // `keyVal` triggers if we attempt to parse a second lhs argument (`mhs`).
+ if lhs == nil {
+ lhs = p.parseTermRelation(nil, offset)
+ }
+ if lhs != nil {
+ if keyVal && p.s.tok == tokens.Comma { // second "lhs", or "middle hand side"
+ s := p.save()
+ p.scan()
+ if mhs := p.parseTermRelation(nil, offset); mhs != nil {
+
+ if op := p.parseTermOpName(memberWithKeyRef, tokens.In); op != nil {
+ if rhs := p.parseTermRelation(nil, p.s.loc.Offset); rhs != nil {
+ call := p.setLoc(CallTerm(op, lhs, mhs, rhs), lhs.Location, offset, p.s.lastEnd)
+ switch p.s.tok {
+ case tokens.In:
+ return p.parseTermIn(call, keyVal, offset)
+ default:
+ return call
+ }
+ }
+ }
+ }
+ p.restore(s)
+ }
+
+ _ = scanAheadRef(p)
+
+ if op := p.parseTermOpName(memberRef, tokens.In); op != nil {
+ if rhs := p.parseTermRelation(nil, p.s.loc.Offset); rhs != nil {
+ call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd)
+ switch p.s.tok {
+ case tokens.In:
+ return p.parseTermIn(call, keyVal, offset)
+ default:
+ return call
+ }
+ }
+ }
+ }
+ return lhs
+}
+
+func (p *Parser) parseTermRelation(lhs *Term, offset int) *Term {
+ if !p.enter() {
+ return nil
+ }
+ defer p.leave()
+
+ if lhs == nil {
+ lhs = p.parseTermOr(nil, offset)
+ }
+ if lhs != nil {
+ if op := p.parseTermOp(tokens.Equal, tokens.Neq, tokens.Lt, tokens.Gt, tokens.Lte, tokens.Gte); op != nil {
+ if rhs := p.parseTermOr(nil, p.s.loc.Offset); rhs != nil {
+ call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd)
+ switch p.s.tok {
+ case tokens.Equal, tokens.Neq, tokens.Lt, tokens.Gt, tokens.Lte, tokens.Gte:
+ return p.parseTermRelation(call, offset)
+ default:
+ return call
+ }
+ }
+ }
+ }
+ return lhs
+}
+
+func (p *Parser) parseTermOr(lhs *Term, offset int) *Term {
+ if !p.enter() {
+ return nil
+ }
+ defer p.leave()
+
+ if lhs == nil {
+ lhs = p.parseTermAnd(nil, offset)
+ }
+ if lhs != nil {
+ if op := p.parseTermOp(tokens.Or); op != nil {
+ if rhs := p.parseTermAnd(nil, p.s.loc.Offset); rhs != nil {
+ call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd)
+ switch p.s.tok {
+ case tokens.Or:
+ return p.parseTermOr(call, offset)
+ default:
+ return call
+ }
+ }
+ }
+ return lhs
+ }
+ return nil
+}
+
+func (p *Parser) parseTermAnd(lhs *Term, offset int) *Term {
+ if !p.enter() {
+ return nil
+ }
+ defer p.leave()
+
+ if lhs == nil {
+ lhs = p.parseTermArith(nil, offset)
+ }
+ if lhs != nil {
+ if op := p.parseTermOp(tokens.And); op != nil {
+ if rhs := p.parseTermArith(nil, p.s.loc.Offset); rhs != nil {
+ call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd)
+ switch p.s.tok {
+ case tokens.And:
+ return p.parseTermAnd(call, offset)
+ default:
+ return call
+ }
+ }
+ }
+ return lhs
+ }
+ return nil
+}
+
+func (p *Parser) parseTermArith(lhs *Term, offset int) *Term {
+ if !p.enter() {
+ return nil
+ }
+ defer p.leave()
+
+ if lhs == nil {
+ lhs = p.parseTermFactor(nil, offset)
+ }
+ if lhs != nil {
+ if op := p.parseTermOp(tokens.Add, tokens.Sub); op != nil {
+ if rhs := p.parseTermFactor(nil, p.s.loc.Offset); rhs != nil {
+ call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd)
+ switch p.s.tok {
+ case tokens.Add, tokens.Sub:
+ return p.parseTermArith(call, offset)
+ default:
+ return call
+ }
+ }
+ }
+ }
+ return lhs
+}
+
+func (p *Parser) parseTermFactor(lhs *Term, offset int) *Term {
+ if !p.enter() {
+ return nil
+ }
+ defer p.leave()
+
+ if lhs == nil {
+ lhs = p.parseTerm()
+ }
+ if lhs != nil {
+ if op := p.parseTermOp(tokens.Mul, tokens.Quo, tokens.Rem); op != nil {
+ if rhs := p.parseTerm(); rhs != nil {
+ call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd)
+ switch p.s.tok {
+ case tokens.Mul, tokens.Quo, tokens.Rem:
+ return p.parseTermFactor(call, offset)
+ default:
+ return call
+ }
+ }
+ }
+ }
+ return lhs
+}
+
+func (p *Parser) parseTerm() *Term {
+ if !p.enter() {
+ return nil
+ }
+ defer p.leave()
+
+ if term, s := p.parsedTermCacheLookup(); s != nil {
+ p.restore(s)
+ return term
+ }
+ s0 := p.save()
+
+ var term *Term
+ switch p.s.tok {
+ case tokens.Null:
+ term = NullTerm().SetLocation(p.s.Loc())
+ case tokens.True:
+ term = BooleanTerm(true).SetLocation(p.s.Loc())
+ case tokens.False:
+ term = BooleanTerm(false).SetLocation(p.s.Loc())
+ case tokens.Sub, tokens.Dot, tokens.Number:
+ term = p.parseNumber()
+ case tokens.String:
+ term = p.parseString()
+ case tokens.Ident, tokens.Contains: // NOTE(sr): contains anywhere BUT in rule heads gets no special treatment
+ term = p.parseVar()
+ case tokens.LBrack:
+ term = p.parseArray()
+ case tokens.LBrace:
+ term = p.parseSetOrObject()
+ case tokens.LParen:
+ offset := p.s.loc.Offset
+ p.scan()
+ if r := p.parseTermInfixCall(); r != nil {
+ if p.s.tok == tokens.RParen {
+ r.Location.Text = p.s.Text(offset, p.s.tokEnd)
+ term = r
+ } else {
+ p.error(p.s.Loc(), "non-terminated expression")
+ }
+ }
+ default:
+ p.illegalToken()
+ }
+
+ term = p.parseTermFinish(term, false)
+ p.parsedTermCachePush(term, s0)
+ return term
+}
+
+func (p *Parser) parseTermFinish(head *Term, skipws bool) *Term {
+ if head == nil {
+ return nil
+ }
+ offset := p.s.loc.Offset
+ p.doScan(skipws)
+
+ switch p.s.tok {
+ case tokens.LParen, tokens.Dot, tokens.LBrack:
+ return p.parseRef(head, offset)
+ case tokens.Whitespace:
+ p.scan()
+ fallthrough
+ default:
+ if _, ok := head.Value.(Var); ok && RootDocumentNames.Contains(head) {
+ return RefTerm(head).SetLocation(head.Location)
+ }
+ return head
+ }
+}
+
+func (p *Parser) parseHeadFinish(head *Term, skipws bool) *Term {
+ if head == nil {
+ return nil
+ }
+ offset := p.s.loc.Offset
+ p.doScan(false)
+
+ switch p.s.tok {
+ case tokens.Add, tokens.Sub, tokens.Mul, tokens.Quo, tokens.Rem,
+ tokens.And, tokens.Or,
+ tokens.Equal, tokens.Neq, tokens.Gt, tokens.Gte, tokens.Lt, tokens.Lte:
+ p.illegalToken()
+ case tokens.Whitespace:
+ p.doScan(skipws)
+ }
+
+ switch p.s.tok {
+ case tokens.LParen, tokens.Dot, tokens.LBrack:
+ return p.parseRef(head, offset)
+ case tokens.Whitespace:
+ p.scan()
+ }
+
+ if _, ok := head.Value.(Var); ok && RootDocumentNames.Contains(head) {
+ return RefTerm(head).SetLocation(head.Location)
+ }
+ return head
+}
+
+func (p *Parser) parseNumber() *Term {
+ var prefix string
+ loc := p.s.Loc()
+
+ // Handle negative sign
+ if p.s.tok == tokens.Sub {
+ prefix = "-"
+ p.scan()
+ switch p.s.tok {
+ case tokens.Number, tokens.Dot:
+ break
+ default:
+ p.illegal("expected number")
+ return nil
+ }
+ }
+
+ // Handle decimal point
+ if p.s.tok == tokens.Dot {
+ prefix += "."
+ p.scan()
+ if p.s.tok != tokens.Number {
+ p.illegal("expected number")
+ return nil
+ }
+ }
+
+ // Validate leading zeros: reject numbers like "01", "007", etc.
+ // Skip validation if prefix ends with '.' (like ".123")
+ hasDecimalPrefix := len(prefix) > 0 && prefix[len(prefix)-1] == '.'
+
+ if !hasDecimalPrefix && len(p.s.lit) > 1 && p.s.lit[0] == '0' {
+ // These are the only valid cases starting with '0':
+ isDecimal := p.s.lit[1] == '.' // "0.123"
+ isScientific := len(p.s.lit) > 2 && (p.s.lit[1] == 'e' || p.s.lit[1] == 'E') // "0e5", "0E-3"
+
+ if !isDecimal && !isScientific {
+ p.illegal("expected number without leading zero")
+ return nil
+ }
+ }
+
+ // Ensure that the number is valid
+ s := prefix + p.s.lit
+ f, ok := new(big.Float).SetString(s)
+ if !ok {
+ p.illegal("invalid float")
+ return nil
+ }
+
+ // Put limit on size of exponent to prevent non-linear cost of String()
+ // function on big.Float from causing denial of service: https://github.com/golang/go/issues/11068
+ //
+ // n == sign * mantissa * 2^exp
+ // 0.5 <= mantissa < 1.0
+ //
+ // The limit is arbitrary.
+ exp := f.MantExp(nil)
+ if exp > 1e5 || exp < -1e5 || f.IsInf() { // +/- inf, exp is 0
+ p.error(p.s.Loc(), "number too big")
+ return nil
+ }
+
+ // Note: Use the original string, do *not* round trip from
+ // the big.Float as it can cause precision loss.
+ return NumberTerm(json.Number(s)).SetLocation(loc)
+}
+
+func (p *Parser) parseString() *Term {
+ if p.s.lit[0] == '"' {
+ if p.s.lit == "\"\"" {
+ return NewTerm(InternedEmptyString.Value).SetLocation(p.s.Loc())
+ }
+
+ var s string
+ if err := json.Unmarshal([]byte(p.s.lit), &s); err != nil {
+ p.errorf(p.s.Loc(), "illegal string literal: %s", p.s.lit)
+ return nil
+ }
+ return StringTerm(s).SetLocation(p.s.Loc())
+ }
+ return p.parseRawString()
+}
+
+func (p *Parser) parseRawString() *Term {
+ if len(p.s.lit) < 2 {
+ return nil
+ }
+ return StringTerm(p.s.lit[1 : len(p.s.lit)-1]).SetLocation(p.s.Loc())
+}
+
+// this is the name to use for instantiating an empty set, e.g., `set()`.
+var setConstructor = RefTerm(VarTerm("set"))
+
+func (p *Parser) parseCall(operator *Term, offset int) (term *Term) {
+ if !p.enter() {
+ return nil
+ }
+ defer p.leave()
+
+ loc := operator.Location
+ var end int
+
+ defer func() {
+ p.setLoc(term, loc, offset, end)
+ }()
+
+ p.scan() // steps over '('
+
+ if p.s.tok == tokens.RParen { // no args, i.e. set() or any.func()
+ end = p.s.tokEnd
+ p.scanWS()
+ if operator.Equal(setConstructor) {
+ return SetTerm()
+ }
+ return CallTerm(operator)
+ }
+
+ if r := p.parseTermList(tokens.RParen, []*Term{operator}); r != nil {
+ end = p.s.tokEnd
+ p.scanWS()
+ return CallTerm(r...)
+ }
+
+ return nil
+}
+
+func (p *Parser) parseRef(head *Term, offset int) (term *Term) {
+ if !p.enter() {
+ return nil
+ }
+ defer p.leave()
+
+ loc := head.Location
+ var end int
+
+ defer func() {
+ p.setLoc(term, loc, offset, end)
+ }()
+
+ switch h := head.Value.(type) {
+ case Var, *Array, Object, Set, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Call:
+ // ok
+ default:
+ p.errorf(loc, "illegal ref (head cannot be %v)", ValueName(h))
+ }
+
+ ref := []*Term{head}
+
+ for {
+ switch p.s.tok {
+ case tokens.Dot:
+ p.scanWS()
+ if p.s.tok != tokens.Ident && !p.isAllowedRefKeyword(p.s.tok) {
+ p.illegal("expected %v", tokens.Ident)
+ return nil
+ }
+ ref = append(ref, StringTerm(p.s.lit).SetLocation(p.s.Loc()))
+ p.scanWS()
+ case tokens.LParen:
+ term = p.parseCall(p.setLoc(RefTerm(ref...), loc, offset, p.s.loc.Offset), offset)
+ if term != nil {
+ switch p.s.tok {
+ case tokens.Whitespace:
+ p.scan()
+ end = p.s.lastEnd
+ return term
+ case tokens.Dot, tokens.LBrack:
+ term = p.parseRef(term, offset)
+ }
+ }
+ end = p.s.tokEnd
+ return term
+ case tokens.LBrack:
+ p.scan()
+ if term := p.parseTermInfixCall(); term != nil {
+ if p.s.tok != tokens.RBrack {
+ p.illegal("expected %v", tokens.LBrack)
+ return nil
+ }
+ ref = append(ref, term)
+ p.scanWS()
+ } else {
+ return nil
+ }
+ case tokens.Whitespace:
+ end = p.s.lastEnd
+ p.scan()
+ return RefTerm(ref...)
+ default:
+ end = p.s.lastEnd
+ return RefTerm(ref...)
+ }
+ }
+}
+
+func (p *Parser) parseArray() (term *Term) {
+ if !p.enter() {
+ return nil
+ }
+ defer p.leave()
+
+ loc := p.s.Loc()
+ offset := p.s.loc.Offset
+
+ defer func() {
+ p.setLoc(term, loc, offset, p.s.tokEnd)
+ }()
+
+ p.scan()
+
+ if p.s.tok == tokens.RBrack {
+ return ArrayTerm()
+ }
+
+ potentialComprehension := true
+
+ // Skip leading commas, eg [, x, y]
+ // Supported for backwards compatibility. In the future
+ // we should make this a parse error.
+ if p.s.tok == tokens.Comma {
+ potentialComprehension = false
+ p.scan()
+ }
+
+ s := p.save()
+
+ // NOTE(tsandall): The parser cannot attempt a relational term here because
+ // of ambiguity around comprehensions. For example, given:
+ //
+ // {1 | 1}
+ //
+ // Does this represent a set comprehension or a set containing binary OR
+ // call? We resolve the ambiguity by prioritizing comprehensions.
+ head := p.parseTerm()
+
+ if head == nil {
+ return nil
+ }
+
+ switch p.s.tok {
+ case tokens.RBrack:
+ return ArrayTerm(head)
+ case tokens.Comma:
+ p.scan()
+ if terms := p.parseTermList(tokens.RBrack, []*Term{head}); terms != nil {
+ return ArrayTerm(terms...)
+ }
+ return nil
+ case tokens.Or:
+ if potentialComprehension {
+ // Try to parse as if it is an array comprehension
+ p.scan()
+ if body := p.parseBody(tokens.RBrack); body != nil {
+ return ArrayComprehensionTerm(head, body)
+ }
+ if p.s.tok != tokens.Comma {
+ return nil
+ }
+ }
+ // fall back to parsing as a normal array definition
+ }
+
+ p.restore(s)
+
+ if terms := p.parseTermList(tokens.RBrack, nil); terms != nil {
+ return ArrayTerm(terms...)
+ }
+ return nil
+}
+
+func (p *Parser) parseSetOrObject() (term *Term) {
+ if !p.enter() {
+ return nil
+ }
+ defer p.leave()
+
+ loc := p.s.Loc()
+ offset := p.s.loc.Offset
+
+ defer func() {
+ p.setLoc(term, loc, offset, p.s.tokEnd)
+ }()
+
+ p.scan()
+
+ if p.s.tok == tokens.RBrace {
+ return ObjectTerm()
+ }
+
+ potentialComprehension := true
+
+ // Skip leading commas, eg {, x, y}
+ // Supported for backwards compatibility. In the future
+ // we should make this a parse error.
+ if p.s.tok == tokens.Comma {
+ potentialComprehension = false
+ p.scan()
+ }
+
+ s := p.save()
+
+ // Try parsing just a single term first to give comprehensions higher
+ // priority to "or" calls in ambiguous situations. Eg: { a | b }
+ // will be a set comprehension.
+ //
+ // Note: We don't know yet if it is a set or object being defined.
+ head := p.parseTerm()
+ if head == nil {
+ return nil
+ }
+
+ switch p.s.tok {
+ case tokens.Or:
+ if potentialComprehension {
+ return p.parseSet(s, head, potentialComprehension)
+ }
+ case tokens.RBrace, tokens.Comma:
+ return p.parseSet(s, head, potentialComprehension)
+ case tokens.Colon:
+ return p.parseObject(head, potentialComprehension)
+ }
+
+ p.restore(s)
+
+ head = p.parseTermInfixCallInList()
+ if head == nil {
+ return nil
+ }
+
+ switch p.s.tok {
+ case tokens.RBrace, tokens.Comma:
+ return p.parseSet(s, head, false)
+ case tokens.Colon:
+ // It still might be an object comprehension, eg { a+1: b | ... }
+ return p.parseObject(head, potentialComprehension)
+ }
+
+ p.illegal("non-terminated set")
+ return nil
+}
+
+func (p *Parser) parseSet(s *state, head *Term, potentialComprehension bool) *Term {
+ if !p.enter() {
+ return nil
+ }
+ defer p.leave()
+
+ switch p.s.tok {
+ case tokens.RBrace:
+ return SetTerm(head)
+ case tokens.Comma:
+ p.scan()
+ if terms := p.parseTermList(tokens.RBrace, []*Term{head}); terms != nil {
+ return SetTerm(terms...)
+ }
+ case tokens.Or:
+ if potentialComprehension {
+ // Try to parse as if it is a set comprehension
+ p.scan()
+ if body := p.parseBody(tokens.RBrace); body != nil {
+ return SetComprehensionTerm(head, body)
+ }
+ if p.s.tok != tokens.Comma {
+ return nil
+ }
+ }
+ // Fall back to parsing as normal set definition
+ p.restore(s)
+ if terms := p.parseTermList(tokens.RBrace, nil); terms != nil {
+ return SetTerm(terms...)
+ }
+ }
+ return nil
+}
+
+func (p *Parser) parseObject(k *Term, potentialComprehension bool) *Term {
+ if !p.enter() {
+ return nil
+ }
+ defer p.leave()
+
+ // NOTE(tsandall): Assumption: this function is called after parsing the key
+ // of the head element and then receiving a colon token from the scanner.
+ // Advance beyond the colon and attempt to parse an object.
+ if p.s.tok != tokens.Colon {
+ panic("expected colon")
+ }
+ p.scan()
+
+ s := p.save()
+
+ // NOTE(sr): We first try to parse the value as a term (`v`), and see
+ // if we can parse `{ x: v | ...}` as a comprehension.
+ // However, if we encounter either a Comma or an RBace, it cannot be
+ // parsed as a comprehension -- so we save double work further down
+ // where `parseObjectFinish(k, v, false)` would only exercise the
+ // same code paths once more.
+ v := p.parseTerm()
+ if v == nil {
+ return nil
+ }
+
+ potentialRelation := true
+ if potentialComprehension {
+ switch p.s.tok {
+ case tokens.RBrace, tokens.Comma:
+ potentialRelation = false
+ fallthrough
+ case tokens.Or:
+ if term := p.parseObjectFinish(k, v, true); term != nil {
+ return term
+ }
+ }
+ }
+
+ p.restore(s)
+
+ if potentialRelation {
+ v := p.parseTermInfixCallInList()
+ if v == nil {
+ return nil
+ }
+
+ switch p.s.tok {
+ case tokens.RBrace, tokens.Comma:
+ return p.parseObjectFinish(k, v, false)
+ }
+ }
+
+ p.illegal("non-terminated object")
+ return nil
+}
+
+func (p *Parser) parseObjectFinish(key, val *Term, potentialComprehension bool) *Term {
+ if !p.enter() {
+ return nil
+ }
+ defer p.leave()
+
+ switch p.s.tok {
+ case tokens.RBrace:
+ return ObjectTerm([2]*Term{key, val})
+ case tokens.Or:
+ if potentialComprehension {
+ p.scan()
+ if body := p.parseBody(tokens.RBrace); body != nil {
+ return ObjectComprehensionTerm(key, val, body)
+ }
+ } else {
+ p.illegal("non-terminated object")
+ }
+ case tokens.Comma:
+ p.scan()
+ if r := p.parseTermPairList(tokens.RBrace, [][2]*Term{{key, val}}); r != nil {
+ return ObjectTerm(r...)
+ }
+ }
+ return nil
+}
+
+func (p *Parser) parseTermList(end tokens.Token, r []*Term) []*Term {
+ if p.s.tok == end {
+ return r
+ }
+ for {
+ term := p.parseTermInfixCallInList()
+ if term != nil {
+ r = append(r, term)
+ switch p.s.tok {
+ case end:
+ return r
+ case tokens.Comma:
+ p.scan()
+ if p.s.tok == end {
+ return r
+ }
+ continue
+ default:
+ p.illegal(fmt.Sprintf("expected %q or %q", tokens.Comma, end))
+ return nil
+ }
+ }
+ return nil
+ }
+}
+
+func (p *Parser) parseTermPairList(end tokens.Token, r [][2]*Term) [][2]*Term {
+ if p.s.tok == end {
+ return r
+ }
+ for {
+ key := p.parseTermInfixCallInList()
+ if key != nil {
+ switch p.s.tok {
+ case tokens.Colon:
+ p.scan()
+ if val := p.parseTermInfixCallInList(); val != nil {
+ r = append(r, [2]*Term{key, val})
+ switch p.s.tok {
+ case end:
+ return r
+ case tokens.Comma:
+ p.scan()
+ if p.s.tok == end {
+ return r
+ }
+ continue
+ default:
+ p.illegal(fmt.Sprintf("expected %q or %q", tokens.Comma, end))
+ return nil
+ }
+ }
+ default:
+ p.illegal(fmt.Sprintf("expected %q", tokens.Colon))
+ return nil
+ }
+ }
+ return nil
+ }
+}
+
+func (p *Parser) parseTermOp(values ...tokens.Token) *Term {
+ if slices.Contains(values, p.s.tok) {
+ r := RefTerm(VarTerm(p.s.tok.String()).SetLocation(p.s.Loc())).SetLocation(p.s.Loc())
+ p.scan()
+ return r
+ }
+ return nil
+}
+
+func (p *Parser) parseTermOpName(ref Ref, values ...tokens.Token) *Term {
+ if slices.Contains(values, p.s.tok) {
+ cp := ref.Copy()
+ for _, r := range cp {
+ r.SetLocation(p.s.Loc())
+ }
+ t := RefTerm(cp...)
+ t.SetLocation(p.s.Loc())
+ p.scan()
+ return t
+ }
+ return nil
+}
+
+func (p *Parser) parseVar() *Term {
+
+ s := p.s.lit
+
+ term := VarTerm(s).SetLocation(p.s.Loc())
+
+ // Update wildcard values with unique identifiers
+ if term.Equal(Wildcard) {
+ term.Value = Var(p.genwildcard())
+ }
+
+ return term
+}
+
+func (p *Parser) genwildcard() string {
+ c := p.s.wildcard
+ p.s.wildcard++
+ return fmt.Sprintf("%v%d", WildcardPrefix, c)
+}
+
+func (p *Parser) error(loc *location.Location, reason string) {
+ p.errorf(loc, "%s", reason)
+}
+
+func (p *Parser) errorf(loc *location.Location, f string, a ...any) {
+ msg := strings.Builder{}
+ msg.WriteString(fmt.Sprintf(f, a...))
+
+ switch len(p.s.hints) {
+ case 0: // nothing to do
+ case 1:
+ msg.WriteString(" (hint: ")
+ msg.WriteString(p.s.hints[0])
+ msg.WriteRune(')')
+ default:
+ msg.WriteString(" (hints: ")
+ for i, h := range p.s.hints {
+ if i > 0 {
+ msg.WriteString(", ")
+ }
+ msg.WriteString(h)
+ }
+ msg.WriteRune(')')
+ }
+
+ p.s.errors = append(p.s.errors, &Error{
+ Code: ParseErr,
+ Message: msg.String(),
+ Location: loc,
+ Details: newParserErrorDetail(p.s.s.Bytes(), loc.Offset),
+ })
+ p.s.hints = nil
+}
+
+func (p *Parser) hint(f string, a ...any) {
+ p.s.hints = append(p.s.hints, fmt.Sprintf(f, a...))
+}
+
+func (p *Parser) illegal(note string, a ...any) {
+ tok := p.s.tok.String()
+
+ if p.s.tok == tokens.Illegal {
+ p.errorf(p.s.Loc(), "illegal token")
+ return
+ }
+
+ tokType := "token"
+ if tokens.IsKeyword(p.s.tok) {
+ tokType = "keyword"
+ } else if _, ok := allFutureKeywords[p.s.tok.String()]; ok {
+ tokType = "keyword"
+ }
+
+ note = fmt.Sprintf(note, a...)
+ if len(note) > 0 {
+ p.errorf(p.s.Loc(), "unexpected %s %s: %s", tok, tokType, note)
+ } else {
+ p.errorf(p.s.Loc(), "unexpected %s %s", tok, tokType)
+ }
+}
+
+func (p *Parser) illegalToken() {
+ p.illegal("")
+}
+
+func (p *Parser) scan() {
+ p.doScan(true)
+}
+
+func (p *Parser) scanWS() {
+ p.doScan(false)
+}
+
+func (p *Parser) doScan(skipws bool) {
+
+ // NOTE(tsandall): the last position is used to compute the "text" field for
+ // complex AST nodes. Whitespace never affects the last position of an AST
+ // node so do not update it when scanning.
+ if p.s.tok != tokens.Whitespace {
+ p.s.lastEnd = p.s.tokEnd
+ p.s.skippedNL = false
+ }
+
+ var errs []scanner.Error
+ for {
+ var pos scanner.Position
+ p.s.tok, pos, p.s.lit, errs = p.s.s.Scan()
+
+ p.s.tokEnd = pos.End
+ p.s.loc.Row = pos.Row
+ p.s.loc.Col = pos.Col
+ p.s.loc.Offset = pos.Offset
+ p.s.loc.Text = p.s.Text(pos.Offset, pos.End)
+ p.s.loc.Tabs = pos.Tabs
+
+ for _, err := range errs {
+ p.error(p.s.Loc(), err.Message)
+ }
+
+ if len(errs) > 0 {
+ p.s.tok = tokens.Illegal
+ }
+
+ if p.s.tok == tokens.Whitespace {
+ if p.s.lit == "\n" {
+ p.s.skippedNL = true
+ }
+ if skipws {
+ continue
+ }
+ }
+
+ if p.s.tok != tokens.Comment {
+ break
+ }
+
+ // For backwards compatibility leave a nil
+ // Text value if there is no text rather than
+ // an empty string.
+ var commentText []byte
+ if len(p.s.lit) > 1 {
+ commentText = []byte(p.s.lit[1:])
+ }
+ comment := NewComment(commentText)
+ comment.SetLoc(p.s.Loc())
+ p.s.comments = append(p.s.comments, comment)
+ }
+}
+
+func (p *Parser) save() *state {
+ cpy := *p.s
+ s := *cpy.s
+ cpy.s = &s
+ return &cpy
+}
+
+func (p *Parser) restore(s *state) {
+ p.s = s
+}
+
+func setLocRecursive(x any, loc *location.Location) {
+ NewGenericVisitor(func(x any) bool {
+ if node, ok := x.(Node); ok {
+ node.SetLoc(loc)
+ }
+ return false
+ }).Walk(x)
+}
+
+func (p *Parser) setLoc(term *Term, loc *location.Location, offset, end int) *Term {
+ if term != nil {
+ cpy := *loc
+ term.Location = &cpy
+ term.Location.Text = p.s.Text(offset, end)
+ }
+ return term
+}
+
+func (p *Parser) validateDefaultRuleValue(rule *Rule) bool {
+ if rule.Head.Value == nil {
+ p.error(rule.Loc(), "illegal default rule (must have a value)")
+ return false
+ }
+
+ valid := true
+ vis := NewGenericVisitor(func(x any) bool {
+ switch x.(type) {
+ case *ArrayComprehension, *ObjectComprehension, *SetComprehension: // skip closures
+ return true
+ case Ref, Var, Call:
+ p.error(rule.Loc(), fmt.Sprintf("illegal default rule (value cannot contain %v)", TypeName(x)))
+ valid = false
+ return true
+ }
+ return false
+ })
+
+ vis.Walk(rule.Head.Value.Value)
+ return valid
+}
+
+func (p *Parser) validateDefaultRuleArgs(rule *Rule) bool {
+
+ valid := true
+ vars := NewVarSet()
+
+ vis := NewGenericVisitor(func(x any) bool {
+ switch x := x.(type) {
+ case Var:
+ if vars.Contains(x) {
+ p.error(rule.Loc(), fmt.Sprintf("illegal default rule (arguments cannot be repeated %v)", x))
+ valid = false
+ return true
+ }
+ vars.Add(x)
+
+ case *Term:
+ switch v := x.Value.(type) {
+ case Var: // do nothing
+ default:
+ p.error(rule.Loc(), fmt.Sprintf("illegal default rule (arguments cannot contain %v)", ValueName(v)))
+ valid = false
+ return true
+ }
+ }
+
+ return false
+ })
+
+ vis.Walk(rule.Head.Args)
+ return valid
+}
+
+// We explicitly use yaml unmarshalling, to accommodate for the '_' in 'related_resources',
+// which isn't handled properly by json for some reason.
+type rawAnnotation struct {
+ Scope string `yaml:"scope"`
+ Title string `yaml:"title"`
+ Entrypoint bool `yaml:"entrypoint"`
+ Description string `yaml:"description"`
+ Organizations []string `yaml:"organizations"`
+ RelatedResources []any `yaml:"related_resources"`
+ Authors []any `yaml:"authors"`
+ Schemas []map[string]any `yaml:"schemas"`
+ Compile map[string]any `yaml:"compile"`
+ Custom map[string]any `yaml:"custom"`
+}
+
+type metadataParser struct {
+ buf *bytes.Buffer
+ comments []*Comment
+ loc *location.Location
+}
+
+func newMetadataParser(loc *Location) *metadataParser {
+ return &metadataParser{loc: loc, buf: bytes.NewBuffer(nil)}
+}
+
+func (b *metadataParser) Append(c *Comment) {
+ b.buf.Write(bytes.TrimPrefix(c.Text, []byte(" ")))
+ b.buf.WriteByte('\n')
+ b.comments = append(b.comments, c)
+}
+
+var yamlLineErrRegex = regexp.MustCompile(`^yaml:(?: unmarshal errors:[\n\s]*)? line ([[:digit:]]+):`)
+
+func (b *metadataParser) Parse() (*Annotations, error) {
+
+ var raw rawAnnotation
+
+ if len(bytes.TrimSpace(b.buf.Bytes())) == 0 {
+ return nil, errors.New("expected METADATA block, found whitespace")
+ }
+
+ if err := yaml.Unmarshal(b.buf.Bytes(), &raw); err != nil {
+ var comment *Comment
+ match := yamlLineErrRegex.FindStringSubmatch(err.Error())
+ if len(match) == 2 {
+ index, err2 := strconv.Atoi(match[1])
+ if err2 == nil {
+ if index >= len(b.comments) {
+ comment = b.comments[len(b.comments)-1]
+ } else {
+ comment = b.comments[index]
+ }
+ b.loc = comment.Location
+ }
+ }
+
+ if match == nil && len(b.comments) > 0 {
+ b.loc = b.comments[0].Location
+ }
+
+ return nil, augmentYamlError(err, b.comments)
+ }
+
+ var result Annotations
+ result.comments = b.comments
+ result.Scope = raw.Scope
+ result.Entrypoint = raw.Entrypoint
+ result.Title = raw.Title
+ result.Description = raw.Description
+ result.Organizations = raw.Organizations
+
+ for _, v := range raw.RelatedResources {
+ rr, err := parseRelatedResource(v)
+ if err != nil {
+ return nil, fmt.Errorf("invalid related-resource definition %s: %w", v, err)
+ }
+ result.RelatedResources = append(result.RelatedResources, rr)
+ }
+
+ if raw.Compile != nil {
+ result.Compile = &CompileAnnotation{}
+ if unknowns, ok := raw.Compile["unknowns"]; ok {
+ if unknowns, ok := unknowns.([]any); ok {
+ result.Compile.Unknowns = make([]Ref, len(unknowns))
+ for i := range unknowns {
+ if unknown, ok := unknowns[i].(string); ok {
+ ref, err := ParseRef(unknown)
+ if err != nil {
+ return nil, fmt.Errorf("invalid unknowns element %q: %w", unknown, err)
+ }
+ result.Compile.Unknowns[i] = ref
+ }
+ }
+ }
+ }
+ if mask, ok := raw.Compile["mask_rule"]; ok {
+ if mask, ok := mask.(string); ok {
+ maskTerm, err := ParseTerm(mask)
+ if err != nil {
+ return nil, fmt.Errorf("invalid mask_rule annotation %q: %w", mask, err)
+ }
+ switch v := maskTerm.Value.(type) {
+ case Var, String:
+ result.Compile.MaskRule = Ref{maskTerm}
+ case Ref:
+ result.Compile.MaskRule = v
+ default:
+ return nil, fmt.Errorf("invalid mask_rule annotation type %q: %[1]T", mask)
+ }
+ }
+ }
+ }
+
+ for _, pair := range raw.Schemas {
+ k, v := unwrapPair(pair)
+
+ var a SchemaAnnotation
+ var err error
+
+ a.Path, err = ParseRef(k)
+ if err != nil {
+ return nil, errors.New("invalid document reference")
+ }
+
+ switch v := v.(type) {
+ case string:
+ a.Schema, err = parseSchemaRef(v)
+ if err != nil {
+ return nil, err
+ }
+ case map[string]any:
+ w, err := convertYAMLMapKeyTypes(v, nil)
+ if err != nil {
+ return nil, fmt.Errorf("invalid schema definition: %w", err)
+ }
+ a.Definition = &w
+ default:
+ return nil, fmt.Errorf("invalid schema declaration for path %q", k)
+ }
+
+ result.Schemas = append(result.Schemas, &a)
+ }
+
+ for _, v := range raw.Authors {
+ author, err := parseAuthor(v)
+ if err != nil {
+ return nil, fmt.Errorf("invalid author definition %s: %w", v, err)
+ }
+ result.Authors = append(result.Authors, author)
+ }
+
+ result.Custom = make(map[string]any)
+ for k, v := range raw.Custom {
+ val, err := convertYAMLMapKeyTypes(v, nil)
+ if err != nil {
+ return nil, err
+ }
+ result.Custom[k] = val
+ }
+
+ result.Location = b.loc
+
+ // recreate original text of entire metadata block for location text attribute
+ sb := strings.Builder{}
+ sb.WriteString("# METADATA\n")
+
+ lines := bytes.Split(b.buf.Bytes(), []byte{'\n'})
+
+ for _, line := range lines[:len(lines)-1] {
+ sb.WriteString("# ")
+ sb.Write(line)
+ sb.WriteByte('\n')
+ }
+
+ result.Location.Text = []byte(strings.TrimSuffix(sb.String(), "\n"))
+
+ return &result, nil
+}
+
+// augmentYamlError augments a YAML error with hints intended to help the user figure out the cause of an otherwise
+// cryptic error. These are hints, instead of proper errors, because they are educated guesses, and aren't guaranteed
+// to be correct.
+func augmentYamlError(err error, comments []*Comment) error {
+ // Adding hints for when key/value ':' separator isn't suffixed with a legal YAML space symbol
+ for _, comment := range comments {
+ txt := string(comment.Text)
+ parts := strings.Split(txt, ":")
+ if len(parts) > 1 {
+ parts = parts[1:]
+ var invalidSpaces []string
+ for partIndex, part := range parts {
+ if len(part) == 0 && partIndex == len(parts)-1 {
+ invalidSpaces = []string{}
+ break
+ }
+
+ r, _ := utf8.DecodeRuneInString(part)
+ if r == ' ' || r == '\t' {
+ invalidSpaces = []string{}
+ break
+ }
+
+ invalidSpaces = append(invalidSpaces, fmt.Sprintf("%+q", r))
+ }
+ if len(invalidSpaces) > 0 {
+ err = fmt.Errorf(
+ "%s\n Hint: on line %d, symbol(s) %v immediately following a key/value separator ':' is not a legal yaml space character",
+ err.Error(), comment.Location.Row, invalidSpaces)
+ }
+ }
+ }
+ return err
+}
+
+func unwrapPair(pair map[string]any) (string, any) {
+ for k, v := range pair {
+ return k, v
+ }
+ return "", nil
+}
+
+var errInvalidSchemaRef = errors.New("invalid schema reference")
+
+// NOTE(tsandall): 'schema' is not registered as a root because it's not
+// supported by the compiler or evaluator today. Once we fix that, we can remove
+// this function.
+func parseSchemaRef(s string) (Ref, error) {
+
+ term, err := ParseTerm(s)
+ if err == nil {
+ switch v := term.Value.(type) {
+ case Var:
+ if term.Equal(SchemaRootDocument) {
+ return SchemaRootRef.Copy(), nil
+ }
+ case Ref:
+ if v.HasPrefix(SchemaRootRef) {
+ return v, nil
+ }
+ }
+ }
+
+ return nil, errInvalidSchemaRef
+}
+
+func parseRelatedResource(rr any) (*RelatedResourceAnnotation, error) {
+ rr, err := convertYAMLMapKeyTypes(rr, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ switch rr := rr.(type) {
+ case string:
+ if len(rr) > 0 {
+ u, err := url.Parse(rr)
+ if err != nil {
+ return nil, err
+ }
+ return &RelatedResourceAnnotation{Ref: *u}, nil
+ }
+ return nil, errors.New("ref URL may not be empty string")
+ case map[string]any:
+ description := strings.TrimSpace(getSafeString(rr, "description"))
+ ref := strings.TrimSpace(getSafeString(rr, "ref"))
+ if len(ref) > 0 {
+ u, err := url.Parse(ref)
+ if err != nil {
+ return nil, err
+ }
+ return &RelatedResourceAnnotation{Description: description, Ref: *u}, nil
+ }
+ return nil, errors.New("'ref' value required in object")
+ }
+
+ return nil, errors.New("invalid value type, must be string or map")
+}
+
+func parseAuthor(a any) (*AuthorAnnotation, error) {
+ a, err := convertYAMLMapKeyTypes(a, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ switch a := a.(type) {
+ case string:
+ return parseAuthorString(a)
+ case map[string]any:
+ name := strings.TrimSpace(getSafeString(a, "name"))
+ email := strings.TrimSpace(getSafeString(a, "email"))
+ if len(name) > 0 || len(email) > 0 {
+ return &AuthorAnnotation{name, email}, nil
+ }
+ return nil, errors.New("'name' and/or 'email' values required in object")
+ }
+
+ return nil, errors.New("invalid value type, must be string or map")
+}
+
+func getSafeString(m map[string]any, k string) string {
+ if v, found := m[k]; found {
+ if s, ok := v.(string); ok {
+ return s
+ }
+ }
+ return ""
+}
+
+const emailPrefix = "<"
+const emailSuffix = ">"
+
+// parseAuthor parses a string into an AuthorAnnotation. If the last word of the input string is enclosed within <>,
+// it is extracted as the author's email. The email may not contain whitelines, as it then will be interpreted as
+// multiple words.
+func parseAuthorString(s string) (*AuthorAnnotation, error) {
+ parts := strings.Fields(s)
+
+ if len(parts) == 0 {
+ return nil, errors.New("author is an empty string")
+ }
+
+ namePartCount := len(parts)
+ trailing := parts[namePartCount-1]
+ var email string
+ if len(trailing) >= len(emailPrefix)+len(emailSuffix) && strings.HasPrefix(trailing, emailPrefix) &&
+ strings.HasSuffix(trailing, emailSuffix) {
+ email = trailing[len(emailPrefix):]
+ email = email[0 : len(email)-len(emailSuffix)]
+ namePartCount -= 1
+ }
+
+ name := strings.Join(parts[0:namePartCount], " ")
+
+ return &AuthorAnnotation{Name: name, Email: email}, nil
+}
+
+func convertYAMLMapKeyTypes(x any, path []string) (any, error) {
+ var err error
+ switch x := x.(type) {
+ case map[any]any:
+ result := make(map[string]any, len(x))
+ for k, v := range x {
+ str, ok := k.(string)
+ if !ok {
+ return nil, fmt.Errorf("invalid map key type(s): %v", strings.Join(path, "/"))
+ }
+ result[str], err = convertYAMLMapKeyTypes(v, append(path, str))
+ if err != nil {
+ return nil, err
+ }
+ }
+ return result, nil
+ case []any:
+ for i := range x {
+ x[i], err = convertYAMLMapKeyTypes(x[i], append(path, strconv.Itoa(i)))
+ if err != nil {
+ return nil, err
+ }
+ }
+ return x, nil
+ default:
+ return x, nil
+ }
+}
+
+// futureKeywords is the source of truth for future keywords that will
+// eventually become standard keywords inside of Rego.
+var futureKeywords = map[string]tokens.Token{}
+
+// futureKeywordsV0 is the source of truth for future keywords that were
+// not yet a standard part of Rego in v0, and required importing.
+var futureKeywordsV0 = map[string]tokens.Token{
+ "in": tokens.In,
+ "every": tokens.Every,
+ "contains": tokens.Contains,
+ "if": tokens.If,
+}
+
+var allFutureKeywords map[string]tokens.Token
+
+func IsFutureKeyword(s string) bool {
+ return IsFutureKeywordForRegoVersion(s, RegoV1)
+}
+
+func IsFutureKeywordForRegoVersion(s string, v RegoVersion) bool {
+ var yes bool
+
+ switch v {
+ case RegoV0, RegoV0CompatV1:
+ _, yes = futureKeywordsV0[s]
+ case RegoV1:
+ _, yes = futureKeywords[s]
+ }
+
+ return yes
+}
+
+// isFutureKeyword answers if keyword is from the "future" with the parser options set.
+func (p *Parser) isFutureKeyword(s string) bool {
+ return IsFutureKeywordForRegoVersion(s, p.po.RegoVersion)
+}
+
+func (p *Parser) futureImport(imp *Import, allowedFutureKeywords map[string]tokens.Token) {
+ path := imp.Path.Value.(Ref)
+
+ if len(path) == 1 || !path[1].Equal(InternedTerm("keywords")) {
+ p.errorf(imp.Path.Location, "invalid import, must be `future.keywords`")
+ return
+ }
+
+ if imp.Alias != "" {
+ p.errorf(imp.Path.Location, "`future` imports cannot be aliased")
+ return
+ }
+
+ kwds := make([]string, 0, len(allowedFutureKeywords))
+ for k := range allowedFutureKeywords {
+ kwds = append(kwds, k)
+ }
+
+ switch len(path) {
+ case 2: // all keywords imported, nothing to do
+ case 3: // one keyword imported
+ kw, ok := path[2].Value.(String)
+ if !ok {
+ p.errorf(imp.Path.Location, "invalid import, must be `future.keywords.x`, e.g. `import future.keywords.in`")
+ return
+ }
+ keyword := string(kw)
+ _, ok = allowedFutureKeywords[keyword]
+ if !ok {
+ sort.Strings(kwds) // so the error message is stable
+ p.errorf(imp.Path.Location, "unexpected keyword, must be one of %v", kwds)
+ return
+ }
+
+ kwds = []string{keyword} // overwrite
+ }
+ for _, kw := range kwds {
+ p.s.s.AddKeyword(kw, allowedFutureKeywords[kw])
+ }
+}
+
+func (p *Parser) regoV1Import(imp *Import) {
+ if !p.po.Capabilities.ContainsFeature(FeatureRegoV1Import) && !p.po.Capabilities.ContainsFeature(FeatureRegoV1) {
+ p.errorf(imp.Path.Location, "invalid import, `%s` is not supported by current capabilities", RegoV1CompatibleRef)
+ return
+ }
+
+ path := imp.Path.Value.(Ref)
+
+ // v1 is only valid option
+ if len(path) == 1 || !path[1].Equal(RegoV1CompatibleRef[1]) || len(path) > 2 {
+ p.errorf(imp.Path.Location, "invalid import `%s`, must be `%s`", path, RegoV1CompatibleRef)
+ return
+ }
+
+ if p.po.EffectiveRegoVersion() == RegoV1 {
+ // We're parsing for Rego v1, where the 'rego.v1' import is a no-op.
+ return
+ }
+
+ if imp.Alias != "" {
+ p.errorf(imp.Path.Location, "`rego` imports cannot be aliased")
+ return
+ }
+
+ // import all future keywords with the rego.v1 import
+ kwds := make([]string, 0, len(futureKeywordsV0))
+ for k := range futureKeywordsV0 {
+ kwds = append(kwds, k)
+ }
+
+ p.s.s.SetRegoV1Compatible()
+ for _, kw := range kwds {
+ p.s.s.AddKeyword(kw, futureKeywordsV0[kw])
+ }
+}
+
+func init() {
+ allFutureKeywords = map[string]tokens.Token{}
+ maps.Copy(allFutureKeywords, futureKeywords)
+ maps.Copy(allFutureKeywords, futureKeywordsV0)
+}
+
+// enter increments the recursion depth counter and checks if it exceeds the maximum.
+// Returns false if the maximum is exceeded, true otherwise.
+// If p.maxRecursionDepth is 0 or negative, the check is effectively disabled.
+func (p *Parser) enter() bool {
+ p.recursionDepth++
+ if p.maxRecursionDepth > 0 && p.recursionDepth > p.maxRecursionDepth {
+ p.error(p.s.Loc(), ErrMaxParsingRecursionDepthExceeded.Error())
+ p.recursionDepth--
+ return false
+ }
+ return true
+}
+
+// leave decrements the recursion depth counter.
+func (p *Parser) leave() {
+ p.recursionDepth--
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/parser_ext.go b/vendor/github.com/open-policy-agent/opa/v1/ast/parser_ext.go
new file mode 100644
index 0000000000..f3d4e0d188
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/parser_ext.go
@@ -0,0 +1,814 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// This file contains extra functions for parsing Rego.
+// Most of the parsing is handled by the code in parser.go,
+// however, there are additional utilities that are
+// helpful for dealing with Rego source inputs (e.g., REPL
+// statements, source files, etc.)
+
+package ast
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "slices"
+ "strings"
+ "unicode"
+
+ "github.com/open-policy-agent/opa/v1/ast/internal/tokens"
+)
+
+// MustParseBody returns a parsed body.
+// If an error occurs during parsing, panic.
+func MustParseBody(input string) Body {
+ return MustParseBodyWithOpts(input, ParserOptions{})
+}
+
+// MustParseBodyWithOpts returns a parsed body.
+// If an error occurs during parsing, panic.
+func MustParseBodyWithOpts(input string, opts ParserOptions) Body {
+ parsed, err := ParseBodyWithOpts(input, opts)
+ if err != nil {
+ panic(err)
+ }
+ return parsed
+}
+
+// MustParseExpr returns a parsed expression.
+// If an error occurs during parsing, panic.
+func MustParseExpr(input string) *Expr {
+ parsed, err := ParseExpr(input)
+ if err != nil {
+ panic(err)
+ }
+ return parsed
+}
+
+// MustParseImports returns a slice of imports.
+// If an error occurs during parsing, panic.
+func MustParseImports(input string) []*Import {
+ parsed, err := ParseImports(input)
+ if err != nil {
+ panic(err)
+ }
+ return parsed
+}
+
+// MustParseModule returns a parsed module.
+// If an error occurs during parsing, panic.
+func MustParseModule(input string) *Module {
+ return MustParseModuleWithOpts(input, ParserOptions{})
+}
+
+// MustParseModuleWithOpts returns a parsed module.
+// If an error occurs during parsing, panic.
+func MustParseModuleWithOpts(input string, opts ParserOptions) *Module {
+ parsed, err := ParseModuleWithOpts("", input, opts)
+ if err != nil {
+ panic(err)
+ }
+ return parsed
+}
+
+// MustParsePackage returns a Package.
+// If an error occurs during parsing, panic.
+func MustParsePackage(input string) *Package {
+ parsed, err := ParsePackage(input)
+ if err != nil {
+ panic(err)
+ }
+ return parsed
+}
+
+// MustParseStatements returns a slice of parsed statements.
+// If an error occurs during parsing, panic.
+func MustParseStatements(input string) []Statement {
+ parsed, _, err := ParseStatements("", input)
+ if err != nil {
+ panic(err)
+ }
+ return parsed
+}
+
+// MustParseStatement returns exactly one statement.
+// If an error occurs during parsing, panic.
+func MustParseStatement(input string) Statement {
+ parsed, err := ParseStatement(input)
+ if err != nil {
+ panic(err)
+ }
+ return parsed
+}
+
+func MustParseStatementWithOpts(input string, popts ParserOptions) Statement {
+ parsed, err := ParseStatementWithOpts(input, popts)
+ if err != nil {
+ panic(err)
+ }
+ return parsed
+}
+
+// MustParseRef returns a parsed reference.
+// If an error occurs during parsing, panic.
+func MustParseRef(input string) Ref {
+ parsed, err := ParseRef(input)
+ if err != nil {
+ panic(err)
+ }
+ return parsed
+}
+
+// MustParseRule returns a parsed rule.
+// If an error occurs during parsing, panic.
+func MustParseRule(input string) *Rule {
+ parsed, err := ParseRule(input)
+ if err != nil {
+ panic(err)
+ }
+ return parsed
+}
+
+// MustParseRuleWithOpts returns a parsed rule.
+// If an error occurs during parsing, panic.
+func MustParseRuleWithOpts(input string, opts ParserOptions) *Rule {
+ parsed, err := ParseRuleWithOpts(input, opts)
+ if err != nil {
+ panic(err)
+ }
+ return parsed
+}
+
+// MustParseTerm returns a parsed term.
+// If an error occurs during parsing, panic.
+func MustParseTerm(input string) *Term {
+ parsed, err := ParseTerm(input)
+ if err != nil {
+ panic(err)
+ }
+ return parsed
+}
+
+// ParseRuleFromBody returns a rule if the body can be interpreted as a rule
+// definition. Otherwise, an error is returned.
+func ParseRuleFromBody(module *Module, body Body) (*Rule, error) {
+
+ if len(body) != 1 {
+ return nil, errors.New("multiple expressions cannot be used for rule head")
+ }
+
+ return ParseRuleFromExpr(module, body[0])
+}
+
+// ParseRuleFromExpr returns a rule if the expression can be interpreted as a
+// rule definition.
+func ParseRuleFromExpr(module *Module, expr *Expr) (*Rule, error) {
+
+ if len(expr.With) > 0 {
+ return nil, errors.New("expressions using with keyword cannot be used for rule head")
+ }
+
+ if expr.Negated {
+ return nil, errors.New("negated expressions cannot be used for rule head")
+ }
+
+ if _, ok := expr.Terms.(*SomeDecl); ok {
+ return nil, errors.New("'some' declarations cannot be used for rule head")
+ }
+
+ if term, ok := expr.Terms.(*Term); ok {
+ switch v := term.Value.(type) {
+ case Ref:
+ if len(v) > 2 { // 2+ dots
+ return ParseCompleteDocRuleWithDotsFromTerm(module, term)
+ }
+ return ParsePartialSetDocRuleFromTerm(module, term)
+ default:
+ return nil, fmt.Errorf("%v cannot be used for rule name", ValueName(v))
+ }
+ }
+
+ if _, ok := expr.Terms.([]*Term); !ok {
+ // This is a defensive check in case other kinds of expression terms are
+ // introduced in the future.
+ return nil, errors.New("expression cannot be used for rule head")
+ }
+
+ if expr.IsEquality() {
+ return parseCompleteRuleFromEq(module, expr)
+ } else if expr.IsAssignment() {
+ rule, err := parseCompleteRuleFromEq(module, expr)
+ if err != nil {
+ return nil, err
+ }
+ rule.Head.Assign = true
+ return rule, nil
+ }
+
+ if _, ok := BuiltinMap[expr.Operator().String()]; ok {
+ return nil, errors.New("rule name conflicts with built-in function")
+ }
+
+ return ParseRuleFromCallExpr(module, expr.Terms.([]*Term))
+}
+
+func parseCompleteRuleFromEq(module *Module, expr *Expr) (rule *Rule, err error) {
+
+ // ensure the rule location is set to the expr location
+ // the helper functions called below try to set the location based
+ // on the terms they've been provided but that is not as accurate.
+ defer func() {
+ if rule != nil {
+ rule.Location = expr.Location
+ rule.Head.Location = expr.Location
+ }
+ }()
+
+ lhs, rhs := expr.Operand(0), expr.Operand(1)
+ if lhs == nil || rhs == nil {
+ return nil, errors.New("assignment requires two operands")
+ }
+
+ rule, err = ParseRuleFromCallEqExpr(module, lhs, rhs)
+ if err == nil {
+ return rule, nil
+ }
+
+ rule, err = ParsePartialObjectDocRuleFromEqExpr(module, lhs, rhs)
+ if err == nil {
+ return rule, nil
+ }
+
+ return ParseCompleteDocRuleFromEqExpr(module, lhs, rhs)
+}
+
+// ParseCompleteDocRuleFromAssignmentExpr returns a rule if the expression can
+// be interpreted as a complete document definition declared with the assignment
+// operator.
+func ParseCompleteDocRuleFromAssignmentExpr(module *Module, lhs, rhs *Term) (*Rule, error) {
+
+ rule, err := ParseCompleteDocRuleFromEqExpr(module, lhs, rhs)
+ if err != nil {
+ return nil, err
+ }
+
+ rule.Head.Assign = true
+
+ return rule, nil
+}
+
+// ParseCompleteDocRuleFromEqExpr returns a rule if the expression can be
+// interpreted as a complete document definition.
+func ParseCompleteDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) {
+ var head *Head
+
+ if v, ok := lhs.Value.(Var); ok {
+ // Modify the code to add the location to the head ref
+ head = VarHead(v, lhs.Location, nil)
+ } else if r, ok := lhs.Value.(Ref); ok { // groundness ?
+ if _, ok := r[0].Value.(Var); !ok {
+ return nil, fmt.Errorf("invalid rule head: %v", r)
+ }
+ head = RefHead(r)
+ if len(r) > 1 && !r[len(r)-1].IsGround() {
+ return nil, errors.New("ref not ground")
+ }
+ } else {
+ return nil, fmt.Errorf("%v cannot be used for rule name", ValueName(lhs.Value))
+ }
+ head.Value = rhs
+ head.Location = lhs.Location
+
+ body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location))
+
+ return &Rule{
+ Location: lhs.Location,
+ Head: head,
+ Body: body,
+ Module: module,
+ generatedBody: true,
+ }, nil
+}
+
+func ParseCompleteDocRuleWithDotsFromTerm(module *Module, term *Term) (*Rule, error) {
+ ref, ok := term.Value.(Ref)
+ if !ok {
+ return nil, fmt.Errorf("%v cannot be used for rule name", ValueName(term.Value))
+ }
+
+ if _, ok := ref[0].Value.(Var); !ok {
+ return nil, fmt.Errorf("invalid rule head: %v", ref)
+ }
+ head := RefHead(ref, BooleanTerm(true).SetLocation(term.Location))
+ head.generatedValue = true
+ head.Location = term.Location
+
+ body := NewBody(NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location))
+
+ return &Rule{
+ Location: term.Location,
+ Head: head,
+ Body: body,
+ Module: module,
+ }, nil
+}
+
+// ParsePartialObjectDocRuleFromEqExpr returns a rule if the expression can be
+// interpreted as a partial object document definition.
+func ParsePartialObjectDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) {
+ ref, ok := lhs.Value.(Ref)
+ if !ok {
+ return nil, fmt.Errorf("%v cannot be used as rule name", ValueName(lhs.Value))
+ }
+
+ if _, ok := ref[0].Value.(Var); !ok {
+ return nil, fmt.Errorf("invalid rule head: %v", ref)
+ }
+
+ head := RefHead(ref, rhs)
+ if len(ref) == 2 { // backcompat for naked `foo.bar = "baz"` statements
+ head.Name = ref[0].Value.(Var)
+ head.Key = ref[1]
+ }
+ head.Location = rhs.Location
+
+ body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location))
+
+ rule := &Rule{
+ Location: rhs.Location,
+ Head: head,
+ Body: body,
+ Module: module,
+ }
+
+ return rule, nil
+}
+
+// ParsePartialSetDocRuleFromTerm returns a rule if the term can be interpreted
+// as a partial set document definition.
+func ParsePartialSetDocRuleFromTerm(module *Module, term *Term) (*Rule, error) {
+
+ ref, ok := term.Value.(Ref)
+ if !ok || len(ref) == 1 {
+ return nil, fmt.Errorf("%vs cannot be used for rule head", ValueName(term.Value))
+ }
+ if _, ok := ref[0].Value.(Var); !ok {
+ return nil, fmt.Errorf("invalid rule head: %v", ref)
+ }
+
+ head := RefHead(ref)
+ if len(ref) == 2 {
+ v, ok := ref[0].Value.(Var)
+ if !ok {
+ return nil, fmt.Errorf("%vs cannot be used for rule head", ValueName(term.Value))
+ }
+ // Modify the code to add the location to the head ref
+ head = VarHead(v, ref[0].Location, nil)
+ head.Key = ref[1]
+ }
+ head.Location = term.Location
+
+ body := NewBody(NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location))
+
+ rule := &Rule{
+ Location: term.Location,
+ Head: head,
+ Body: body,
+ Module: module,
+ }
+
+ return rule, nil
+}
+
+// ParseRuleFromCallEqExpr returns a rule if the term can be interpreted as a
+// function definition (e.g., f(x) = y => f(x) = y { true }).
+func ParseRuleFromCallEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) {
+
+ call, ok := lhs.Value.(Call)
+ if !ok {
+ return nil, errors.New("must be call")
+ }
+
+ ref, ok := call[0].Value.(Ref)
+ if !ok {
+ return nil, fmt.Errorf("%vs cannot be used in function signature", ValueName(call[0].Value))
+ }
+ if _, ok := ref[0].Value.(Var); !ok {
+ return nil, fmt.Errorf("invalid rule head: %v", ref)
+ }
+
+ head := RefHead(ref, rhs)
+ head.Location = lhs.Location
+ head.Args = Args(call[1:])
+
+ body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location))
+
+ rule := &Rule{
+ Location: lhs.Location,
+ Head: head,
+ Body: body,
+ Module: module,
+ }
+
+ return rule, nil
+}
+
+// ParseRuleFromCallExpr returns a rule if the terms can be interpreted as a
+// function returning true or some value (e.g., f(x) => f(x) = true { true }).
+func ParseRuleFromCallExpr(module *Module, terms []*Term) (*Rule, error) {
+
+ if len(terms) <= 1 {
+ return nil, errors.New("rule argument list must take at least one argument")
+ }
+
+ loc := terms[0].Location
+ ref := terms[0].Value.(Ref)
+ if _, ok := ref[0].Value.(Var); !ok {
+ return nil, fmt.Errorf("invalid rule head: %v", ref)
+ }
+ head := RefHead(ref, BooleanTerm(true).SetLocation(loc))
+ head.Location = loc
+ head.Args = terms[1:]
+
+ body := NewBody(NewExpr(BooleanTerm(true).SetLocation(loc)).SetLocation(loc))
+
+ rule := &Rule{
+ Location: loc,
+ Head: head,
+ Module: module,
+ Body: body,
+ }
+ return rule, nil
+}
+
+// ParseImports returns a slice of Import objects.
+func ParseImports(input string) ([]*Import, error) {
+ stmts, _, err := ParseStatements("", input)
+ if err != nil {
+ return nil, err
+ }
+ result := []*Import{}
+ for _, stmt := range stmts {
+ if imp, ok := stmt.(*Import); ok {
+ result = append(result, imp)
+ } else {
+ return nil, fmt.Errorf("expected import but got %T", stmt)
+ }
+ }
+ return result, nil
+}
+
+// ParseModule returns a parsed Module object.
+// For details on Module objects and their fields, see policy.go.
+// Empty input will return nil, nil.
+func ParseModule(filename, input string) (*Module, error) {
+ return ParseModuleWithOpts(filename, input, ParserOptions{})
+}
+
+// ParseModuleWithOpts returns a parsed Module object, and has an additional input ParserOptions
+// For details on Module objects and their fields, see policy.go.
+// Empty input will return nil, nil.
+func ParseModuleWithOpts(filename, input string, popts ParserOptions) (*Module, error) {
+ stmts, comments, err := ParseStatementsWithOpts(filename, input, popts)
+ if err != nil {
+ return nil, err
+ }
+ return parseModule(filename, stmts, comments, popts.RegoVersion)
+}
+
+// ParseBody returns exactly one body.
+// If multiple bodies are parsed, an error is returned.
+func ParseBody(input string) (Body, error) {
+ return ParseBodyWithOpts(input, ParserOptions{SkipRules: true})
+}
+
+// ParseBodyWithOpts returns exactly one body. It does _not_ set SkipRules: true on its own,
+// but respects whatever ParserOptions it's been given.
+func ParseBodyWithOpts(input string, popts ParserOptions) (Body, error) {
+
+ stmts, _, err := ParseStatementsWithOpts("", input, popts)
+ if err != nil {
+ return nil, err
+ }
+
+ result := Body{}
+
+ for _, stmt := range stmts {
+ switch stmt := stmt.(type) {
+ case Body:
+ for i := range stmt {
+ result.Append(stmt[i])
+ }
+ case *Comment:
+ // skip
+ default:
+ return nil, fmt.Errorf("expected body but got %T", stmt)
+ }
+ }
+
+ return result, nil
+}
+
+// ParseExpr returns exactly one expression.
+// If multiple expressions are parsed, an error is returned.
+func ParseExpr(input string) (*Expr, error) {
+ body, err := ParseBody(input)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse expression: %w", err)
+ }
+ if len(body) != 1 {
+ return nil, fmt.Errorf("expected exactly one expression but got: %v", body)
+ }
+ return body[0], nil
+}
+
+// ParsePackage returns exactly one Package.
+// If multiple statements are parsed, an error is returned.
+func ParsePackage(input string) (*Package, error) {
+ stmt, err := ParseStatement(input)
+ if err != nil {
+ return nil, err
+ }
+ pkg, ok := stmt.(*Package)
+ if !ok {
+ return nil, fmt.Errorf("expected package but got %T", stmt)
+ }
+ return pkg, nil
+}
+
+// ParseTerm returns exactly one term.
+// If multiple terms are parsed, an error is returned.
+func ParseTerm(input string) (*Term, error) {
+ body, err := ParseBody(input)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse term: %w", err)
+ }
+ if len(body) != 1 {
+ return nil, fmt.Errorf("expected exactly one term but got: %v", body)
+ }
+ term, ok := body[0].Terms.(*Term)
+ if !ok {
+ return nil, fmt.Errorf("expected term but got %v", body[0].Terms)
+ }
+ return term, nil
+}
+
+// ParseRef returns exactly one reference.
+func ParseRef(input string) (Ref, error) {
+ term, err := ParseTerm(input)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse ref: %w", err)
+ }
+ ref, ok := term.Value.(Ref)
+ if !ok {
+ return nil, fmt.Errorf("expected ref but got %v", term)
+ }
+ return ref, nil
+}
+
+// ParseRuleWithOpts returns exactly one rule.
+// If multiple rules are parsed, an error is returned.
+func ParseRuleWithOpts(input string, opts ParserOptions) (*Rule, error) {
+ stmts, _, err := ParseStatementsWithOpts("", input, opts)
+ if err != nil {
+ return nil, err
+ }
+ if len(stmts) != 1 {
+ return nil, fmt.Errorf("expected exactly one statement (rule), got %v = %T, %T", stmts, stmts[0], stmts[1])
+ }
+ rule, ok := stmts[0].(*Rule)
+ if !ok {
+ return nil, fmt.Errorf("expected rule but got %T", stmts[0])
+ }
+ return rule, nil
+}
+
+// ParseRule returns exactly one rule.
+// If multiple rules are parsed, an error is returned.
+func ParseRule(input string) (*Rule, error) {
+ return ParseRuleWithOpts(input, ParserOptions{})
+}
+
+// ParseStatement returns exactly one statement.
+// A statement might be a term, expression, rule, etc. Regardless,
+// this function expects *exactly* one statement. If multiple
+// statements are parsed, an error is returned.
+func ParseStatement(input string) (Statement, error) {
+ stmts, _, err := ParseStatements("", input)
+ if err != nil {
+ return nil, err
+ }
+ if len(stmts) != 1 {
+ return nil, errors.New("expected exactly one statement")
+ }
+ return stmts[0], nil
+}
+
+func ParseStatementWithOpts(input string, popts ParserOptions) (Statement, error) {
+ stmts, _, err := ParseStatementsWithOpts("", input, popts)
+ if err != nil {
+ return nil, err
+ }
+ if len(stmts) != 1 {
+ return nil, errors.New("expected exactly one statement")
+ }
+ return stmts[0], nil
+}
+
+// ParseStatements is deprecated. Use ParseStatementWithOpts instead.
+func ParseStatements(filename, input string) ([]Statement, []*Comment, error) {
+ return ParseStatementsWithOpts(filename, input, ParserOptions{})
+}
+
+// ParseStatementsWithOpts returns a slice of parsed statements. This is the
+// default return value from the parser.
+func ParseStatementsWithOpts(filename, input string, popts ParserOptions) ([]Statement, []*Comment, error) {
+
+ parser := NewParser().
+ WithFilename(filename).
+ WithReader(bytes.NewBufferString(input)).
+ WithProcessAnnotation(popts.ProcessAnnotation).
+ WithFutureKeywords(popts.FutureKeywords...).
+ WithAllFutureKeywords(popts.AllFutureKeywords).
+ WithCapabilities(popts.Capabilities).
+ WithSkipRules(popts.SkipRules).
+ WithRegoVersion(popts.RegoVersion).
+ withUnreleasedKeywords(popts.unreleasedKeywords)
+
+ stmts, comments, errs := parser.Parse()
+
+ if len(errs) > 0 {
+ return nil, nil, errs
+ }
+
+ return stmts, comments, nil
+}
+
+func parseModule(filename string, stmts []Statement, comments []*Comment, regoCompatibilityMode RegoVersion) (*Module, error) {
+
+ if len(stmts) == 0 {
+ return nil, NewError(ParseErr, &Location{File: filename}, "empty module")
+ }
+
+ var errs Errors
+
+ pkg, ok := stmts[0].(*Package)
+ if !ok {
+ loc := stmts[0].Loc()
+ errs = append(errs, NewError(ParseErr, loc, "package expected"))
+ }
+
+ mod := &Module{
+ Package: pkg,
+ stmts: stmts,
+ }
+
+ // The comments slice only holds comments that were not their own statements.
+ mod.Comments = append(mod.Comments, comments...)
+
+ if regoCompatibilityMode == RegoUndefined {
+ mod.regoVersion = DefaultRegoVersion
+ } else {
+ mod.regoVersion = regoCompatibilityMode
+ }
+
+ for i, stmt := range stmts[1:] {
+ switch stmt := stmt.(type) {
+ case *Import:
+ mod.Imports = append(mod.Imports, stmt)
+ if mod.regoVersion == RegoV0 && Compare(stmt.Path.Value, RegoV1CompatibleRef) == 0 {
+ mod.regoVersion = RegoV0CompatV1
+ }
+ case *Rule:
+ setRuleModule(stmt, mod)
+ mod.Rules = append(mod.Rules, stmt)
+ case Body:
+ rule, err := ParseRuleFromBody(mod, stmt)
+ if err != nil {
+ errs = append(errs, NewError(ParseErr, stmt[0].Location, "%s", err.Error()))
+ continue
+ }
+ rule.generatedBody = true
+ mod.Rules = append(mod.Rules, rule)
+
+ // NOTE(tsandall): the statement should now be interpreted as a
+ // rule so update the statement list. This is important for the
+ // logic below that associates annotations with statements.
+ stmts[i+1] = rule
+ case *Package:
+ errs = append(errs, NewError(ParseErr, stmt.Loc(), "unexpected package"))
+ case *Annotations:
+ mod.Annotations = append(mod.Annotations, stmt)
+ case *Comment:
+ // Ignore comments, they're handled above.
+ default:
+ panic("illegal value") // Indicates grammar is out-of-sync with code.
+ }
+ }
+
+ if mod.regoVersion == RegoV0CompatV1 || mod.regoVersion == RegoV1 {
+ for _, rule := range mod.Rules {
+ for r := rule; r != nil; r = r.Else {
+ errs = append(errs, CheckRegoV1(r)...)
+ }
+ }
+ }
+
+ if len(errs) > 0 {
+ return nil, errs
+ }
+
+ errs = append(errs, attachAnnotationsNodes(mod)...)
+
+ if len(errs) > 0 {
+ return nil, errs
+ }
+
+ attachRuleAnnotations(mod)
+
+ return mod, nil
+}
+
+func ruleDeclarationHasKeyword(rule *Rule, keyword tokens.Token) bool {
+ return slices.Contains(rule.Head.keywords, keyword)
+}
+
+func newScopeAttachmentErr(a *Annotations, want string) *Error {
+ var have string
+ if a.node != nil {
+ have = fmt.Sprintf(" (have %v)", TypeName(a.node))
+ }
+ return NewError(ParseErr, a.Loc(), "annotation scope '%v' must be applied to %v%v", a.Scope, want, have)
+}
+
+func setRuleModule(rule *Rule, module *Module) {
+ rule.Module = module
+ if rule.Else != nil {
+ setRuleModule(rule.Else, module)
+ }
+}
+
+// ParserErrorDetail holds additional details for parser errors.
+type ParserErrorDetail struct {
+ Line string `json:"line"`
+ Idx int `json:"idx"`
+}
+
+func newParserErrorDetail(bs []byte, offset int) *ParserErrorDetail {
+
+ // Find first non-space character at or before offset position.
+ if offset >= len(bs) {
+ offset = len(bs) - 1
+ } else if offset < 0 {
+ offset = 0
+ }
+
+ for offset > 0 && unicode.IsSpace(rune(bs[offset])) {
+ offset--
+ }
+
+ // Find beginning of line containing offset.
+ begin := offset
+
+ for begin > 0 && !isNewLineChar(bs[begin]) {
+ begin--
+ }
+
+ if isNewLineChar(bs[begin]) {
+ begin++
+ }
+
+ // Find end of line containing offset.
+ end := offset
+
+ for end < len(bs) && !isNewLineChar(bs[end]) {
+ end++
+ }
+
+ if begin > end {
+ begin = end
+ }
+
+ // Extract line and compute index of offset byte in line.
+ line := bs[begin:end]
+ index := offset - begin
+
+ return &ParserErrorDetail{
+ Line: string(line),
+ Idx: index,
+ }
+}
+
+// Lines returns the pretty formatted line output for the error details.
+func (d ParserErrorDetail) Lines() []string {
+ line := strings.TrimLeft(d.Line, "\t") // remove leading tabs
+ tabCount := len(d.Line) - len(line)
+ indent := max(d.Idx-tabCount, 0)
+ return []string{line, strings.Repeat(" ", indent) + "^"}
+}
+
+func isNewLineChar(b byte) bool {
+ return b == '\r' || b == '\n'
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/performance.go b/vendor/github.com/open-policy-agent/opa/v1/ast/performance.go
new file mode 100644
index 0000000000..3e285f963d
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/performance.go
@@ -0,0 +1,85 @@
+// Copyright 2025 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+package ast
+
+import (
+ "strings"
+ "sync"
+)
+
+var builtinNamesByNumParts = sync.OnceValue(func() map[int][]string {
+ m := map[int][]string{}
+ for name := range BuiltinMap {
+ parts := strings.Count(name, ".") + 1
+ if parts > 1 {
+ m[parts] = append(m[parts], name)
+ }
+ }
+ return m
+})
+
+// BuiltinNameFromRef attempts to extract a known built-in function name from a ref,
+// in the most efficient way possible. I.e. without allocating memory for a new string.
+// If no built-in function name can be extracted, the second return value is false.
+func BuiltinNameFromRef(ref Ref) (string, bool) {
+ reflen := len(ref)
+ if reflen == 0 {
+ return "", false
+ }
+
+ _var, ok := ref[0].Value.(Var)
+ if !ok {
+ return "", false
+ }
+
+ varName := string(_var)
+ if reflen == 1 {
+ if _, ok := BuiltinMap[varName]; ok {
+ return varName, true
+ }
+ return "", false
+ }
+
+ totalLen := len(varName)
+ for _, term := range ref[1:] {
+ if _, ok = term.Value.(String); !ok {
+ return "", false
+ }
+ totalLen += 1 + len(term.Value.(String)) // account for dot
+ }
+
+ matched, ok := builtinNamesByNumParts()[reflen]
+ if !ok {
+ return "", false
+ }
+
+ for _, name := range matched {
+ // This check saves us a huge amount of work, as only very few built-in
+ // names will have the exact same length as the ref we are checking.
+ if len(name) != totalLen {
+ continue
+ }
+ // Example: `name` is "io.jwt.decode" (and so is ref)
+ // The first part is varName, which have already been established to be 'io':
+ // io, jwt.decode io == io
+ if curr, remaining, _ := strings.Cut(name, "."); curr == varName {
+ // Loop over the remaining (now known to be string) terms in the ref, e.g. "jwt" and "decode"
+ for _, term := range ref[1:] {
+ ts := string(term.Value.(String))
+ // First iteration: jwt.decode != jwt, so we continue cutting
+ // Second iteration: remaining is "decode", and so is term
+ if remaining == ts {
+ return name, true
+ }
+ // Cutting remaining (e.g. jwt.decode), and we now get:
+ // jwt, decode, false || jwt != jwt
+ if curr, remaining, _ = strings.Cut(remaining, "."); remaining == "" || curr != ts {
+ break
+ }
+ }
+ }
+ }
+
+ return "", false
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/policy.go b/vendor/github.com/open-policy-agent/opa/v1/ast/policy.go
new file mode 100644
index 0000000000..62c82f51ec
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/policy.go
@@ -0,0 +1,2005 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "slices"
+ "strings"
+
+ "github.com/open-policy-agent/opa/v1/ast/internal/tokens"
+ astJSON "github.com/open-policy-agent/opa/v1/ast/json"
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+// DefaultRootDocument is the default root document.
+//
+// All package directives inside source files are implicitly prefixed with the
+// DefaultRootDocument value.
+var DefaultRootDocument = VarTerm("data")
+
+// InputRootDocument names the document containing query arguments.
+var InputRootDocument = VarTerm("input")
+
+// SchemaRootDocument names the document containing external data schemas.
+var SchemaRootDocument = VarTerm("schema")
+
+// FunctionArgRootDocument names the document containing function arguments.
+// It's only for internal usage, for referencing function arguments between
+// the index and topdown.
+var FunctionArgRootDocument = VarTerm("args")
+
+// FutureRootDocument names the document containing new, to-become-default,
+// features.
+var FutureRootDocument = VarTerm("future")
+
+// RegoRootDocument names the document containing new, to-become-default,
+// features in a future versioned release.
+var RegoRootDocument = VarTerm("rego")
+
+// RootDocumentNames contains the names of top-level documents that can be
+// referred to in modules and queries.
+//
+// Note, the schema document is not currently implemented in the evaluator so it
+// is not registered as a root document name (yet).
+var RootDocumentNames = NewSet(
+ DefaultRootDocument,
+ InputRootDocument,
+)
+
+// DefaultRootRef is a reference to the root of the default document.
+//
+// All refs to data in the policy engine's storage layer are prefixed with this ref.
+var DefaultRootRef = Ref{DefaultRootDocument}
+
+// InputRootRef is a reference to the root of the input document.
+//
+// All refs to query arguments are prefixed with this ref.
+var InputRootRef = Ref{InputRootDocument}
+
+// SchemaRootRef is a reference to the root of the schema document.
+//
+// All refs to schema documents are prefixed with this ref. Note, the schema
+// document is not currently implemented in the evaluator so it is not
+// registered as a root document ref (yet).
+var SchemaRootRef = Ref{SchemaRootDocument}
+
+// RootDocumentRefs contains the prefixes of top-level documents that all
+// non-local references start with.
+var RootDocumentRefs = NewSet(
+ NewTerm(DefaultRootRef),
+ NewTerm(InputRootRef),
+)
+
+// SystemDocumentKey is the name of the top-level key that identifies the system
+// document.
+const SystemDocumentKey = String("system")
+
+// ReservedVars is the set of names that refer to implicitly ground vars.
+var ReservedVars = NewVarSet(
+ DefaultRootDocument.Value.(Var),
+ InputRootDocument.Value.(Var),
+)
+
+// Wildcard represents the wildcard variable as defined in the language.
+var Wildcard = &Term{Value: Var("_")}
+
+// WildcardPrefix is the special character that all wildcard variables are
+// prefixed with when the statement they are contained in is parsed.
+const WildcardPrefix = "$"
+
+// Keywords contains strings that map to language keywords.
+var Keywords = KeywordsForRegoVersion(DefaultRegoVersion)
+
+var KeywordsV0 = [...]string{
+ "not",
+ "package",
+ "import",
+ "as",
+ "default",
+ "else",
+ "with",
+ "null",
+ "true",
+ "false",
+ "some",
+}
+
+var KeywordsV1 = [...]string{
+ "not",
+ "package",
+ "import",
+ "as",
+ "default",
+ "else",
+ "with",
+ "null",
+ "true",
+ "false",
+ "some",
+ "if",
+ "contains",
+ "in",
+ "every",
+}
+
+func KeywordsForRegoVersion(v RegoVersion) []string {
+ switch v {
+ case RegoV0:
+ return KeywordsV0[:]
+ case RegoV1, RegoV0CompatV1:
+ return KeywordsV1[:]
+ }
+ return nil
+}
+
+// IsKeyword returns true if s is a language keyword.
+func IsKeyword(s string) bool {
+ return IsInKeywords(s, Keywords)
+}
+
+func IsInKeywords(s string, keywords []string) bool {
+ return slices.Contains(keywords, s)
+}
+
+// IsKeywordInRegoVersion returns true if s is a language keyword.
+func IsKeywordInRegoVersion(s string, regoVersion RegoVersion) bool {
+ switch regoVersion {
+ case RegoV0:
+ for _, x := range KeywordsV0 {
+ if x == s {
+ return true
+ }
+ }
+ case RegoV1, RegoV0CompatV1:
+ for _, x := range KeywordsV1 {
+ if x == s {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+type (
+ // Node represents a node in an AST. Nodes may be statements in a policy module
+ // or elements of an ad-hoc query, expression, etc.
+ Node interface {
+ fmt.Stringer
+ Loc() *Location
+ SetLoc(*Location)
+ }
+
+ // Statement represents a single statement in a policy module.
+ Statement interface {
+ Node
+ }
+)
+
+type (
+
+ // Module represents a collection of policies (defined by rules)
+ // within a namespace (defined by the package) and optional
+ // dependencies on external documents (defined by imports).
+ Module struct {
+ Package *Package `json:"package"`
+ Imports []*Import `json:"imports,omitempty"`
+ Annotations []*Annotations `json:"annotations,omitempty"`
+ Rules []*Rule `json:"rules,omitempty"`
+ Comments []*Comment `json:"comments,omitempty"`
+ stmts []Statement
+ regoVersion RegoVersion
+ }
+
+ // Comment contains the raw text from the comment in the definition.
+ Comment struct {
+ // TODO: these fields have inconsistent JSON keys with other structs in this package.
+ Text []byte
+ Location *Location
+ }
+
+ // Package represents the namespace of the documents produced
+ // by rules inside the module.
+ Package struct {
+ Path Ref `json:"path"`
+ Location *Location `json:"location,omitempty"`
+ }
+
+ // Import represents a dependency on a document outside of the policy
+ // namespace. Imports are optional.
+ Import struct {
+ Path *Term `json:"path"`
+ Alias Var `json:"alias,omitempty"`
+ Location *Location `json:"location,omitempty"`
+ }
+
+ // Rule represents a rule as defined in the language. Rules define the
+ // content of documents that represent policy decisions.
+ Rule struct {
+ Default bool `json:"default,omitempty"`
+ Head *Head `json:"head"`
+ Body Body `json:"body"`
+ Else *Rule `json:"else,omitempty"`
+ Location *Location `json:"location,omitempty"`
+ Annotations []*Annotations `json:"annotations,omitempty"`
+
+ // Module is a pointer to the module containing this rule. If the rule
+ // was NOT created while parsing/constructing a module, this should be
+ // left unset. The pointer is not included in any standard operations
+ // on the rule (e.g., printing, comparison, visiting, etc.)
+ Module *Module `json:"-"`
+
+ generatedBody bool
+ }
+
+ // Head represents the head of a rule.
+ Head struct {
+ Name Var `json:"name,omitempty"`
+ Reference Ref `json:"ref,omitempty"`
+ Args Args `json:"args,omitempty"`
+ Key *Term `json:"key,omitempty"`
+ Value *Term `json:"value,omitempty"`
+ Assign bool `json:"assign,omitempty"`
+ Location *Location `json:"location,omitempty"`
+
+ keywords []tokens.Token
+ generatedValue bool
+ }
+
+ // Args represents zero or more arguments to a rule.
+ Args []*Term
+
+ // Body represents one or more expressions contained inside a rule or user
+ // function.
+ Body []*Expr
+
+ // Expr represents a single expression contained inside the body of a rule.
+ Expr struct {
+ With []*With `json:"with,omitempty"`
+ Terms any `json:"terms"`
+ Index int `json:"index"`
+ Generated bool `json:"generated,omitempty"`
+ Negated bool `json:"negated,omitempty"`
+ Location *Location `json:"location,omitempty"`
+
+ generatedFrom *Expr
+ generates []*Expr
+ }
+
+ // SomeDecl represents a variable declaration statement. The symbols are variables.
+ SomeDecl struct {
+ Symbols []*Term `json:"symbols"`
+ Location *Location `json:"location,omitempty"`
+ }
+
+ Every struct {
+ Key *Term `json:"key"`
+ Value *Term `json:"value"`
+ Domain *Term `json:"domain"`
+ Body Body `json:"body"`
+ Location *Location `json:"location,omitempty"`
+ }
+
+ // With represents a modifier on an expression.
+ With struct {
+ Target *Term `json:"target"`
+ Value *Term `json:"value"`
+ Location *Location `json:"location,omitempty"`
+ }
+)
+
+// SetModuleRegoVersion sets the RegoVersion for the Module.
+func SetModuleRegoVersion(mod *Module, v RegoVersion) {
+ mod.regoVersion = v
+}
+
+// Compare returns an integer indicating whether mod is less than, equal to,
+// or greater than other.
+func (mod *Module) Compare(other *Module) int {
+ if mod == nil {
+ if other == nil {
+ return 0
+ }
+ return -1
+ } else if other == nil {
+ return 1
+ }
+ if cmp := mod.Package.Compare(other.Package); cmp != 0 {
+ return cmp
+ }
+ if cmp := importsCompare(mod.Imports, other.Imports); cmp != 0 {
+ return cmp
+ }
+ if cmp := annotationsCompare(mod.Annotations, other.Annotations); cmp != 0 {
+ return cmp
+ }
+ return rulesCompare(mod.Rules, other.Rules)
+}
+
+// Copy returns a deep copy of mod.
+func (mod *Module) Copy() *Module {
+ cpy := *mod
+ cpy.Rules = make([]*Rule, len(mod.Rules))
+
+ nodes := make(map[Node]Node, len(mod.Rules)+len(mod.Imports)+1 /* package */)
+
+ for i := range mod.Rules {
+ cpy.Rules[i] = mod.Rules[i].Copy()
+ cpy.Rules[i].Module = &cpy
+ nodes[mod.Rules[i]] = cpy.Rules[i]
+ }
+
+ cpy.Imports = make([]*Import, len(mod.Imports))
+ for i := range mod.Imports {
+ cpy.Imports[i] = mod.Imports[i].Copy()
+ nodes[mod.Imports[i]] = cpy.Imports[i]
+ }
+
+ cpy.Package = mod.Package.Copy()
+ nodes[mod.Package] = cpy.Package
+
+ cpy.Annotations = make([]*Annotations, len(mod.Annotations))
+ for i, a := range mod.Annotations {
+ cpy.Annotations[i] = a.Copy(nodes[a.node])
+ }
+
+ cpy.Comments = make([]*Comment, len(mod.Comments))
+ for i := range mod.Comments {
+ cpy.Comments[i] = mod.Comments[i].Copy()
+ }
+
+ cpy.stmts = make([]Statement, len(mod.stmts))
+ for i := range mod.stmts {
+ cpy.stmts[i] = nodes[mod.stmts[i]]
+ }
+
+ return &cpy
+}
+
+// Equal returns true if mod equals other.
+func (mod *Module) Equal(other *Module) bool {
+ return mod.Compare(other) == 0
+}
+
+func (mod *Module) String() string {
+ byNode := map[Node][]*Annotations{}
+ for _, a := range mod.Annotations {
+ byNode[a.node] = append(byNode[a.node], a)
+ }
+
+ appendAnnotationStrings := func(buf []string, node Node) []string {
+ if as, ok := byNode[node]; ok {
+ for i := range as {
+ buf = append(buf, "# METADATA")
+ buf = append(buf, "# "+as[i].String())
+ }
+ }
+ return buf
+ }
+
+ buf := []string{}
+ buf = appendAnnotationStrings(buf, mod.Package)
+ buf = append(buf, mod.Package.String())
+
+ if len(mod.Imports) > 0 {
+ buf = append(buf, "")
+ for _, imp := range mod.Imports {
+ buf = appendAnnotationStrings(buf, imp)
+ buf = append(buf, imp.String())
+ }
+ }
+ if len(mod.Rules) > 0 {
+ buf = append(buf, "")
+ for _, rule := range mod.Rules {
+ buf = appendAnnotationStrings(buf, rule)
+ buf = append(buf, rule.stringWithOpts(toStringOpts{regoVersion: mod.regoVersion}))
+ }
+ }
+ return strings.Join(buf, "\n")
+}
+
+// RuleSet returns a RuleSet containing named rules in the mod.
+func (mod *Module) RuleSet(name Var) RuleSet {
+ rs := NewRuleSet()
+ for _, rule := range mod.Rules {
+ if rule.Head.Name.Equal(name) {
+ rs.Add(rule)
+ }
+ }
+ return rs
+}
+
+// UnmarshalJSON parses bs and stores the result in mod. The rules in the module
+// will have their module pointer set to mod.
+func (mod *Module) UnmarshalJSON(bs []byte) error {
+
+ // Declare a new type and use a type conversion to avoid recursively calling
+ // Module#UnmarshalJSON.
+ type module Module
+
+ if err := util.UnmarshalJSON(bs, (*module)(mod)); err != nil {
+ return err
+ }
+
+ WalkRules(mod, func(rule *Rule) bool {
+ rule.Module = mod
+ return false
+ })
+
+ return nil
+}
+
+func (mod *Module) regoV1Compatible() bool {
+ return mod.regoVersion == RegoV1 || mod.regoVersion == RegoV0CompatV1
+}
+
+func (mod *Module) RegoVersion() RegoVersion {
+ return mod.regoVersion
+}
+
+// SetRegoVersion sets the RegoVersion for the module.
+// Note: Setting a rego-version that does not match the module's rego-version might have unintended consequences.
+func (mod *Module) SetRegoVersion(v RegoVersion) {
+ mod.regoVersion = v
+}
+
+// NewComment returns a new Comment object.
+func NewComment(text []byte) *Comment {
+ return &Comment{
+ Text: text,
+ }
+}
+
+// Loc returns the location of the comment in the definition.
+func (c *Comment) Loc() *Location {
+ if c == nil {
+ return nil
+ }
+ return c.Location
+}
+
+// SetLoc sets the location on c.
+func (c *Comment) SetLoc(loc *Location) {
+ c.Location = loc
+}
+
+func (c *Comment) String() string {
+ return "#" + string(c.Text)
+}
+
+// Copy returns a deep copy of c.
+func (c *Comment) Copy() *Comment {
+ cpy := *c
+ cpy.Text = make([]byte, len(c.Text))
+ copy(cpy.Text, c.Text)
+ return &cpy
+}
+
+// Equal returns true if this comment equals the other comment.
+// Unlike other equality checks on AST nodes, comment equality
+// depends on location.
+func (c *Comment) Equal(other *Comment) bool {
+ return c.Location.Equal(other.Location) && bytes.Equal(c.Text, other.Text)
+}
+
+// Compare returns an integer indicating whether pkg is less than, equal to,
+// or greater than other.
+func (pkg *Package) Compare(other *Package) int {
+ return termSliceCompare(pkg.Path, other.Path)
+}
+
+// Copy returns a deep copy of pkg.
+func (pkg *Package) Copy() *Package {
+ cpy := *pkg
+ cpy.Path = pkg.Path.Copy()
+ return &cpy
+}
+
+// Equal returns true if pkg is equal to other.
+func (pkg *Package) Equal(other *Package) bool {
+ return pkg.Compare(other) == 0
+}
+
+// Loc returns the location of the Package in the definition.
+func (pkg *Package) Loc() *Location {
+ if pkg == nil {
+ return nil
+ }
+ return pkg.Location
+}
+
+// SetLoc sets the location on pkg.
+func (pkg *Package) SetLoc(loc *Location) {
+ pkg.Location = loc
+}
+
+func (pkg *Package) String() string {
+ if pkg == nil {
+ return ""
+ } else if len(pkg.Path) <= 1 {
+ return fmt.Sprintf("package ", pkg.Path)
+ }
+ // Omit head as all packages have the DefaultRootDocument prepended at parse time.
+ path := make(Ref, len(pkg.Path)-1)
+ path[0] = VarTerm(string(pkg.Path[1].Value.(String)))
+ copy(path[1:], pkg.Path[2:])
+ return fmt.Sprintf("package %v", path)
+}
+
+func (pkg *Package) MarshalJSON() ([]byte, error) {
+ data := map[string]any{
+ "path": pkg.Path,
+ }
+
+ if astJSON.GetOptions().MarshalOptions.IncludeLocation.Package {
+ if pkg.Location != nil {
+ data["location"] = pkg.Location
+ }
+ }
+
+ return json.Marshal(data)
+}
+
+// IsValidImportPath returns an error indicating if the import path is invalid.
+// If the import path is valid, err is nil.
+func IsValidImportPath(v Value) (err error) {
+ switch v := v.(type) {
+ case Var:
+ if !v.Equal(DefaultRootDocument.Value) && !v.Equal(InputRootDocument.Value) {
+ return fmt.Errorf("invalid path %v: path must begin with input or data", v)
+ }
+ case Ref:
+ if err := IsValidImportPath(v[0].Value); err != nil {
+ return fmt.Errorf("invalid path %v: path must begin with input or data", v)
+ }
+ for _, e := range v[1:] {
+ if _, ok := e.Value.(String); !ok {
+ return fmt.Errorf("invalid path %v: path elements must be strings", v)
+ }
+ }
+ default:
+ return fmt.Errorf("invalid path %v: path must be ref or var", v)
+ }
+ return nil
+}
+
+// Compare returns an integer indicating whether imp is less than, equal to,
+// or greater than other.
+func (imp *Import) Compare(other *Import) int {
+ if imp == nil {
+ if other == nil {
+ return 0
+ }
+ return -1
+ } else if other == nil {
+ return 1
+ }
+ if cmp := Compare(imp.Path, other.Path); cmp != 0 {
+ return cmp
+ }
+
+ return VarCompare(imp.Alias, other.Alias)
+}
+
+// Copy returns a deep copy of imp.
+func (imp *Import) Copy() *Import {
+ cpy := *imp
+ cpy.Path = imp.Path.Copy()
+ return &cpy
+}
+
+// Equal returns true if imp is equal to other.
+func (imp *Import) Equal(other *Import) bool {
+ return imp.Compare(other) == 0
+}
+
+// Loc returns the location of the Import in the definition.
+func (imp *Import) Loc() *Location {
+ if imp == nil {
+ return nil
+ }
+ return imp.Location
+}
+
+// SetLoc sets the location on imp.
+func (imp *Import) SetLoc(loc *Location) {
+ imp.Location = loc
+}
+
+// Name returns the variable that is used to refer to the imported virtual
+// document. This is the alias if defined otherwise the last element in the
+// path.
+func (imp *Import) Name() Var {
+ if len(imp.Alias) != 0 {
+ return imp.Alias
+ }
+ switch v := imp.Path.Value.(type) {
+ case Var:
+ return v
+ case Ref:
+ if len(v) == 1 {
+ return v[0].Value.(Var)
+ }
+ return Var(v[len(v)-1].Value.(String))
+ }
+ panic("illegal import")
+}
+
+func (imp *Import) String() string {
+ buf := []string{"import", imp.Path.String()}
+ if len(imp.Alias) > 0 {
+ buf = append(buf, "as", imp.Alias.String())
+ }
+ return strings.Join(buf, " ")
+}
+
+func (imp *Import) MarshalJSON() ([]byte, error) {
+ data := map[string]any{
+ "path": imp.Path,
+ }
+
+ if len(imp.Alias) != 0 {
+ data["alias"] = imp.Alias
+ }
+
+ if astJSON.GetOptions().MarshalOptions.IncludeLocation.Import {
+ if imp.Location != nil {
+ data["location"] = imp.Location
+ }
+ }
+
+ return json.Marshal(data)
+}
+
+// Compare returns an integer indicating whether rule is less than, equal to,
+// or greater than other.
+func (rule *Rule) Compare(other *Rule) int {
+ if rule == nil {
+ if other == nil {
+ return 0
+ }
+ return -1
+ } else if other == nil {
+ return 1
+ }
+ if cmp := rule.Head.Compare(other.Head); cmp != 0 {
+ return cmp
+ }
+ if rule.Default != other.Default {
+ if !rule.Default {
+ return -1
+ }
+ return 1
+ }
+ if cmp := rule.Body.Compare(other.Body); cmp != 0 {
+ return cmp
+ }
+
+ if cmp := annotationsCompare(rule.Annotations, other.Annotations); cmp != 0 {
+ return cmp
+ }
+
+ return rule.Else.Compare(other.Else)
+}
+
+// Copy returns a deep copy of rule.
+func (rule *Rule) Copy() *Rule {
+ cpy := *rule
+ cpy.Head = rule.Head.Copy()
+ cpy.Body = rule.Body.Copy()
+
+ if len(cpy.Annotations) > 0 {
+ cpy.Annotations = make([]*Annotations, len(rule.Annotations))
+ for i, a := range rule.Annotations {
+ cpy.Annotations[i] = a.Copy(&cpy)
+ }
+ }
+
+ if cpy.Else != nil {
+ cpy.Else = rule.Else.Copy()
+ }
+ return &cpy
+}
+
+// Equal returns true if rule is equal to other.
+func (rule *Rule) Equal(other *Rule) bool {
+ return rule.Compare(other) == 0
+}
+
+// Loc returns the location of the Rule in the definition.
+func (rule *Rule) Loc() *Location {
+ if rule == nil {
+ return nil
+ }
+ return rule.Location
+}
+
+// SetLoc sets the location on rule.
+func (rule *Rule) SetLoc(loc *Location) {
+ rule.Location = loc
+}
+
+// Path returns a ref referring to the document produced by this rule. If rule
+// is not contained in a module, this function panics.
+// Deprecated: Poor handling of ref rules. Use `(*Rule).Ref()` instead.
+func (rule *Rule) Path() Ref {
+ if rule.Module == nil {
+ panic("assertion failed")
+ }
+ return rule.Module.Package.Path.Extend(rule.Head.Ref().GroundPrefix())
+}
+
+// Ref returns a ref referring to the document produced by this rule. If rule
+// is not contained in a module, this function panics. The returned ref may
+// contain variables in the last position.
+func (rule *Rule) Ref() Ref {
+ if rule.Module == nil {
+ panic("assertion failed")
+ }
+ return rule.Module.Package.Path.Extend(rule.Head.Ref())
+}
+
+func (rule *Rule) String() string {
+ regoVersion := DefaultRegoVersion
+ if rule.Module != nil {
+ regoVersion = rule.Module.RegoVersion()
+ }
+ return rule.stringWithOpts(toStringOpts{regoVersion: regoVersion})
+}
+
+type toStringOpts struct {
+ regoVersion RegoVersion
+}
+
+func (o toStringOpts) RegoVersion() RegoVersion {
+ if o.regoVersion == RegoUndefined {
+ return DefaultRegoVersion
+ }
+ return o.regoVersion
+}
+
+func (rule *Rule) stringWithOpts(opts toStringOpts) string {
+ buf := []string{}
+ if rule.Default {
+ buf = append(buf, "default")
+ }
+ buf = append(buf, rule.Head.stringWithOpts(opts))
+ if !rule.Default {
+ switch opts.RegoVersion() {
+ case RegoV1, RegoV0CompatV1:
+ buf = append(buf, "if")
+ }
+ buf = append(buf, "{", rule.Body.String(), "}")
+ }
+ if rule.Else != nil {
+ buf = append(buf, rule.Else.elseString(opts))
+ }
+ return strings.Join(buf, " ")
+}
+
+func (rule *Rule) isFunction() bool {
+ return len(rule.Head.Args) > 0
+}
+
+func (rule *Rule) MarshalJSON() ([]byte, error) {
+ data := map[string]any{
+ "head": rule.Head,
+ "body": rule.Body,
+ }
+
+ if rule.Default {
+ data["default"] = true
+ }
+
+ if rule.Else != nil {
+ data["else"] = rule.Else
+ }
+
+ if astJSON.GetOptions().MarshalOptions.IncludeLocation.Rule {
+ if rule.Location != nil {
+ data["location"] = rule.Location
+ }
+ }
+
+ if len(rule.Annotations) != 0 {
+ data["annotations"] = rule.Annotations
+ }
+
+ return json.Marshal(data)
+}
+
+func (rule *Rule) elseString(opts toStringOpts) string {
+ var buf []string
+
+ buf = append(buf, "else")
+
+ value := rule.Head.Value
+ if value != nil {
+ buf = append(buf, "=", value.String())
+ }
+
+ switch opts.RegoVersion() {
+ case RegoV1, RegoV0CompatV1:
+ buf = append(buf, "if")
+ }
+
+ buf = append(buf, "{", rule.Body.String(), "}")
+
+ if rule.Else != nil {
+ buf = append(buf, rule.Else.elseString(opts))
+ }
+
+ return strings.Join(buf, " ")
+}
+
+// NewHead returns a new Head object. If args are provided, the first will be
+// used for the key and the second will be used for the value.
+func NewHead(name Var, args ...*Term) *Head {
+ head := &Head{
+ Name: name, // backcompat
+ Reference: []*Term{NewTerm(name)},
+ }
+ if len(args) == 0 {
+ return head
+ }
+ head.Key = args[0]
+ if len(args) == 1 {
+ return head
+ }
+ head.Value = args[1]
+ if head.Key != nil && head.Value != nil {
+ head.Reference = head.Reference.Append(args[0])
+ }
+ return head
+}
+
+// VarHead creates a head object, initializes its Name and Location and returns the new head.
+// NOTE: The JSON options argument is no longer used, and kept only for backwards compatibility.
+func VarHead(name Var, location *Location, _ *astJSON.Options) *Head {
+ h := NewHead(name)
+ h.Reference[0].Location = location
+ return h
+}
+
+// RefHead returns a new Head object with the passed Ref. If args are provided,
+// the first will be used for the value.
+func RefHead(ref Ref, args ...*Term) *Head {
+ head := &Head{}
+ head.SetRef(ref)
+ if len(ref) < 2 {
+ head.Name = ref[0].Value.(Var)
+ }
+ if len(args) >= 1 {
+ head.Value = args[0]
+ }
+ return head
+}
+
+// DocKind represents the collection of document types that can be produced by rules.
+type DocKind byte
+
+const (
+ // CompleteDoc represents a document that is completely defined by the rule.
+ CompleteDoc = iota
+
+ // PartialSetDoc represents a set document that is partially defined by the rule.
+ PartialSetDoc
+
+ // PartialObjectDoc represents an object document that is partially defined by the rule.
+ PartialObjectDoc
+) // TODO(sr): Deprecate?
+
+// DocKind returns the type of document produced by this rule.
+func (head *Head) DocKind() DocKind {
+ if head.Key != nil {
+ if head.Value != nil {
+ return PartialObjectDoc
+ }
+ return PartialSetDoc
+ } else if head.HasDynamicRef() {
+ return PartialObjectDoc
+ }
+ return CompleteDoc
+}
+
+type RuleKind byte
+
+const (
+ SingleValue = iota
+ MultiValue
+)
+
+// RuleKind returns the type of rule this is
+func (head *Head) RuleKind() RuleKind {
+ // NOTE(sr): This is bit verbose, since the key is irrelevant for single vs
+ // multi value, but as good a spot as to assert the invariant.
+ switch {
+ case head.Value != nil:
+ return SingleValue
+ case head.Key != nil:
+ return MultiValue
+ default:
+ panic("unreachable")
+ }
+}
+
+// Ref returns the Ref of the rule. If it doesn't have one, it's filled in
+// via the Head's Name.
+func (head *Head) Ref() Ref {
+ if len(head.Reference) > 0 {
+ return head.Reference
+ }
+ return Ref{&Term{Value: head.Name}}
+}
+
+// SetRef can be used to set a rule head's Reference
+func (head *Head) SetRef(r Ref) {
+ head.Reference = r
+}
+
+// Compare returns an integer indicating whether head is less than, equal to,
+// or greater than other.
+func (head *Head) Compare(other *Head) int {
+ if head == nil {
+ if other == nil {
+ return 0
+ }
+ return -1
+ } else if other == nil {
+ return 1
+ }
+ if head.Assign && !other.Assign {
+ return -1
+ } else if !head.Assign && other.Assign {
+ return 1
+ }
+ if cmp := Compare(head.Args, other.Args); cmp != 0 {
+ return cmp
+ }
+ if cmp := Compare(head.Reference, other.Reference); cmp != 0 {
+ return cmp
+ }
+ if cmp := VarCompare(head.Name, other.Name); cmp != 0 {
+ return cmp
+ }
+ if cmp := Compare(head.Key, other.Key); cmp != 0 {
+ return cmp
+ }
+ return Compare(head.Value, other.Value)
+}
+
+// Copy returns a deep copy of head.
+func (head *Head) Copy() *Head {
+ cpy := *head
+ cpy.Reference = head.Reference.Copy()
+ cpy.Args = head.Args.Copy()
+ cpy.Key = head.Key.Copy()
+ cpy.Value = head.Value.Copy()
+ cpy.keywords = nil
+ return &cpy
+}
+
+// Equal returns true if this head equals other.
+func (head *Head) Equal(other *Head) bool {
+ return head.Compare(other) == 0
+}
+
+func (head *Head) String() string {
+ return head.stringWithOpts(toStringOpts{})
+}
+
+func (head *Head) stringWithOpts(opts toStringOpts) string {
+ buf := strings.Builder{}
+ buf.WriteString(head.Ref().String())
+ containsAdded := false
+
+ switch {
+ case len(head.Args) != 0:
+ buf.WriteString(head.Args.String())
+ case len(head.Reference) == 1 && head.Key != nil:
+ switch opts.RegoVersion() {
+ case RegoV0:
+ buf.WriteRune('[')
+ buf.WriteString(head.Key.String())
+ buf.WriteRune(']')
+ default:
+ containsAdded = true
+ buf.WriteString(" contains ")
+ buf.WriteString(head.Key.String())
+ }
+ }
+ if head.Value != nil {
+ if head.Assign {
+ buf.WriteString(" := ")
+ } else {
+ buf.WriteString(" = ")
+ }
+ buf.WriteString(head.Value.String())
+ } else if !containsAdded && head.Name == "" && head.Key != nil {
+ buf.WriteString(" contains ")
+ buf.WriteString(head.Key.String())
+ }
+ return buf.String()
+}
+
+func (head *Head) MarshalJSON() ([]byte, error) {
+ var loc *Location
+ if astJSON.GetOptions().MarshalOptions.IncludeLocation.Head && head.Location != nil {
+ loc = head.Location
+ }
+
+ // NOTE(sr): we do this to override the rendering of `head.Reference`.
+ // It's still what'll be used via the default means of encoding/json
+ // for unmarshaling a json object into a Head struct!
+ type h Head
+ return json.Marshal(struct {
+ h
+ Ref Ref `json:"ref"`
+ Location *Location `json:"location,omitempty"`
+ }{
+ h: h(*head),
+ Ref: head.Ref(),
+ Location: loc,
+ })
+}
+
+// Vars returns a set of vars found in the head.
+func (head *Head) Vars() VarSet {
+ vis := NewVarVisitor()
+ // TODO: improve test coverage for this.
+ if head.Args != nil {
+ vis.WalkArgs(head.Args)
+ }
+ if head.Key != nil {
+ vis.Walk(head.Key)
+ }
+ if head.Value != nil {
+ vis.Walk(head.Value)
+ }
+ if len(head.Reference) > 0 {
+ vis.WalkRef(head.Reference[1:])
+ }
+ return vis.vars
+}
+
+// Loc returns the Location of head.
+func (head *Head) Loc() *Location {
+ if head == nil {
+ return nil
+ }
+ return head.Location
+}
+
+// SetLoc sets the location on head.
+func (head *Head) SetLoc(loc *Location) {
+ head.Location = loc
+}
+
+func (head *Head) HasDynamicRef() bool {
+ pos := head.Reference.Dynamic()
+ return pos > 0 && (pos < len(head.Reference))
+}
+
+// Copy returns a deep copy of a.
+func (a Args) Copy() Args {
+ cpy := Args{}
+ for _, t := range a {
+ cpy = append(cpy, t.Copy())
+ }
+ return cpy
+}
+
+func (a Args) String() string {
+ buf := make([]string, 0, len(a))
+ for _, t := range a {
+ buf = append(buf, t.String())
+ }
+ return "(" + strings.Join(buf, ", ") + ")"
+}
+
+// Loc returns the Location of a.
+func (a Args) Loc() *Location {
+ if len(a) == 0 {
+ return nil
+ }
+ return a[0].Location
+}
+
+// SetLoc sets the location on a.
+func (a Args) SetLoc(loc *Location) {
+ if len(a) != 0 {
+ a[0].SetLocation(loc)
+ }
+}
+
+// Vars returns a set of vars that appear in a.
+func (a Args) Vars() VarSet {
+ vis := NewVarVisitor()
+ vis.WalkArgs(a)
+ return vis.vars
+}
+
+// NewBody returns a new Body containing the given expressions. The indices of
+// the immediate expressions will be reset.
+func NewBody(exprs ...*Expr) Body {
+ for i, expr := range exprs {
+ expr.Index = i
+ }
+ return Body(exprs)
+}
+
+// MarshalJSON returns JSON encoded bytes representing body.
+func (body Body) MarshalJSON() ([]byte, error) {
+ // Serialize empty Body to empty array. This handles both the empty case and the
+ // nil case (whereas by default the result would be null if body was nil.)
+ if len(body) == 0 {
+ return []byte(`[]`), nil
+ }
+ ret, err := json.Marshal([]*Expr(body))
+ return ret, err
+}
+
+// Append adds the expr to the body and updates the expr's index accordingly.
+func (body *Body) Append(expr *Expr) {
+ n := len(*body)
+ expr.Index = n
+ *body = append(*body, expr)
+}
+
+// Set sets the expr in the body at the specified position and updates the
+// expr's index accordingly.
+func (body Body) Set(expr *Expr, pos int) {
+ body[pos] = expr
+ expr.Index = pos
+}
+
+// Compare returns an integer indicating whether body is less than, equal to,
+// or greater than other.
+//
+// If body is a subset of other, it is considered less than (and vice versa).
+func (body Body) Compare(other Body) int {
+ minLen := min(len(other), len(body))
+ for i := range minLen {
+ if cmp := body[i].Compare(other[i]); cmp != 0 {
+ return cmp
+ }
+ }
+ if len(body) < len(other) {
+ return -1
+ }
+ if len(other) < len(body) {
+ return 1
+ }
+ return 0
+}
+
+// Copy returns a deep copy of body.
+func (body Body) Copy() Body {
+ cpy := make(Body, len(body))
+ for i := range body {
+ cpy[i] = body[i].Copy()
+ }
+ return cpy
+}
+
+// Contains returns true if this body contains the given expression.
+func (body Body) Contains(x *Expr) bool {
+ return slices.ContainsFunc(body, x.Equal)
+}
+
+// Equal returns true if this Body is equal to the other Body.
+func (body Body) Equal(other Body) bool {
+ return body.Compare(other) == 0
+}
+
+// Hash returns the hash code for the Body.
+func (body Body) Hash() int {
+ s := 0
+ for _, e := range body {
+ s += e.Hash()
+ }
+ return s
+}
+
+// IsGround returns true if all of the expressions in the Body are ground.
+func (body Body) IsGround() bool {
+ for _, e := range body {
+ if !e.IsGround() {
+ return false
+ }
+ }
+ return true
+}
+
+// Loc returns the location of the Body in the definition.
+func (body Body) Loc() *Location {
+ if len(body) == 0 {
+ return nil
+ }
+ return body[0].Location
+}
+
+// SetLoc sets the location on body.
+func (body Body) SetLoc(loc *Location) {
+ if len(body) != 0 {
+ body[0].SetLocation(loc)
+ }
+}
+
+func (body Body) String() string {
+ buf := make([]string, 0, len(body))
+ for _, v := range body {
+ buf = append(buf, v.String())
+ }
+ return strings.Join(buf, "; ")
+}
+
+// Vars returns a VarSet containing variables in body. The params can be set to
+// control which vars are included.
+func (body Body) Vars(params VarVisitorParams) VarSet {
+ vis := NewVarVisitor().WithParams(params)
+ vis.WalkBody(body)
+ return vis.Vars()
+}
+
+// NewExpr returns a new Expr object.
+func NewExpr(terms any) *Expr {
+ switch terms.(type) {
+ case *SomeDecl, *Every, *Term, []*Term: // ok
+ default:
+ panic("unreachable")
+ }
+ return &Expr{
+ Negated: false,
+ Terms: terms,
+ Index: 0,
+ With: nil,
+ }
+}
+
+// Complement returns a copy of this expression with the negation flag flipped.
+func (expr *Expr) Complement() *Expr {
+ cpy := *expr
+ cpy.Negated = !cpy.Negated
+ return &cpy
+}
+
+// ComplementNoWith returns a copy of this expression with the negation flag flipped
+// and the with modifier removed. This is the same as calling .Complement().NoWith()
+// but without making an intermediate copy.
+func (expr *Expr) ComplementNoWith() *Expr {
+ cpy := *expr
+ cpy.Negated = !cpy.Negated
+ cpy.With = nil
+ return &cpy
+}
+
+// Equal returns true if this Expr equals the other Expr.
+func (expr *Expr) Equal(other *Expr) bool {
+ return expr.Compare(other) == 0
+}
+
+// Compare returns an integer indicating whether expr is less than, equal to,
+// or greater than other.
+//
+// Expressions are compared as follows:
+//
+// 1. Declarations are always less than other expressions.
+// 2. Preceding expression (by Index) is always less than the other expression.
+// 3. Non-negated expressions are always less than negated expressions.
+// 4. Single term expressions are always less than built-in expressions.
+//
+// Otherwise, the expression terms are compared normally. If both expressions
+// have the same terms, the modifiers are compared.
+func (expr *Expr) Compare(other *Expr) int {
+
+ if expr == nil {
+ if other == nil {
+ return 0
+ }
+ return -1
+ } else if other == nil {
+ return 1
+ }
+
+ o1 := expr.sortOrder()
+ o2 := other.sortOrder()
+ if o1 < o2 {
+ return -1
+ } else if o2 < o1 {
+ return 1
+ }
+
+ switch {
+ case expr.Index < other.Index:
+ return -1
+ case expr.Index > other.Index:
+ return 1
+ }
+
+ switch {
+ case expr.Negated && !other.Negated:
+ return 1
+ case !expr.Negated && other.Negated:
+ return -1
+ }
+
+ switch t := expr.Terms.(type) {
+ case *Term:
+ if cmp := Compare(t.Value, other.Terms.(*Term).Value); cmp != 0 {
+ return cmp
+ }
+ case []*Term:
+ if cmp := termSliceCompare(t, other.Terms.([]*Term)); cmp != 0 {
+ return cmp
+ }
+ case *SomeDecl:
+ if cmp := Compare(t, other.Terms.(*SomeDecl)); cmp != 0 {
+ return cmp
+ }
+ case *Every:
+ if cmp := Compare(t, other.Terms.(*Every)); cmp != 0 {
+ return cmp
+ }
+ }
+
+ return withSliceCompare(expr.With, other.With)
+}
+
+func (expr *Expr) sortOrder() int {
+ switch expr.Terms.(type) {
+ case *SomeDecl:
+ return 0
+ case *Term:
+ return 1
+ case []*Term:
+ return 2
+ case *Every:
+ return 3
+ }
+ return -1
+}
+
+// CopyWithoutTerms returns a deep copy of expr without its Terms
+func (expr *Expr) CopyWithoutTerms() *Expr {
+ cpy := *expr
+
+ if expr.With != nil {
+ cpy.With = make([]*With, len(expr.With))
+ for i := range expr.With {
+ cpy.With[i] = expr.With[i].Copy()
+ }
+ }
+
+ return &cpy
+}
+
+// Copy returns a deep copy of expr.
+func (expr *Expr) Copy() *Expr {
+
+ cpy := expr.CopyWithoutTerms()
+
+ switch ts := expr.Terms.(type) {
+ case *SomeDecl:
+ cpy.Terms = ts.Copy()
+ case []*Term:
+ cpy.Terms = termSliceCopy(ts)
+ case *Term:
+ cpy.Terms = ts.Copy()
+ case *Every:
+ cpy.Terms = ts.Copy()
+ }
+
+ return cpy
+}
+
+// Hash returns the hash code of the Expr.
+func (expr *Expr) Hash() int {
+ s := expr.Index
+ switch ts := expr.Terms.(type) {
+ case *SomeDecl:
+ s += ts.Hash()
+ case []*Term:
+ for _, t := range ts {
+ s += t.Value.Hash()
+ }
+ case *Term:
+ s += ts.Value.Hash()
+ }
+ if expr.Negated {
+ s++
+ }
+ for _, w := range expr.With {
+ s += w.Hash()
+ }
+ return s
+}
+
+// IncludeWith returns a copy of expr with the with modifier appended.
+func (expr *Expr) IncludeWith(target *Term, value *Term) *Expr {
+ cpy := *expr
+ cpy.With = append(cpy.With, &With{Target: target, Value: value})
+ return &cpy
+}
+
+// NoWith returns a copy of expr where the with modifier has been removed.
+func (expr *Expr) NoWith() *Expr {
+ cpy := *expr
+ cpy.With = nil
+ return &cpy
+}
+
+// IsEquality returns true if this is an equality expression.
+func (expr *Expr) IsEquality() bool {
+ return isGlobalBuiltin(expr, Var(Equality.Name))
+}
+
+// IsAssignment returns true if this an assignment expression.
+func (expr *Expr) IsAssignment() bool {
+ return isGlobalBuiltin(expr, Var(Assign.Name))
+}
+
+// IsCall returns true if this expression calls a function.
+func (expr *Expr) IsCall() bool {
+ _, ok := expr.Terms.([]*Term)
+ return ok
+}
+
+// IsEvery returns true if this expression is an 'every' expression.
+func (expr *Expr) IsEvery() bool {
+ _, ok := expr.Terms.(*Every)
+ return ok
+}
+
+// IsSome returns true if this expression is a 'some' expression.
+func (expr *Expr) IsSome() bool {
+ _, ok := expr.Terms.(*SomeDecl)
+ return ok
+}
+
+// Operator returns the name of the function or built-in this expression refers
+// to. If this expression is not a function call, returns nil.
+func (expr *Expr) Operator() Ref {
+ op := expr.OperatorTerm()
+ if op == nil {
+ return nil
+ }
+ return op.Value.(Ref)
+}
+
+// OperatorTerm returns the name of the function or built-in this expression
+// refers to. If this expression is not a function call, returns nil.
+func (expr *Expr) OperatorTerm() *Term {
+ terms, ok := expr.Terms.([]*Term)
+ if !ok || len(terms) == 0 {
+ return nil
+ }
+ return terms[0]
+}
+
+// Operand returns the term at the zero-based pos. If the expr does not include
+// at least pos+1 terms, this function returns nil.
+func (expr *Expr) Operand(pos int) *Term {
+ terms, ok := expr.Terms.([]*Term)
+ if !ok {
+ return nil
+ }
+ idx := pos + 1
+ if idx < len(terms) {
+ return terms[idx]
+ }
+ return nil
+}
+
+// Operands returns the built-in function operands.
+func (expr *Expr) Operands() []*Term {
+ terms, ok := expr.Terms.([]*Term)
+ if !ok {
+ return nil
+ }
+ return terms[1:]
+}
+
+// IsGround returns true if all of the expression terms are ground.
+func (expr *Expr) IsGround() bool {
+ switch ts := expr.Terms.(type) {
+ case []*Term:
+ for _, t := range ts[1:] {
+ if !t.IsGround() {
+ return false
+ }
+ }
+ case *Term:
+ return ts.IsGround()
+ }
+ return true
+}
+
+// SetOperator sets the expr's operator and returns the expr itself. If expr is
+// not a call expr, this function will panic.
+func (expr *Expr) SetOperator(term *Term) *Expr {
+ expr.Terms.([]*Term)[0] = term
+ return expr
+}
+
+// SetLocation sets the expr's location and returns the expr itself.
+func (expr *Expr) SetLocation(loc *Location) *Expr {
+ expr.Location = loc
+ return expr
+}
+
+// Loc returns the Location of expr.
+func (expr *Expr) Loc() *Location {
+ if expr == nil {
+ return nil
+ }
+ return expr.Location
+}
+
+// SetLoc sets the location on expr.
+func (expr *Expr) SetLoc(loc *Location) {
+ expr.SetLocation(loc)
+}
+
+func (expr *Expr) String() string {
+ buf := make([]string, 0, 2+len(expr.With))
+ if expr.Negated {
+ buf = append(buf, "not")
+ }
+ switch t := expr.Terms.(type) {
+ case []*Term:
+ if expr.IsEquality() && validEqAssignArgCount(expr) {
+ buf = append(buf, fmt.Sprintf("%v %v %v", t[1], Equality.Infix, t[2]))
+ } else {
+ buf = append(buf, Call(t).String())
+ }
+ case fmt.Stringer:
+ buf = append(buf, t.String())
+ }
+
+ for i := range expr.With {
+ buf = append(buf, expr.With[i].String())
+ }
+
+ return strings.Join(buf, " ")
+}
+
+func (expr *Expr) MarshalJSON() ([]byte, error) {
+ data := map[string]any{
+ "terms": expr.Terms,
+ "index": expr.Index,
+ }
+
+ if len(expr.With) > 0 {
+ data["with"] = expr.With
+ }
+
+ if expr.Generated {
+ data["generated"] = true
+ }
+
+ if expr.Negated {
+ data["negated"] = true
+ }
+
+ if astJSON.GetOptions().MarshalOptions.IncludeLocation.Expr {
+ if expr.Location != nil {
+ data["location"] = expr.Location
+ }
+ }
+
+ return json.Marshal(data)
+}
+
+// UnmarshalJSON parses the byte array and stores the result in expr.
+func (expr *Expr) UnmarshalJSON(bs []byte) error {
+ v := map[string]any{}
+ if err := util.UnmarshalJSON(bs, &v); err != nil {
+ return err
+ }
+ return unmarshalExpr(expr, v)
+}
+
+// Vars returns a VarSet containing variables in expr. The params can be set to
+// control which vars are included.
+func (expr *Expr) Vars(params VarVisitorParams) VarSet {
+ vis := NewVarVisitor().WithParams(params)
+ vis.Walk(expr)
+ return vis.Vars()
+}
+
+// NewBuiltinExpr creates a new Expr object with the supplied terms.
+// The builtin operator must be the first term.
+func NewBuiltinExpr(terms ...*Term) *Expr {
+ return &Expr{Terms: terms}
+}
+
+func (expr *Expr) CogeneratedExprs() []*Expr {
+ visited := map[*Expr]struct{}{}
+ visitCogeneratedExprs(expr, func(e *Expr) bool {
+ if expr.Equal(e) {
+ return true
+ }
+ if _, ok := visited[e]; ok {
+ return true
+ }
+ visited[e] = struct{}{}
+ return false
+ })
+
+ result := make([]*Expr, 0, len(visited))
+ for e := range visited {
+ result = append(result, e)
+ }
+ return result
+}
+
+func (expr *Expr) BaseCogeneratedExpr() *Expr {
+ if expr.generatedFrom == nil {
+ return expr
+ }
+ return expr.generatedFrom.BaseCogeneratedExpr()
+}
+
+func visitCogeneratedExprs(expr *Expr, f func(*Expr) bool) {
+ if parent := expr.generatedFrom; parent != nil {
+ if stop := f(parent); !stop {
+ visitCogeneratedExprs(parent, f)
+ }
+ }
+ for _, child := range expr.generates {
+ if stop := f(child); !stop {
+ visitCogeneratedExprs(child, f)
+ }
+ }
+}
+
+func (d *SomeDecl) String() string {
+ if call, ok := d.Symbols[0].Value.(Call); ok {
+ if len(call) == 4 {
+ return "some " + call[1].String() + ", " + call[2].String() + " in " + call[3].String()
+ }
+ return "some " + call[1].String() + " in " + call[2].String()
+ }
+ buf := make([]string, len(d.Symbols))
+ for i := range buf {
+ buf[i] = d.Symbols[i].String()
+ }
+ return "some " + strings.Join(buf, ", ")
+}
+
+// SetLoc sets the Location on d.
+func (d *SomeDecl) SetLoc(loc *Location) {
+ d.Location = loc
+}
+
+// Loc returns the Location of d.
+func (d *SomeDecl) Loc() *Location {
+ return d.Location
+}
+
+// Copy returns a deep copy of d.
+func (d *SomeDecl) Copy() *SomeDecl {
+ cpy := *d
+ cpy.Symbols = termSliceCopy(d.Symbols)
+ return &cpy
+}
+
+// Compare returns an integer indicating whether d is less than, equal to, or
+// greater than other.
+func (d *SomeDecl) Compare(other *SomeDecl) int {
+ return termSliceCompare(d.Symbols, other.Symbols)
+}
+
+// Hash returns a hash code of d.
+func (d *SomeDecl) Hash() int {
+ return termSliceHash(d.Symbols)
+}
+
+func (d *SomeDecl) MarshalJSON() ([]byte, error) {
+ data := map[string]any{
+ "symbols": d.Symbols,
+ }
+
+ if astJSON.GetOptions().MarshalOptions.IncludeLocation.SomeDecl {
+ if d.Location != nil {
+ data["location"] = d.Location
+ }
+ }
+
+ return json.Marshal(data)
+}
+
+func (q *Every) String() string {
+ if q.Key != nil {
+ return fmt.Sprintf("every %s, %s in %s { %s }",
+ q.Key,
+ q.Value,
+ q.Domain,
+ q.Body)
+ }
+ return fmt.Sprintf("every %s in %s { %s }",
+ q.Value,
+ q.Domain,
+ q.Body)
+}
+
+func (q *Every) Loc() *Location {
+ return q.Location
+}
+
+func (q *Every) SetLoc(l *Location) {
+ q.Location = l
+}
+
+// Copy returns a deep copy of d.
+func (q *Every) Copy() *Every {
+ cpy := *q
+ cpy.Key = q.Key.Copy()
+ cpy.Value = q.Value.Copy()
+ cpy.Domain = q.Domain.Copy()
+ cpy.Body = q.Body.Copy()
+ return &cpy
+}
+
+func (q *Every) Compare(other *Every) int {
+ for _, terms := range [][2]*Term{
+ {q.Key, other.Key},
+ {q.Value, other.Value},
+ {q.Domain, other.Domain},
+ } {
+ if d := Compare(terms[0], terms[1]); d != 0 {
+ return d
+ }
+ }
+ return q.Body.Compare(other.Body)
+}
+
+// KeyValueVars returns the key and val arguments of an `every`
+// expression, if they are non-nil and not wildcards.
+func (q *Every) KeyValueVars() VarSet {
+ vis := NewVarVisitor()
+ if q.Key != nil {
+ vis.Walk(q.Key)
+ }
+ vis.Walk(q.Value)
+ return vis.vars
+}
+
+func (q *Every) MarshalJSON() ([]byte, error) {
+ data := map[string]any{
+ "key": q.Key,
+ "value": q.Value,
+ "domain": q.Domain,
+ "body": q.Body,
+ }
+
+ if astJSON.GetOptions().MarshalOptions.IncludeLocation.Every {
+ if q.Location != nil {
+ data["location"] = q.Location
+ }
+ }
+
+ return json.Marshal(data)
+}
+
+func (w *With) String() string {
+ return "with " + w.Target.String() + " as " + w.Value.String()
+}
+
+// Equal returns true if this With is equals the other With.
+func (w *With) Equal(other *With) bool {
+ return Compare(w, other) == 0
+}
+
+// Compare returns an integer indicating whether w is less than, equal to, or
+// greater than other.
+func (w *With) Compare(other *With) int {
+ if w == nil {
+ if other == nil {
+ return 0
+ }
+ return -1
+ } else if other == nil {
+ return 1
+ }
+ if cmp := Compare(w.Target, other.Target); cmp != 0 {
+ return cmp
+ }
+ return Compare(w.Value, other.Value)
+}
+
+// Copy returns a deep copy of w.
+func (w *With) Copy() *With {
+ cpy := *w
+ cpy.Value = w.Value.Copy()
+ cpy.Target = w.Target.Copy()
+ return &cpy
+}
+
+// Hash returns the hash code of the With.
+func (w With) Hash() int {
+ return w.Target.Hash() + w.Value.Hash()
+}
+
+// SetLocation sets the location on w.
+func (w *With) SetLocation(loc *Location) *With {
+ w.Location = loc
+ return w
+}
+
+// Loc returns the Location of w.
+func (w *With) Loc() *Location {
+ if w == nil {
+ return nil
+ }
+ return w.Location
+}
+
+// SetLoc sets the location on w.
+func (w *With) SetLoc(loc *Location) {
+ w.Location = loc
+}
+
+func (w *With) MarshalJSON() ([]byte, error) {
+ data := map[string]any{
+ "target": w.Target,
+ "value": w.Value,
+ }
+
+ if astJSON.GetOptions().MarshalOptions.IncludeLocation.With {
+ if w.Location != nil {
+ data["location"] = w.Location
+ }
+ }
+
+ return json.Marshal(data)
+}
+
+// Copy returns a deep copy of the AST node x. If x is not an AST node, x is returned unmodified.
+func Copy(x any) any {
+ switch x := x.(type) {
+ case *Module:
+ return x.Copy()
+ case *Package:
+ return x.Copy()
+ case *Import:
+ return x.Copy()
+ case *Rule:
+ return x.Copy()
+ case *Head:
+ return x.Copy()
+ case Args:
+ return x.Copy()
+ case Body:
+ return x.Copy()
+ case *Expr:
+ return x.Copy()
+ case *With:
+ return x.Copy()
+ case *SomeDecl:
+ return x.Copy()
+ case *Every:
+ return x.Copy()
+ case *Term:
+ return x.Copy()
+ case *ArrayComprehension:
+ return x.Copy()
+ case *SetComprehension:
+ return x.Copy()
+ case *ObjectComprehension:
+ return x.Copy()
+ case Set:
+ return x.Copy()
+ case *object:
+ return x.Copy()
+ case *Array:
+ return x.Copy()
+ case Ref:
+ return x.Copy()
+ case Call:
+ return x.Copy()
+ case *Comment:
+ return x.Copy()
+ }
+ return x
+}
+
+// RuleSet represents a collection of rules that produce a virtual document.
+type RuleSet []*Rule
+
+// NewRuleSet returns a new RuleSet containing the given rules.
+func NewRuleSet(rules ...*Rule) RuleSet {
+ rs := make(RuleSet, 0, len(rules))
+ for _, rule := range rules {
+ rs.Add(rule)
+ }
+ return rs
+}
+
+// Add inserts the rule into rs.
+func (rs *RuleSet) Add(rule *Rule) {
+ for _, exist := range *rs {
+ if exist.Equal(rule) {
+ return
+ }
+ }
+ *rs = append(*rs, rule)
+}
+
+// Contains returns true if rs contains rule.
+func (rs RuleSet) Contains(rule *Rule) bool {
+ for i := range rs {
+ if rs[i].Equal(rule) {
+ return true
+ }
+ }
+ return false
+}
+
+// Diff returns a new RuleSet containing rules in rs that are not in other.
+func (rs RuleSet) Diff(other RuleSet) RuleSet {
+ result := NewRuleSet()
+ for i := range rs {
+ if !other.Contains(rs[i]) {
+ result.Add(rs[i])
+ }
+ }
+ return result
+}
+
+// Equal returns true if rs equals other.
+func (rs RuleSet) Equal(other RuleSet) bool {
+ return len(rs.Diff(other)) == 0 && len(other.Diff(rs)) == 0
+}
+
+// Merge returns a ruleset containing the union of rules from rs an other.
+func (rs RuleSet) Merge(other RuleSet) RuleSet {
+ result := NewRuleSet()
+ for i := range rs {
+ result.Add(rs[i])
+ }
+ for i := range other {
+ result.Add(other[i])
+ }
+ return result
+}
+
+func (rs RuleSet) String() string {
+ buf := make([]string, 0, len(rs))
+ for _, rule := range rs {
+ buf = append(buf, rule.String())
+ }
+ return "{" + strings.Join(buf, ", ") + "}"
+}
+
+// Returns true if the equality or assignment expression referred to by expr
+// has a valid number of arguments.
+func validEqAssignArgCount(expr *Expr) bool {
+ return len(expr.Operands()) == 2
+}
+
+// this function checks if the expr refers to a non-namespaced (global) built-in
+// function like eq, gt, plus, etc.
+func isGlobalBuiltin(expr *Expr, name Var) bool {
+ terms, ok := expr.Terms.([]*Term)
+ if !ok {
+ return false
+ }
+
+ // NOTE(tsandall): do not use Term#Equal or Value#Compare to avoid
+ // allocation here.
+ ref, ok := terms[0].Value.(Ref)
+ if !ok || len(ref) != 1 {
+ return false
+ }
+ if head, ok := ref[0].Value.(Var); ok {
+ return head.Equal(name)
+ }
+ return false
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/pretty.go b/vendor/github.com/open-policy-agent/opa/v1/ast/pretty.go
new file mode 100644
index 0000000000..aa34f37471
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/pretty.go
@@ -0,0 +1,82 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "fmt"
+ "io"
+ "strings"
+)
+
+// Pretty writes a pretty representation of the AST rooted at x to w.
+//
+// This is function is intended for debug purposes when inspecting ASTs.
+func Pretty(w io.Writer, x any) {
+ pp := &prettyPrinter{
+ depth: -1,
+ w: w,
+ }
+ NewBeforeAfterVisitor(pp.Before, pp.After).Walk(x)
+}
+
+type prettyPrinter struct {
+ depth int
+ w io.Writer
+}
+
+func (pp *prettyPrinter) Before(x any) bool {
+ switch x.(type) {
+ case *Term:
+ default:
+ pp.depth++
+ }
+
+ switch x := x.(type) {
+ case *Term:
+ return false
+ case Args:
+ if len(x) == 0 {
+ return false
+ }
+ pp.writeType(x)
+ case *Expr:
+ extras := []string{}
+ if x.Negated {
+ extras = append(extras, "negated")
+ }
+ extras = append(extras, fmt.Sprintf("index=%d", x.Index))
+ pp.writeIndent("%v %v", TypeName(x), strings.Join(extras, " "))
+ case Null, Boolean, Number, String, Var:
+ pp.writeValue(x)
+ default:
+ pp.writeType(x)
+ }
+ return false
+}
+
+func (pp *prettyPrinter) After(x any) {
+ switch x.(type) {
+ case *Term:
+ default:
+ pp.depth--
+ }
+}
+
+func (pp *prettyPrinter) writeValue(x any) {
+ pp.writeIndent(fmt.Sprint(x))
+}
+
+func (pp *prettyPrinter) writeType(x any) {
+ pp.writeIndent(TypeName(x))
+}
+
+func (pp *prettyPrinter) writeIndent(f string, a ...any) {
+ pad := strings.Repeat(" ", pp.depth)
+ pp.write(pad+f, a...)
+}
+
+func (pp *prettyPrinter) write(f string, a ...any) {
+ fmt.Fprintf(pp.w, f+"\n", a...)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/rego_compiler.go b/vendor/github.com/open-policy-agent/opa/v1/ast/rego_compiler.go
new file mode 100644
index 0000000000..78d0efc59a
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/rego_compiler.go
@@ -0,0 +1,17 @@
+package ast
+
+import "context"
+
+type regoCompileCtx struct{}
+
+func WithCompiler(ctx context.Context, c *Compiler) context.Context {
+ return context.WithValue(ctx, regoCompileCtx{}, c)
+}
+
+func CompilerFromContext(ctx context.Context) (*Compiler, bool) {
+ if ctx == nil {
+ return nil, false
+ }
+ v, ok := ctx.Value(regoCompileCtx{}).(*Compiler)
+ return v, ok
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/rego_v1.go b/vendor/github.com/open-policy-agent/opa/v1/ast/rego_v1.go
similarity index 87%
rename from vendor/github.com/open-policy-agent/opa/ast/rego_v1.go
rename to vendor/github.com/open-policy-agent/opa/v1/ast/rego_v1.go
index b64dfce7be..a702d9294c 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/rego_v1.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/rego_v1.go
@@ -3,7 +3,7 @@ package ast
import (
"fmt"
- "github.com/open-policy-agent/opa/ast/internal/tokens"
+ "github.com/open-policy-agent/opa/v1/ast/internal/tokens"
)
func checkDuplicateImports(modules []*Module) (errors Errors) {
@@ -23,7 +23,7 @@ func checkDuplicateImports(modules []*Module) (errors Errors) {
return
}
-func checkRootDocumentOverrides(node interface{}) Errors {
+func checkRootDocumentOverrides(node any) Errors {
errors := Errors{}
WalkRules(node, func(rule *Rule) bool {
@@ -64,8 +64,8 @@ func checkRootDocumentOverrides(node interface{}) Errors {
return errors
}
-func walkCalls(node interface{}, f func(interface{}) bool) {
- vis := &GenericVisitor{func(x interface{}) bool {
+func walkCalls(node any, f func(any) bool) {
+ vis := &GenericVisitor{func(x any) bool {
switch x := x.(type) {
case Call:
return f(x)
@@ -82,10 +82,10 @@ func walkCalls(node interface{}, f func(interface{}) bool) {
vis.Walk(node)
}
-func checkDeprecatedBuiltins(deprecatedBuiltinsMap map[string]struct{}, node interface{}) Errors {
+func checkDeprecatedBuiltins(deprecatedBuiltinsMap map[string]struct{}, node any) Errors {
errs := make(Errors, 0)
- walkCalls(node, func(x interface{}) bool {
+ walkCalls(node, func(x any) bool {
var operator string
var loc *Location
@@ -113,7 +113,7 @@ func checkDeprecatedBuiltins(deprecatedBuiltinsMap map[string]struct{}, node int
return errs
}
-func checkDeprecatedBuiltinsForCurrentVersion(node interface{}) Errors {
+func checkDeprecatedBuiltinsForCurrentVersion(node any) Errors {
deprecatedBuiltins := make(map[string]struct{})
capabilities := CapabilitiesForThisVersion()
for _, bi := range capabilities.Builtins {
@@ -150,11 +150,11 @@ func NewRegoCheckOptions() RegoCheckOptions {
// CheckRegoV1 checks the given module or rule for errors that are specific to Rego v1.
// Passing something other than an *ast.Rule or *ast.Module is considered a programming error, and will cause a panic.
-func CheckRegoV1(x interface{}) Errors {
+func CheckRegoV1(x any) Errors {
return CheckRegoV1WithOptions(x, NewRegoCheckOptions())
}
-func CheckRegoV1WithOptions(x interface{}, opts RegoCheckOptions) Errors {
+func CheckRegoV1WithOptions(x any, opts RegoCheckOptions) Errors {
switch x := x.(type) {
case *Module:
return checkRegoV1Module(x, opts)
@@ -191,8 +191,8 @@ func checkRegoV1Rule(rule *Rule, opts RegoCheckOptions) Errors {
var errs Errors
- if opts.NoKeywordsAsRuleNames && IsKeywordInRegoVersion(rule.Head.Name.String(), RegoV1) {
- errs = append(errs, NewError(ParseErr, rule.Location, fmt.Sprintf("%s keyword cannot be used for rule name", rule.Head.Name.String())))
+ if opts.NoKeywordsAsRuleNames && len(rule.Head.Reference) < 2 && IsKeywordInRegoVersion(rule.Head.Name.String(), RegoV1) {
+ errs = append(errs, NewError(ParseErr, rule.Location, "%s keyword cannot be used for rule name", rule.Head.Name.String()))
}
if opts.RequireRuleBodyOrValue && rule.generatedBody && rule.Head.generatedValue {
errs = append(errs, NewError(ParseErr, rule.Location, "%s must have value assignment and/or body declaration", t))
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/schema.go b/vendor/github.com/open-policy-agent/opa/v1/ast/schema.go
new file mode 100644
index 0000000000..3f9e2001d5
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/schema.go
@@ -0,0 +1,54 @@
+// Copyright 2021 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "fmt"
+
+ "github.com/open-policy-agent/opa/v1/types"
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+// SchemaSet holds a map from a path to a schema.
+type SchemaSet struct {
+ m *util.HasherMap[Ref, any]
+}
+
+// NewSchemaSet returns an empty SchemaSet.
+func NewSchemaSet() *SchemaSet {
+ return &SchemaSet{
+ m: util.NewHasherMap[Ref, any](RefEqual),
+ }
+}
+
+// Put inserts a raw schema into the set.
+func (ss *SchemaSet) Put(path Ref, raw any) {
+ ss.m.Put(path, raw)
+}
+
+// Get returns the raw schema identified by the path.
+func (ss *SchemaSet) Get(path Ref) any {
+ if ss != nil {
+ if x, ok := ss.m.Get(path); ok {
+ return x
+ }
+ }
+ return nil
+}
+
+func loadSchema(raw any, allowNet []string) (types.Type, error) {
+
+ jsonSchema, err := compileSchema(raw, allowNet)
+ if err != nil {
+ return nil, err
+ }
+
+ tpe, err := newSchemaParser().parseSchema(jsonSchema.RootSchema)
+ if err != nil {
+ return nil, fmt.Errorf("type checking: %w", err)
+ }
+
+ return tpe, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/strings.go b/vendor/github.com/open-policy-agent/opa/v1/ast/strings.go
new file mode 100644
index 0000000000..8447522412
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/strings.go
@@ -0,0 +1,54 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "reflect"
+ "strings"
+)
+
+// TypeName returns a human readable name for the AST element type.
+func TypeName(x any) string {
+ if _, ok := x.(*lazyObj); ok {
+ return "object"
+ }
+ return strings.ToLower(reflect.Indirect(reflect.ValueOf(x)).Type().Name())
+}
+
+// ValueName returns a human readable name for the AST Value type.
+// This is preferrable over calling TypeName when the argument is known to be
+// a Value, as this doesn't require reflection (= heap allocations).
+func ValueName(x Value) string {
+ switch x.(type) {
+ case String:
+ return "string"
+ case Boolean:
+ return "boolean"
+ case Number:
+ return "number"
+ case Null:
+ return "null"
+ case Var:
+ return "var"
+ case Object:
+ return "object"
+ case Set:
+ return "set"
+ case Ref:
+ return "ref"
+ case Call:
+ return "call"
+ case *Array:
+ return "array"
+ case *ArrayComprehension:
+ return "arraycomprehension"
+ case *ObjectComprehension:
+ return "objectcomprehension"
+ case *SetComprehension:
+ return "setcomprehension"
+ }
+
+ return TypeName(x)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/syncpools.go b/vendor/github.com/open-policy-agent/opa/v1/ast/syncpools.go
new file mode 100644
index 0000000000..82977c836b
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/syncpools.go
@@ -0,0 +1,92 @@
+package ast
+
+import (
+ "strings"
+ "sync"
+)
+
+type termPtrPool struct {
+ pool sync.Pool
+}
+
+type stringBuilderPool struct {
+ pool sync.Pool
+}
+
+type indexResultPool struct {
+ pool sync.Pool
+}
+
+type vvPool struct {
+ pool sync.Pool
+}
+
+func (p *termPtrPool) Get() *Term {
+ return p.pool.Get().(*Term)
+}
+
+func (p *termPtrPool) Put(t *Term) {
+ p.pool.Put(t)
+}
+
+func (p *stringBuilderPool) Get() *strings.Builder {
+ return p.pool.Get().(*strings.Builder)
+}
+
+func (p *stringBuilderPool) Put(sb *strings.Builder) {
+ sb.Reset()
+ p.pool.Put(sb)
+}
+
+func (p *indexResultPool) Get() *IndexResult {
+ return p.pool.Get().(*IndexResult)
+}
+
+func (p *indexResultPool) Put(x *IndexResult) {
+ if x != nil {
+ p.pool.Put(x)
+ }
+}
+
+func (p *vvPool) Get() *VarVisitor {
+ return p.pool.Get().(*VarVisitor)
+}
+
+func (p *vvPool) Put(vv *VarVisitor) {
+ if vv != nil {
+ vv.Clear()
+ p.pool.Put(vv)
+ }
+}
+
+var TermPtrPool = &termPtrPool{
+ pool: sync.Pool{
+ New: func() any {
+ return &Term{}
+ },
+ },
+}
+
+var sbPool = &stringBuilderPool{
+ pool: sync.Pool{
+ New: func() any {
+ return &strings.Builder{}
+ },
+ },
+}
+
+var varVisitorPool = &vvPool{
+ pool: sync.Pool{
+ New: func() any {
+ return NewVarVisitor()
+ },
+ },
+}
+
+var IndexResultPool = &indexResultPool{
+ pool: sync.Pool{
+ New: func() any {
+ return &IndexResult{}
+ },
+ },
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/term.go b/vendor/github.com/open-policy-agent/opa/v1/ast/term.go
new file mode 100644
index 0000000000..18f8a423d9
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/term.go
@@ -0,0 +1,3106 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// nolint: deadcode // Public API.
+package ast
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "net/url"
+ "regexp"
+ "slices"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+
+ "github.com/cespare/xxhash/v2"
+ astJSON "github.com/open-policy-agent/opa/v1/ast/json"
+ "github.com/open-policy-agent/opa/v1/ast/location"
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+var errFindNotFound = errors.New("find: not found")
+
+// Location records a position in source code.
+type Location = location.Location
+
+// NewLocation returns a new Location object.
+func NewLocation(text []byte, file string, row int, col int) *Location {
+ return location.NewLocation(text, file, row, col)
+}
+
+// Value declares the common interface for all Term values. Every kind of Term value
+// in the language is represented as a type that implements this interface:
+//
+// - Null, Boolean, Number, String
+// - Object, Array, Set
+// - Variables, References
+// - Array, Set, and Object Comprehensions
+// - Calls
+type Value interface {
+ Compare(other Value) int // Compare returns <0, 0, or >0 if this Value is less than, equal to, or greater than other, respectively.
+ Find(path Ref) (Value, error) // Find returns value referred to by path or an error if path is not found.
+ Hash() int // Returns hash code of the value.
+ IsGround() bool // IsGround returns true if this value is not a variable or contains no variables.
+ String() string // String returns a human readable string representation of the value.
+}
+
+// InterfaceToValue converts a native Go value x to a Value.
+func InterfaceToValue(x any) (Value, error) {
+ switch x := x.(type) {
+ case Value:
+ return x, nil
+ case nil:
+ return NullValue, nil
+ case bool:
+ return InternedValue(x), nil
+ case json.Number:
+ if interned := InternedIntNumberTermFromString(string(x)); interned != nil {
+ return interned.Value, nil
+ }
+ return Number(x), nil
+ case int:
+ return InternedValueOr(x, newIntNumberValue), nil
+ case int64:
+ return InternedValueOr(x, newInt64NumberValue), nil
+ case uint64:
+ return InternedValueOr(x, newUint64NumberValue), nil
+ case float64:
+ return floatNumber(x), nil
+ case string:
+ return String(x), nil
+ case []any:
+ r := util.NewPtrSlice[Term](len(x))
+ for i, e := range x {
+ e, err := InterfaceToValue(e)
+ if err != nil {
+ return nil, err
+ }
+ r[i].Value = e
+ }
+ return NewArray(r...), nil
+ case []string:
+ r := util.NewPtrSlice[Term](len(x))
+ for i, e := range x {
+ r[i].Value = String(e)
+ }
+ return NewArray(r...), nil
+ case map[string]any:
+ kvs := util.NewPtrSlice[Term](len(x) * 2)
+ idx := 0
+ for k, v := range x {
+ kvs[idx].Value = String(k)
+ v, err := InterfaceToValue(v)
+ if err != nil {
+ return nil, err
+ }
+ kvs[idx+1].Value = v
+ idx += 2
+ }
+ tuples := make([][2]*Term, len(kvs)/2)
+ for i := 0; i < len(kvs); i += 2 {
+ tuples[i/2] = *(*[2]*Term)(kvs[i : i+2])
+ }
+ return NewObject(tuples...), nil
+ case map[string]string:
+ r := newobject(len(x))
+ for k, v := range x {
+ r.Insert(StringTerm(k), StringTerm(v))
+ }
+ return r, nil
+ default:
+ ptr := util.Reference(x)
+ if err := util.RoundTrip(ptr); err != nil {
+ return nil, fmt.Errorf("ast: interface conversion: %w", err)
+ }
+ return InterfaceToValue(*ptr)
+ }
+}
+
+// ValueFromReader returns an AST value from a JSON serialized value in the reader.
+func ValueFromReader(r io.Reader) (Value, error) {
+ var x any
+ if err := util.NewJSONDecoder(r).Decode(&x); err != nil {
+ return nil, err
+ }
+ return InterfaceToValue(x)
+}
+
+// As converts v into a Go native type referred to by x.
+func As(v Value, x any) error {
+ return util.NewJSONDecoder(strings.NewReader(v.String())).Decode(x)
+}
+
+// Resolver defines the interface for resolving references to native Go values.
+type Resolver interface {
+ Resolve(Ref) (any, error)
+}
+
+// ValueResolver defines the interface for resolving references to AST values.
+type ValueResolver interface {
+ Resolve(Ref) (Value, error)
+}
+
+// UnknownValueErr indicates a ValueResolver was unable to resolve a reference
+// because the reference refers to an unknown value.
+type UnknownValueErr struct{}
+
+func (UnknownValueErr) Error() string {
+ return "unknown value"
+}
+
+// IsUnknownValueErr returns true if the err is an UnknownValueErr.
+func IsUnknownValueErr(err error) bool {
+ _, ok := err.(UnknownValueErr)
+ return ok
+}
+
+type illegalResolver struct{}
+
+func (illegalResolver) Resolve(ref Ref) (any, error) {
+ return nil, fmt.Errorf("illegal value: %v", ref)
+}
+
+// ValueToInterface returns the Go representation of an AST value. The AST
+// value should not contain any values that require evaluation (e.g., vars,
+// comprehensions, etc.)
+func ValueToInterface(v Value, resolver Resolver) (any, error) {
+ return valueToInterface(v, resolver, JSONOpt{})
+}
+
+func valueToInterface(v Value, resolver Resolver, opt JSONOpt) (any, error) {
+ switch v := v.(type) {
+ case Null:
+ return nil, nil
+ case Boolean:
+ return bool(v), nil
+ case Number:
+ return json.Number(v), nil
+ case String:
+ return string(v), nil
+ case *Array:
+ buf := []any{}
+ for i := range v.Len() {
+ x1, err := valueToInterface(v.Elem(i).Value, resolver, opt)
+ if err != nil {
+ return nil, err
+ }
+ buf = append(buf, x1)
+ }
+ return buf, nil
+ case *object:
+ buf := make(map[string]any, v.Len())
+ err := v.Iter(func(k, v *Term) error {
+ ki, err := valueToInterface(k.Value, resolver, opt)
+ if err != nil {
+ return err
+ }
+ var str string
+ var ok bool
+ if str, ok = ki.(string); !ok {
+ var buf bytes.Buffer
+ if err := json.NewEncoder(&buf).Encode(ki); err != nil {
+ return err
+ }
+ str = strings.TrimSpace(buf.String())
+ }
+ vi, err := valueToInterface(v.Value, resolver, opt)
+ if err != nil {
+ return err
+ }
+ buf[str] = vi
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return buf, nil
+ case *lazyObj:
+ if opt.CopyMaps {
+ return valueToInterface(v.force(), resolver, opt)
+ }
+ return v.native, nil
+ case Set:
+ buf := []any{}
+ iter := func(x *Term) error {
+ x1, err := valueToInterface(x.Value, resolver, opt)
+ if err != nil {
+ return err
+ }
+ buf = append(buf, x1)
+ return nil
+ }
+ var err error
+ if opt.SortSets {
+ err = v.Sorted().Iter(iter)
+ } else {
+ err = v.Iter(iter)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return buf, nil
+ case Ref:
+ return resolver.Resolve(v)
+ default:
+ return nil, fmt.Errorf("%v requires evaluation", TypeName(v))
+ }
+}
+
+// JSON returns the JSON representation of v. The value must not contain any
+// refs or terms that require evaluation (e.g., vars, comprehensions, etc.)
+func JSON(v Value) (any, error) {
+ return JSONWithOpt(v, JSONOpt{})
+}
+
+// JSONOpt defines parameters for AST to JSON conversion.
+type JSONOpt struct {
+ SortSets bool // sort sets before serializing (this makes conversion more expensive)
+ CopyMaps bool // enforces copying of map[string]any read from the store
+}
+
+// JSONWithOpt returns the JSON representation of v. The value must not contain any
+// refs or terms that require evaluation (e.g., vars, comprehensions, etc.)
+func JSONWithOpt(v Value, opt JSONOpt) (any, error) {
+ return valueToInterface(v, illegalResolver{}, opt)
+}
+
+// MustJSON returns the JSON representation of v. The value must not contain any
+// refs or terms that require evaluation (e.g., vars, comprehensions, etc.) If
+// the conversion fails, this function will panic. This function is mostly for
+// test purposes.
+func MustJSON(v Value) any {
+ r, err := JSON(v)
+ if err != nil {
+ panic(err)
+ }
+ return r
+}
+
+// MustInterfaceToValue converts a native Go value x to a Value. If the
+// conversion fails, this function will panic. This function is mostly for test
+// purposes.
+func MustInterfaceToValue(x any) Value {
+ v, err := InterfaceToValue(x)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// Term is an argument to a function.
+type Term struct {
+ Value Value `json:"value"` // the value of the Term as represented in Go
+ Location *Location `json:"location,omitempty"` // the location of the Term in the source
+}
+
+// NewTerm returns a new Term object.
+func NewTerm(v Value) *Term {
+ return &Term{
+ Value: v,
+ }
+}
+
+// SetLocation updates the term's Location and returns the term itself.
+func (term *Term) SetLocation(loc *Location) *Term {
+ term.Location = loc
+ return term
+}
+
+// Loc returns the Location of term.
+func (term *Term) Loc() *Location {
+ if term == nil {
+ return nil
+ }
+ return term.Location
+}
+
+// SetLoc sets the location on term.
+func (term *Term) SetLoc(loc *Location) {
+ term.SetLocation(loc)
+}
+
+// Copy returns a deep copy of term.
+func (term *Term) Copy() *Term {
+ if term == nil {
+ return nil
+ }
+
+ cpy := *term
+
+ switch v := term.Value.(type) {
+ case Null, Boolean, Number, String, Var:
+ cpy.Value = v
+ case Ref:
+ cpy.Value = v.Copy()
+ case *Array:
+ cpy.Value = v.Copy()
+ case Set:
+ cpy.Value = v.Copy()
+ case *object:
+ cpy.Value = v.Copy()
+ case *ArrayComprehension:
+ cpy.Value = v.Copy()
+ case *ObjectComprehension:
+ cpy.Value = v.Copy()
+ case *SetComprehension:
+ cpy.Value = v.Copy()
+ case Call:
+ cpy.Value = v.Copy()
+ }
+
+ return &cpy
+}
+
+// Equal returns true if this term equals the other term. Equality is
+// defined for each kind of term, and does not compare the Location.
+func (term *Term) Equal(other *Term) bool {
+ if term == nil && other != nil {
+ return false
+ }
+ if term != nil && other == nil {
+ return false
+ }
+ if term == other {
+ return true
+ }
+
+ return ValueEqual(term.Value, other.Value)
+}
+
+// Get returns a value referred to by name from the term.
+func (term *Term) Get(name *Term) *Term {
+ switch v := term.Value.(type) {
+ case *object:
+ return v.Get(name)
+ case *Array:
+ return v.Get(name)
+ case interface {
+ Get(*Term) *Term
+ }:
+ return v.Get(name)
+ case Set:
+ if v.Contains(name) {
+ return name
+ }
+ }
+ return nil
+}
+
+// Hash returns the hash code of the Term's Value. Its Location
+// is ignored.
+func (term *Term) Hash() int {
+ return term.Value.Hash()
+}
+
+// IsGround returns true if this term's Value is ground.
+func (term *Term) IsGround() bool {
+ return term.Value.IsGround()
+}
+
+// MarshalJSON returns the JSON encoding of the term.
+//
+// Specialized marshalling logic is required to include a type hint for Value.
+func (term *Term) MarshalJSON() ([]byte, error) {
+ d := map[string]any{
+ "type": ValueName(term.Value),
+ "value": term.Value,
+ }
+ jsonOptions := astJSON.GetOptions().MarshalOptions
+ if jsonOptions.IncludeLocation.Term {
+ if term.Location != nil {
+ d["location"] = term.Location
+ }
+ }
+ return json.Marshal(d)
+}
+
+func (term *Term) String() string {
+ return term.Value.String()
+}
+
+// UnmarshalJSON parses the byte array and stores the result in term.
+// Specialized unmarshalling is required to handle Value and Location.
+func (term *Term) UnmarshalJSON(bs []byte) error {
+ v := map[string]any{}
+ if err := util.UnmarshalJSON(bs, &v); err != nil {
+ return err
+ }
+ val, err := unmarshalValue(v)
+ if err != nil {
+ return err
+ }
+ term.Value = val
+
+ if loc, ok := v["location"].(map[string]any); ok {
+ term.Location = &Location{}
+ err := unmarshalLocation(term.Location, loc)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Vars returns a VarSet with variables contained in this term.
+func (term *Term) Vars() VarSet {
+ vis := NewVarVisitor()
+ vis.Walk(term)
+ return vis.vars
+}
+
+// IsConstant returns true if the AST value is constant.
+func IsConstant(v Value) bool {
+ found := false
+ vis := GenericVisitor{
+ func(x any) bool {
+ switch x.(type) {
+ case Var, Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Call:
+ found = true
+ return true
+ }
+ return false
+ },
+ }
+ vis.Walk(v)
+ return !found
+}
+
+// IsComprehension returns true if the supplied value is a comprehension.
+func IsComprehension(x Value) bool {
+ switch x.(type) {
+ case *ArrayComprehension, *ObjectComprehension, *SetComprehension:
+ return true
+ }
+ return false
+}
+
+// ContainsRefs returns true if the Value v contains refs.
+func ContainsRefs(v any) bool {
+ found := false
+ WalkRefs(v, func(Ref) bool {
+ found = true
+ return found
+ })
+ return found
+}
+
+// ContainsComprehensions returns true if the Value v contains comprehensions.
+func ContainsComprehensions(v any) bool {
+ found := false
+ WalkClosures(v, func(x any) bool {
+ switch x.(type) {
+ case *ArrayComprehension, *ObjectComprehension, *SetComprehension:
+ found = true
+ return found
+ }
+ return found
+ })
+ return found
+}
+
+// ContainsClosures returns true if the Value v contains closures.
+func ContainsClosures(v any) bool {
+ found := false
+ WalkClosures(v, func(x any) bool {
+ switch x.(type) {
+ case *ArrayComprehension, *ObjectComprehension, *SetComprehension, *Every:
+ found = true
+ return found
+ }
+ return found
+ })
+ return found
+}
+
+// IsScalar returns true if the AST value is a scalar.
+func IsScalar(v Value) bool {
+ switch v.(type) {
+ case String, Number, Boolean, Null:
+ return true
+ }
+ return false
+}
+
+// Null represents the null value defined by JSON.
+type Null struct{}
+
+var NullValue Value = Null{}
+
+// NullTerm creates a new Term with a Null value.
+func NullTerm() *Term {
+ return &Term{Value: NullValue}
+}
+
+// Equal returns true if the other term Value is also Null.
+func (Null) Equal(other Value) bool {
+ switch other.(type) {
+ case Null:
+ return true
+ default:
+ return false
+ }
+}
+
+// Compare compares null to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (Null) Compare(other Value) int {
+ if _, ok := other.(Null); ok {
+ return 0
+ }
+ return -1
+}
+
+// Find returns the current value or a not found error.
+func (Null) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return NullValue, nil
+ }
+ return nil, errFindNotFound
+}
+
+// Hash returns the hash code for the Value.
+func (Null) Hash() int {
+ return 0
+}
+
+// IsGround always returns true.
+func (Null) IsGround() bool {
+ return true
+}
+
+func (Null) String() string {
+ return "null"
+}
+
+// Boolean represents a boolean value defined by JSON.
+type Boolean bool
+
+// BooleanTerm creates a new Term with a Boolean value.
+func BooleanTerm(b bool) *Term {
+ return &Term{Value: internedBooleanValue(b)}
+}
+
+// Equal returns true if the other Value is a Boolean and is equal.
+func (bol Boolean) Equal(other Value) bool {
+ switch other := other.(type) {
+ case Boolean:
+ return bol == other
+ default:
+ return false
+ }
+}
+
+// Compare compares bol to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (bol Boolean) Compare(other Value) int {
+ switch other := other.(type) {
+ case Boolean:
+ if bol == other {
+ return 0
+ }
+ if !bol {
+ return -1
+ }
+ return 1
+ case Null:
+ return 1
+ }
+
+ return -1
+}
+
+// Find returns the current value or a not found error.
+func (bol Boolean) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return InternedTerm(bool(bol)).Value, nil
+ }
+ return nil, errFindNotFound
+}
+
+// Hash returns the hash code for the Value.
+func (bol Boolean) Hash() int {
+ if bol {
+ return 1
+ }
+ return 0
+}
+
+// IsGround always returns true.
+func (Boolean) IsGround() bool {
+ return true
+}
+
+func (bol Boolean) String() string {
+ return strconv.FormatBool(bool(bol))
+}
+
+// Number represents a numeric value as defined by JSON.
+type Number json.Number
+
+// NumberTerm creates a new Term with a Number value.
+func NumberTerm(n json.Number) *Term {
+ return &Term{Value: Number(n)}
+}
+
+// IntNumberTerm creates a new Term with an integer Number value.
+func IntNumberTerm(i int) *Term {
+ return &Term{Value: newIntNumberValue(i)}
+}
+
+// UIntNumberTerm creates a new Term with an unsigned integer Number value.
+func UIntNumberTerm(u uint64) *Term {
+ return &Term{Value: newUint64NumberValue(u)}
+}
+
+// FloatNumberTerm creates a new Term with a floating point Number value.
+func FloatNumberTerm(f float64) *Term {
+ s := strconv.FormatFloat(f, 'g', -1, 64)
+ return &Term{Value: Number(s)}
+}
+
+// Equal returns true if the other Value is a Number and is equal.
+func (num Number) Equal(other Value) bool {
+ if other, ok := other.(Number); ok {
+ return NumberCompare(num, other) == 0
+ }
+ return false
+}
+
+// Compare compares num to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (num Number) Compare(other Value) int {
+ // Optimize for the common case, as calling Compare allocates on heap.
+ if otherNum, yes := other.(Number); yes {
+ return NumberCompare(num, otherNum)
+ }
+
+ return Compare(num, other)
+}
+
+// Find returns the current value or a not found error.
+func (num Number) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return num, nil
+ }
+ return nil, errFindNotFound
+}
+
+// Hash returns the hash code for the Value.
+func (num Number) Hash() int {
+ if len(num) < 4 {
+ if i, err := strconv.Atoi(string(num)); err == nil {
+ return i
+ }
+ }
+ if f, ok := num.Float64(); ok {
+ return int(f)
+ }
+ return int(xxhash.Sum64String(string(num)))
+}
+
+// Int returns the int representation of num if possible.
+func (num Number) Int() (int, bool) {
+ i64, ok := num.Int64()
+ return int(i64), ok
+}
+
+// Int64 returns the int64 representation of num if possible.
+func (num Number) Int64() (int64, bool) {
+ i, err := json.Number(num).Int64()
+ if err != nil {
+ return 0, false
+ }
+ return i, true
+}
+
+// Float64 returns the float64 representation of num if possible.
+func (num Number) Float64() (float64, bool) {
+ f, err := json.Number(num).Float64()
+ if err != nil {
+ return 0, false
+ }
+ return f, true
+}
+
+// IsGround always returns true.
+func (Number) IsGround() bool {
+ return true
+}
+
+// MarshalJSON returns JSON encoded bytes representing num.
+func (num Number) MarshalJSON() ([]byte, error) {
+ return json.Marshal(json.Number(num))
+}
+
+func (num Number) String() string {
+ return string(num)
+}
+
+func newIntNumberValue(i int) Value {
+ return Number(strconv.Itoa(i))
+}
+
+func newInt64NumberValue(i int64) Value {
+ return Number(strconv.FormatInt(i, 10))
+}
+
+func newUint64NumberValue(u uint64) Value {
+ return Number(strconv.FormatUint(u, 10))
+}
+
+func floatNumber(f float64) Number {
+ return Number(strconv.FormatFloat(f, 'g', -1, 64))
+}
+
+// String represents a string value as defined by JSON.
+type String string
+
+// StringTerm creates a new Term with a String value.
+func StringTerm(s string) *Term {
+ return &Term{Value: String(s)}
+}
+
+// Equal returns true if the other Value is a String and is equal.
+func (str String) Equal(other Value) bool {
+ switch other := other.(type) {
+ case String:
+ return str == other
+ default:
+ return false
+ }
+}
+
+// Compare compares str to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (str String) Compare(other Value) int {
+ // Optimize for the common case of one string being compared to another by
+ // using a direct comparison of values. This avoids the allocation performed
+ // when calling Compare and its any argument conversion.
+ if otherStr, ok := other.(String); ok {
+ if str == otherStr {
+ return 0
+ }
+ if str < otherStr {
+ return -1
+ }
+ return 1
+ }
+
+ return Compare(str, other)
+}
+
+// Find returns the current value or a not found error.
+func (str String) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return str, nil
+ }
+ return nil, errFindNotFound
+}
+
+// IsGround always returns true.
+func (String) IsGround() bool {
+ return true
+}
+
+func (str String) String() string {
+ return strconv.Quote(string(str))
+}
+
+// Hash returns the hash code for the Value.
+func (str String) Hash() int {
+ return int(xxhash.Sum64String(string(str)))
+}
+
+// Var represents a variable as defined by the language.
+type Var string
+
+// VarTerm creates a new Term with a Variable value.
+func VarTerm(v string) *Term {
+ return &Term{Value: Var(v)}
+}
+
+// Equal returns true if the other Value is a Variable and has the same value
+// (name).
+func (v Var) Equal(other Value) bool {
+ switch other := other.(type) {
+ case Var:
+ return v == other
+ default:
+ return false
+ }
+}
+
+// Compare compares v to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (v Var) Compare(other Value) int {
+ if otherVar, ok := other.(Var); ok {
+ return strings.Compare(string(v), string(otherVar))
+ }
+ return Compare(v, other)
+}
+
+// Find returns the current value or a not found error.
+func (v Var) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return v, nil
+ }
+ return nil, errFindNotFound
+}
+
+// Hash returns the hash code for the Value.
+func (v Var) Hash() int {
+ return int(xxhash.Sum64String(string(v)))
+}
+
+// IsGround always returns false.
+func (Var) IsGround() bool {
+ return false
+}
+
+// IsWildcard returns true if this is a wildcard variable.
+func (v Var) IsWildcard() bool {
+ return strings.HasPrefix(string(v), WildcardPrefix)
+}
+
+// IsGenerated returns true if this variable was generated during compilation.
+func (v Var) IsGenerated() bool {
+ return strings.HasPrefix(string(v), "__local")
+}
+
+func (v Var) String() string {
+ // Special case for wildcard so that string representation is parseable. The
+ // parser mangles wildcard variables to make their names unique and uses an
+ // illegal variable name character (WildcardPrefix) to avoid conflicts. When
+ // we serialize the variable here, we need to make sure it's parseable.
+ if v.IsWildcard() {
+ return Wildcard.String()
+ }
+ return string(v)
+}
+
+// Ref represents a reference as defined by the language.
+type Ref []*Term
+
+// EmptyRef returns a new, empty reference.
+func EmptyRef() Ref {
+ return Ref([]*Term{})
+}
+
+// PtrRef returns a new reference against the head for the pointer
+// s. Path components in the pointer are unescaped.
+func PtrRef(head *Term, s string) (Ref, error) {
+ s = strings.Trim(s, "/")
+ if s == "" {
+ return Ref{head}, nil
+ }
+ parts := strings.Split(s, "/")
+ if maxLen := math.MaxInt32; len(parts) >= maxLen {
+ return nil, fmt.Errorf("path too long: %s, %d > %d (max)", s, len(parts), maxLen)
+ }
+ ref := make(Ref, uint(len(parts))+1)
+ ref[0] = head
+ for i := range parts {
+ var err error
+ parts[i], err = url.PathUnescape(parts[i])
+ if err != nil {
+ return nil, err
+ }
+ ref[i+1] = StringTerm(parts[i])
+ }
+ return ref, nil
+}
+
+// RefTerm creates a new Term with a Ref value.
+func RefTerm(r ...*Term) *Term {
+ return &Term{Value: Ref(r)}
+}
+
+// Append returns a copy of ref with the term appended to the end.
+func (ref Ref) Append(term *Term) Ref {
+ n := len(ref)
+ dst := make(Ref, n+1)
+ copy(dst, ref)
+ dst[n] = term
+ return dst
+}
+
+// Insert returns a copy of the ref with x inserted at pos. If pos < len(ref),
+// existing elements are shifted to the right. If pos > len(ref)+1 this
+// function panics.
+func (ref Ref) Insert(x *Term, pos int) Ref {
+ switch {
+ case pos == len(ref):
+ return ref.Append(x)
+ case pos > len(ref)+1:
+ panic("illegal index")
+ }
+ cpy := make(Ref, len(ref)+1)
+ copy(cpy, ref[:pos])
+ cpy[pos] = x
+ copy(cpy[pos+1:], ref[pos:])
+ return cpy
+}
+
+// Extend returns a copy of ref with the terms from other appended. The head of
+// other will be converted to a string.
+func (ref Ref) Extend(other Ref) Ref {
+ dst := make(Ref, len(ref)+len(other))
+ copy(dst, ref)
+
+ head := other[0].Copy()
+ head.Value = String(head.Value.(Var))
+ offset := len(ref)
+ dst[offset] = head
+
+ copy(dst[offset+1:], other[1:])
+ return dst
+}
+
+// Concat returns a ref with the terms appended.
+func (ref Ref) Concat(terms []*Term) Ref {
+ if len(terms) == 0 {
+ return ref
+ }
+ cpy := make(Ref, len(ref)+len(terms))
+ copy(cpy, ref)
+ copy(cpy[len(ref):], terms)
+ return cpy
+}
+
+// Dynamic returns the offset of the first non-constant operand of ref.
+func (ref Ref) Dynamic() int {
+ switch ref[0].Value.(type) {
+ case Call:
+ return 0
+ }
+ for i := 1; i < len(ref); i++ {
+ if !IsConstant(ref[i].Value) {
+ return i
+ }
+ }
+ return -1
+}
+
+// Copy returns a deep copy of ref.
+func (ref Ref) Copy() Ref {
+ return termSliceCopy(ref)
+}
+
+// CopyNonGround returns a new ref with deep copies of the non-ground parts and shallow
+// copies of the ground parts. This is a *much* cheaper operation than Copy for operations
+// that only intend to modify (e.g. plug) the non-ground parts. The head element of the ref
+// is always shallow copied.
+func (ref Ref) CopyNonGround() Ref {
+ cpy := make(Ref, len(ref))
+ cpy[0] = ref[0]
+
+ for i := 1; i < len(ref); i++ {
+ if ref[i].Value.IsGround() {
+ cpy[i] = ref[i]
+ } else {
+ cpy[i] = ref[i].Copy()
+ }
+ }
+
+ return cpy
+}
+
+// Equal returns true if ref is equal to other.
+func (ref Ref) Equal(other Value) bool {
+ switch o := other.(type) {
+ case Ref:
+ if len(ref) == len(o) {
+ for i := range ref {
+ if !ref[i].Equal(o[i]) {
+ return false
+ }
+ }
+
+ return true
+ }
+ }
+
+ return false
+}
+
+// Compare compares ref to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (ref Ref) Compare(other Value) int {
+ if o, ok := other.(Ref); ok {
+ return termSliceCompare(ref, o)
+ }
+
+ return Compare(ref, other)
+}
+
+// Find returns the current value or a "not found" error.
+func (ref Ref) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return ref, nil
+ }
+ return nil, errFindNotFound
+}
+
+// Hash returns the hash code for the Value.
+func (ref Ref) Hash() int {
+ return termSliceHash(ref)
+}
+
+// HasPrefix returns true if the other ref is a prefix of this ref.
+func (ref Ref) HasPrefix(other Ref) bool {
+ if len(other) > len(ref) {
+ return false
+ }
+ for i := range other {
+ if !ref[i].Equal(other[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// ConstantPrefix returns the constant portion of the ref starting from the head.
+func (ref Ref) ConstantPrefix() Ref {
+ i := ref.Dynamic()
+ if i < 0 {
+ return ref.Copy()
+ }
+ return ref[:i].Copy()
+}
+
+func (ref Ref) StringPrefix() Ref {
+ for i := 1; i < len(ref); i++ {
+ switch ref[i].Value.(type) {
+ case String: // pass
+ default: // cut off
+ return ref[:i].Copy()
+ }
+ }
+
+ return ref.Copy()
+}
+
+// GroundPrefix returns the ground portion of the ref starting from the head. By
+// definition, the head of the reference is always ground.
+func (ref Ref) GroundPrefix() Ref {
+ if ref.IsGround() {
+ return ref
+ }
+
+ prefix := make(Ref, 0, len(ref))
+
+ for i, x := range ref {
+ if i > 0 && !x.IsGround() {
+ break
+ }
+ prefix = append(prefix, x)
+ }
+
+ return prefix
+}
+
+func (ref Ref) DynamicSuffix() Ref {
+ i := ref.Dynamic()
+ if i < 0 {
+ return nil
+ }
+ return ref[i:]
+}
+
+// IsGround returns true if all of the parts of the Ref are ground.
+func (ref Ref) IsGround() bool {
+ if len(ref) == 0 {
+ return true
+ }
+ return termSliceIsGround(ref[1:])
+}
+
+// IsNested returns true if this ref contains other Refs.
+func (ref Ref) IsNested() bool {
+ for _, x := range ref {
+ if _, ok := x.Value.(Ref); ok {
+ return true
+ }
+ }
+ return false
+}
+
+// Ptr returns a slash-separated path string for this ref. If the ref
+// contains non-string terms this function returns an error. Path
+// components are escaped.
+func (ref Ref) Ptr() (string, error) {
+ parts := make([]string, 0, len(ref)-1)
+ for _, term := range ref[1:] {
+ if str, ok := term.Value.(String); ok {
+ parts = append(parts, url.PathEscape(string(str)))
+ } else {
+ return "", errors.New("invalid path value type")
+ }
+ }
+ return strings.Join(parts, "/"), nil
+}
+
+var varRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$")
+
+func IsVarCompatibleString(s string) bool {
+ return varRegexp.MatchString(s)
+}
+
+var bbPool = &sync.Pool{
+ New: func() any {
+ return new(bytes.Buffer)
+ },
+}
+
+func (ref Ref) String() string {
+ // Note(anderseknert):
+ // Options tried in the order of cheapness, where after some effort,
+ // only the last option now requires a (single) allocation:
+ // 1. empty ref
+ // 2. single var ref
+ // 3. built-in function ref
+ // 4. concatenated parts
+ reflen := len(ref)
+ if reflen == 0 {
+ return ""
+ }
+ if reflen == 1 {
+ return ref[0].Value.String()
+ }
+ if name, ok := BuiltinNameFromRef(ref); ok {
+ return name
+ }
+
+ _var := ref[0].Value.String()
+
+ bb := bbPool.Get().(*bytes.Buffer)
+ bb.Reset()
+
+ defer bbPool.Put(bb)
+
+ bb.Grow(len(_var) + len(ref[1:])*7) // rough estimate
+ bb.WriteString(_var)
+
+ for _, p := range ref[1:] {
+ switch p := p.Value.(type) {
+ case String:
+ str := string(p)
+ if IsVarCompatibleString(str) && !IsKeyword(str) {
+ bb.WriteByte('.')
+ bb.WriteString(str)
+ } else {
+ bb.WriteByte('[')
+ // Determine whether we need the full JSON-escaped form
+ if strings.ContainsFunc(str, isControlOrBackslash) {
+ bb.Write(strconv.AppendQuote(bb.AvailableBuffer(), str))
+ } else {
+ bb.WriteByte('"')
+ bb.WriteString(str)
+ bb.WriteByte('"')
+ }
+ bb.WriteByte(']')
+ }
+ default:
+ bb.WriteByte('[')
+ bb.WriteString(p.String())
+ bb.WriteByte(']')
+ }
+ }
+
+ return bb.String()
+}
+
+// OutputVars returns a VarSet containing variables that would be bound by evaluating
+// this expression in isolation.
+func (ref Ref) OutputVars() VarSet {
+ vis := NewVarVisitor().WithParams(VarVisitorParams{SkipRefHead: true})
+ vis.WalkRef(ref)
+ return vis.Vars()
+}
+
+func (ref Ref) toArray() *Array {
+ terms := make([]*Term, 0, len(ref))
+ for _, term := range ref {
+ if _, ok := term.Value.(String); ok {
+ terms = append(terms, term)
+ } else {
+ terms = append(terms, InternedTerm(term.Value.String()))
+ }
+ }
+ return NewArray(terms...)
+}
+
+// QueryIterator defines the interface for querying AST documents with references.
+type QueryIterator func(map[Var]Value, Value) error
+
+// ArrayTerm creates a new Term with an Array value.
+func ArrayTerm(a ...*Term) *Term {
+ return NewTerm(NewArray(a...))
+}
+
+// NewArray creates an Array with the terms provided. The array will
+// use the provided term slice.
+func NewArray(a ...*Term) *Array {
+ hs := make([]int, len(a))
+ for i, e := range a {
+ hs[i] = e.Value.Hash()
+ }
+ arr := &Array{elems: a, hashs: hs, ground: termSliceIsGround(a)}
+ arr.rehash()
+ return arr
+}
+
+// Array represents an array as defined by the language. Arrays are similar to the
+// same types as defined by JSON with the exception that they can contain Vars
+// and References.
+type Array struct {
+ elems []*Term
+ hashs []int // element hashes
+ hash int
+ ground bool
+}
+
+// Copy returns a deep copy of arr.
+func (arr *Array) Copy() *Array {
+ cpy := make([]int, len(arr.elems))
+ copy(cpy, arr.hashs)
+ return &Array{
+ elems: termSliceCopy(arr.elems),
+ hashs: cpy,
+ hash: arr.hash,
+ ground: arr.IsGround()}
+}
+
+// Equal returns true if arr is equal to other.
+func (arr *Array) Equal(other Value) bool {
+ if arr == other {
+ return true
+ }
+
+ if other, ok := other.(*Array); ok && len(arr.elems) == len(other.elems) {
+ for i := range arr.elems {
+ if !arr.elems[i].Equal(other.elems[i]) {
+ return false
+ }
+ }
+ return true
+ }
+
+ return false
+}
+
+// Compare compares arr to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (arr *Array) Compare(other Value) int {
+ if b, ok := other.(*Array); ok {
+ return termSliceCompare(arr.elems, b.elems)
+ }
+
+ sortA := sortOrder(arr)
+ sortB := sortOrder(other)
+
+ if sortA < sortB {
+ return -1
+ } else if sortB < sortA {
+ return 1
+ }
+
+ return Compare(arr, other)
+}
+
+// Find returns the value at the index or an out-of-range error.
+func (arr *Array) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return arr, nil
+ }
+ num, ok := path[0].Value.(Number)
+ if !ok {
+ return nil, errFindNotFound
+ }
+ i, ok := num.Int()
+ if !ok || i < 0 || i >= arr.Len() {
+ return nil, errFindNotFound
+ }
+
+ term := arr.Elem(i)
+ // Using Find on scalar values costs an allocation (type -> Value conversion)
+ // and since we already have the Value here, we can avoid that.
+ if len(path) == 1 && IsScalar(term.Value) {
+ return term.Value, nil
+ }
+
+ return term.Value.Find(path[1:])
+}
+
+// Get returns the element at pos or nil if not possible.
+func (arr *Array) Get(pos *Term) *Term {
+ num, ok := pos.Value.(Number)
+ if !ok {
+ return nil
+ }
+
+ if i, ok := num.Int(); ok && i >= 0 && i < len(arr.elems) {
+ return arr.elems[i]
+ }
+
+ return nil
+}
+
+// Sorted returns a new Array that contains the sorted elements of arr.
+func (arr *Array) Sorted() *Array {
+ cpy := make([]*Term, len(arr.elems))
+ for i := range cpy {
+ cpy[i] = arr.elems[i]
+ }
+
+ slices.SortFunc(cpy, TermValueCompare)
+
+ a := NewArray(cpy...)
+ a.hashs = arr.hashs
+ return a
+}
+
+// Hash returns the hash code for the Value.
+func (arr *Array) Hash() int {
+ return arr.hash
+}
+
+// IsGround returns true if all of the Array elements are ground.
+func (arr *Array) IsGround() bool {
+ return arr.ground
+}
+
+// MarshalJSON returns JSON encoded bytes representing arr.
+func (arr *Array) MarshalJSON() ([]byte, error) {
+ if len(arr.elems) == 0 {
+ return []byte(`[]`), nil
+ }
+ return json.Marshal(arr.elems)
+}
+
+func (arr *Array) String() string {
+ sb := sbPool.Get()
+ sb.Grow(len(arr.elems) * 16)
+
+ defer sbPool.Put(sb)
+
+ sb.WriteByte('[')
+ for i, e := range arr.elems {
+ if i > 0 {
+ sb.WriteString(", ")
+ }
+ sb.WriteString(e.String())
+ }
+ sb.WriteByte(']')
+
+ return sb.String()
+}
+
+// Len returns the number of elements in the array.
+func (arr *Array) Len() int {
+ return len(arr.elems)
+}
+
+// Elem returns the element i of arr.
+func (arr *Array) Elem(i int) *Term {
+ return arr.elems[i]
+}
+
+// Set sets the element i of arr.
+func (arr *Array) Set(i int, v *Term) {
+ arr.set(i, v)
+}
+
+// rehash updates the cached hash of arr.
+func (arr *Array) rehash() {
+ arr.hash = 0
+ for _, h := range arr.hashs {
+ arr.hash += h
+ }
+}
+
+// set sets the element i of arr.
+func (arr *Array) set(i int, v *Term) {
+ arr.ground = arr.ground && v.IsGround()
+ arr.elems[i] = v
+ arr.hashs[i] = v.Value.Hash()
+ arr.rehash()
+}
+
+// Slice returns a slice of arr starting from i index to j. -1
+// indicates the end of the array. The returned value array is not a
+// copy and any modifications to either of arrays may be reflected to
+// the other.
+func (arr *Array) Slice(i, j int) *Array {
+ var elems []*Term
+ var hashs []int
+ if j == -1 {
+ elems = arr.elems[i:]
+ hashs = arr.hashs[i:]
+ } else {
+ elems = arr.elems[i:j]
+ hashs = arr.hashs[i:j]
+ }
+ // If arr is ground, the slice is, too.
+ // If it's not, the slice could still be.
+ gr := arr.ground || termSliceIsGround(elems)
+
+ s := &Array{elems: elems, hashs: hashs, ground: gr}
+ s.rehash()
+ return s
+}
+
+// Iter calls f on each element in arr. If f returns an error,
+// iteration stops and the return value is the error.
+func (arr *Array) Iter(f func(*Term) error) error {
+ for i := range arr.elems {
+ if err := f(arr.elems[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Until calls f on each element in arr. If f returns true, iteration stops.
+func (arr *Array) Until(f func(*Term) bool) bool {
+ return slices.ContainsFunc(arr.elems, f)
+}
+
+// Foreach calls f on each element in arr.
+func (arr *Array) Foreach(f func(*Term)) {
+ for _, term := range arr.elems {
+ f(term)
+ }
+}
+
+// Append appends a term to arr, returning the appended array.
+func (arr *Array) Append(v *Term) *Array {
+ cpy := *arr
+ cpy.elems = append(arr.elems, v)
+ cpy.hashs = append(arr.hashs, v.Value.Hash())
+ cpy.hash = arr.hash + v.Value.Hash()
+ cpy.ground = arr.ground && v.IsGround()
+ return &cpy
+}
+
+// Set represents a set as defined by the language.
+type Set interface {
+ Value
+ Len() int
+ Copy() Set
+ Diff(Set) Set
+ Intersect(Set) Set
+ Union(Set) Set
+ Add(*Term)
+ Iter(func(*Term) error) error
+ Until(func(*Term) bool) bool
+ Foreach(func(*Term))
+ Contains(*Term) bool
+ Map(func(*Term) (*Term, error)) (Set, error)
+ Reduce(*Term, func(*Term, *Term) (*Term, error)) (*Term, error)
+ Sorted() *Array
+ Slice() []*Term
+}
+
+// NewSet returns a new Set containing t.
+func NewSet(t ...*Term) Set {
+ s := newset(len(t))
+ for _, term := range t {
+ s.insert(term, false)
+ }
+ return s
+}
+
+func newset(n int) *set {
+ var keys []*Term
+ if n > 0 {
+ keys = make([]*Term, 0, n)
+ }
+ return &set{
+ elems: make(map[int]*Term, n),
+ keys: keys,
+ hash: 0,
+ ground: true,
+ sortGuard: sync.Once{},
+ }
+}
+
+// SetTerm returns a new Term representing a set containing terms t.
+func SetTerm(t ...*Term) *Term {
+ set := NewSet(t...)
+ return &Term{
+ Value: set,
+ }
+}
+
+type set struct {
+ elems map[int]*Term
+ keys []*Term
+ hash int
+ ground bool
+ // Prevents race condition around sorting.
+ // We can avoid (the allocation cost of) using a pointer here as all
+ // methods of `set` use a pointer receiver, and the `sync.Once` value
+ // is never copied.
+ sortGuard sync.Once
+}
+
+// Copy returns a deep copy of s.
+func (s *set) Copy() Set {
+ terms := make([]*Term, len(s.keys))
+ for i := range s.keys {
+ terms[i] = s.keys[i].Copy()
+ }
+ cpy := NewSet(terms...).(*set)
+ cpy.hash = s.hash
+ cpy.ground = s.ground
+ return cpy
+}
+
+// IsGround returns true if all terms in s are ground.
+func (s *set) IsGround() bool {
+ return s.ground
+}
+
+// Hash returns a hash code for s.
+func (s *set) Hash() int {
+ return s.hash
+}
+
+func (s *set) String() string {
+ if s.Len() == 0 {
+ return "set()"
+ }
+
+ sb := sbPool.Get()
+ sb.Grow(s.Len() * 16)
+
+ defer sbPool.Put(sb)
+
+ sb.WriteByte('{')
+ for i := range s.sortedKeys() {
+ if i > 0 {
+ sb.WriteString(", ")
+ }
+ sb.WriteString(s.keys[i].Value.String())
+ }
+ sb.WriteByte('}')
+
+ return sb.String()
+}
+
+func (s *set) sortedKeys() []*Term {
+ s.sortGuard.Do(func() {
+ slices.SortFunc(s.keys, TermValueCompare)
+ })
+ return s.keys
+}
+
+// Compare compares s to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (s *set) Compare(other Value) int {
+ o1 := sortOrder(s)
+ o2 := sortOrder(other)
+ if o1 < o2 {
+ return -1
+ } else if o1 > o2 {
+ return 1
+ }
+ t := other.(*set)
+ return termSliceCompare(s.sortedKeys(), t.sortedKeys())
+}
+
+// Find returns the set or dereferences the element itself.
+func (s *set) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return s, nil
+ }
+ if !s.Contains(path[0]) {
+ return nil, errFindNotFound
+ }
+ return path[0].Value.Find(path[1:])
+}
+
+// Diff returns elements in s that are not in other.
+func (s *set) Diff(other Set) Set {
+ if s.Compare(other) == 0 {
+ return NewSet()
+ }
+
+ terms := make([]*Term, 0, len(s.keys))
+ for _, term := range s.sortedKeys() {
+ if !other.Contains(term) {
+ terms = append(terms, term)
+ }
+ }
+
+ return NewSet(terms...)
+}
+
+// Intersect returns the set containing elements in both s and other.
+func (s *set) Intersect(other Set) Set {
+ o := other.(*set)
+ n, m := s.Len(), o.Len()
+ ss := s
+ so := o
+ if m < n {
+ ss = o
+ so = s
+ n = m
+ }
+
+ terms := make([]*Term, 0, n)
+ for _, term := range ss.sortedKeys() {
+ if so.Contains(term) {
+ terms = append(terms, term)
+ }
+ }
+
+ return NewSet(terms...)
+}
+
+// Union returns the set containing all elements of s and other.
+func (s *set) Union(other Set) Set {
+ r := NewSet()
+ s.Foreach(r.Add)
+ other.Foreach(r.Add)
+ return r
+}
+
+// Add updates s to include t.
+func (s *set) Add(t *Term) {
+ s.insert(t, true)
+}
+
+// Iter calls f on each element in s. If f returns an error, iteration stops
+// and the return value is the error.
+func (s *set) Iter(f func(*Term) error) error {
+ for _, term := range s.sortedKeys() {
+ if err := f(term); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Until calls f on each element in s. If f returns true, iteration stops.
+func (s *set) Until(f func(*Term) bool) bool {
+ return slices.ContainsFunc(s.sortedKeys(), f)
+}
+
+// Foreach calls f on each element in s.
+func (s *set) Foreach(f func(*Term)) {
+ for _, term := range s.sortedKeys() {
+ f(term)
+ }
+}
+
+// Map returns a new Set obtained by applying f to each value in s.
+func (s *set) Map(f func(*Term) (*Term, error)) (Set, error) {
+ mapped := make([]*Term, 0, len(s.keys))
+ for _, x := range s.sortedKeys() {
+ term, err := f(x)
+ if err != nil {
+ return nil, err
+ }
+ mapped = append(mapped, term)
+ }
+ return NewSet(mapped...), nil
+}
+
+// Reduce returns a Term produced by applying f to each value in s. The first
+// argument to f is the reduced value (starting with i) and the second argument
+// to f is the element in s.
+func (s *set) Reduce(i *Term, f func(*Term, *Term) (*Term, error)) (*Term, error) {
+ err := s.Iter(func(x *Term) error {
+ var err error
+ i, err = f(i, x)
+ if err != nil {
+ return err
+ }
+ return nil
+ })
+ return i, err
+}
+
+// Contains returns true if t is in s.
+func (s *set) Contains(t *Term) bool {
+ return s.get(t) != nil
+}
+
+// Len returns the number of elements in the set.
+func (s *set) Len() int {
+ return len(s.keys)
+}
+
+// MarshalJSON returns JSON encoded bytes representing s.
+func (s *set) MarshalJSON() ([]byte, error) {
+ if s.keys == nil {
+ return []byte(`[]`), nil
+ }
+ return json.Marshal(s.sortedKeys())
+}
+
+// Sorted returns an Array that contains the sorted elements of s.
+func (s *set) Sorted() *Array {
+ cpy := make([]*Term, len(s.keys))
+ copy(cpy, s.sortedKeys())
+ return NewArray(cpy...)
+}
+
+// Slice returns a slice of terms contained in the set.
+func (s *set) Slice() []*Term {
+ return s.sortedKeys()
+}
+
+// NOTE(philipc): We assume a many-readers, single-writer model here.
+// This method should NOT be used concurrently, or else we risk data races.
+func (s *set) insert(x *Term, resetSortGuard bool) {
+ hash := x.Hash()
+ insertHash := hash
+
+ for curr, ok := s.elems[insertHash]; ok; {
+ if KeyHashEqual(curr.Value, x.Value) {
+ return
+ }
+
+ insertHash++
+ curr, ok = s.elems[insertHash]
+ }
+
+ s.elems[insertHash] = x
+ // O(1) insertion, but we'll have to re-sort the keys later.
+ s.keys = append(s.keys, x)
+
+ if resetSortGuard {
+ // Reset the sync.Once instance.
+ // See https://github.com/golang/go/issues/25955 for why we do it this way.
+ // Note that this will always be the case when external code calls insert via
+ // Add, or otherwise. Internal code may however benefit from not having to
+ // re-create this pointer when it's known not to be needed.
+ s.sortGuard = sync.Once{}
+ }
+
+ s.hash += hash
+ s.ground = s.ground && x.IsGround()
+}
+
+func (s *set) get(x *Term) *Term {
+ if len(s.elems) == 0 {
+ return nil
+ }
+
+ hash := x.Hash()
+
+ for curr, ok := s.elems[hash]; ok; {
+ // Pointer equality check first
+ if curr == x {
+ return curr
+ }
+ if KeyHashEqual(curr.Value, x.Value) {
+ return curr
+ }
+
+ hash++
+ curr, ok = s.elems[hash]
+ }
+ return nil
+}
+
+// Object represents an object as defined by the language.
+type Object interface {
+ Value
+ Len() int
+ Get(*Term) *Term
+ Copy() Object
+ Insert(*Term, *Term)
+ Iter(func(*Term, *Term) error) error
+ Until(func(*Term, *Term) bool) bool
+ Foreach(func(*Term, *Term))
+ Map(func(*Term, *Term) (*Term, *Term, error)) (Object, error)
+ Diff(other Object) Object
+ Intersect(other Object) [][3]*Term
+ Merge(other Object) (Object, bool)
+ MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool)
+ Filter(filter Object) (Object, error)
+ Keys() []*Term
+ KeysIterator() ObjectKeysIterator
+ get(k *Term) *objectElem // To prevent external implementations
+}
+
+// NewObject creates a new Object with t.
+func NewObject(t ...[2]*Term) Object {
+ obj := newobject(len(t))
+ for i := range t {
+ obj.insert(t[i][0], t[i][1], false)
+ }
+ return obj
+}
+
+// ObjectTerm creates a new Term with an Object value.
+func ObjectTerm(o ...[2]*Term) *Term {
+ return &Term{Value: NewObject(o...)}
+}
+
+func LazyObject(blob map[string]any) Object {
+ return &lazyObj{native: blob, cache: map[string]Value{}}
+}
+
+type lazyObj struct {
+ strict Object
+ cache map[string]Value
+ native map[string]any
+}
+
+func (l *lazyObj) force() Object {
+ if l.strict == nil {
+ l.strict = MustInterfaceToValue(l.native).(Object)
+ // NOTE(jf): a possible performance improvement here would be to check how many
+ // entries have been realized to AST in the cache, and if some threshold compared to the
+ // total number of keys is exceeded, realize the remaining entries and set l.strict to l.cache.
+ l.cache = map[string]Value{} // We don't need the cache anymore; drop it to free up memory.
+ }
+ return l.strict
+}
+
+func (l *lazyObj) Compare(other Value) int {
+ o1 := sortOrder(l)
+ o2 := sortOrder(other)
+ if o1 < o2 {
+ return -1
+ } else if o2 < o1 {
+ return 1
+ }
+ return l.force().Compare(other)
+}
+
+func (l *lazyObj) Copy() Object {
+ return l
+}
+
+func (l *lazyObj) Diff(other Object) Object {
+ return l.force().Diff(other)
+}
+
+func (l *lazyObj) Intersect(other Object) [][3]*Term {
+ return l.force().Intersect(other)
+}
+
+func (l *lazyObj) Iter(f func(*Term, *Term) error) error {
+ return l.force().Iter(f)
+}
+
+func (l *lazyObj) Until(f func(*Term, *Term) bool) bool {
+ // NOTE(sr): there could be benefits in not forcing here -- if we abort because
+ // `f` returns true, we could save us from converting the rest of the object.
+ return l.force().Until(f)
+}
+
+func (l *lazyObj) Foreach(f func(*Term, *Term)) {
+ l.force().Foreach(f)
+}
+
+func (l *lazyObj) Filter(filter Object) (Object, error) {
+ return l.force().Filter(filter)
+}
+
+func (l *lazyObj) Map(f func(*Term, *Term) (*Term, *Term, error)) (Object, error) {
+ return l.force().Map(f)
+}
+
+func (l *lazyObj) MarshalJSON() ([]byte, error) {
+ return l.force().(*object).MarshalJSON()
+}
+
+func (l *lazyObj) Merge(other Object) (Object, bool) {
+ return l.force().Merge(other)
+}
+
+func (l *lazyObj) MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) {
+ return l.force().MergeWith(other, conflictResolver)
+}
+
+func (l *lazyObj) Len() int {
+ return len(l.native)
+}
+
+func (l *lazyObj) String() string {
+ return l.force().String()
+}
+
+// get is merely there to implement the Object interface -- `get` there serves the
+// purpose of prohibiting external implementations. It's never called for lazyObj.
+func (*lazyObj) get(*Term) *objectElem {
+ return nil
+}
+
+func (l *lazyObj) Get(k *Term) *Term {
+ if l.strict != nil {
+ return l.strict.Get(k)
+ }
+ if s, ok := k.Value.(String); ok {
+ if v, ok := l.cache[string(s)]; ok {
+ return NewTerm(v)
+ }
+
+ if val, ok := l.native[string(s)]; ok {
+ var converted Value
+ switch val := val.(type) {
+ case map[string]any:
+ converted = LazyObject(val)
+ default:
+ converted = MustInterfaceToValue(val)
+ }
+ l.cache[string(s)] = converted
+ return NewTerm(converted)
+ }
+ }
+ return nil
+}
+
+func (l *lazyObj) Insert(k, v *Term) {
+ l.force().Insert(k, v)
+}
+
+func (*lazyObj) IsGround() bool {
+ return true
+}
+
+func (l *lazyObj) Hash() int {
+ return l.force().Hash()
+}
+
+func (l *lazyObj) Keys() []*Term {
+ if l.strict != nil {
+ return l.strict.Keys()
+ }
+ ret := make([]*Term, 0, len(l.native))
+ for k := range l.native {
+ ret = append(ret, StringTerm(k))
+ }
+ slices.SortFunc(ret, TermValueCompare)
+
+ return ret
+}
+
+func (l *lazyObj) KeysIterator() ObjectKeysIterator {
+ return &lazyObjKeysIterator{keys: l.Keys()}
+}
+
+type lazyObjKeysIterator struct {
+ current int
+ keys []*Term
+}
+
+func (ki *lazyObjKeysIterator) Next() (*Term, bool) {
+ if ki.current == len(ki.keys) {
+ return nil, false
+ }
+ ki.current++
+ return ki.keys[ki.current-1], true
+}
+
+func (l *lazyObj) Find(path Ref) (Value, error) {
+ if l.strict != nil {
+ return l.strict.Find(path)
+ }
+ if len(path) == 0 {
+ return l, nil
+ }
+ if p0, ok := path[0].Value.(String); ok {
+ if v, ok := l.cache[string(p0)]; ok {
+ return v.Find(path[1:])
+ }
+
+ if v, ok := l.native[string(p0)]; ok {
+ var converted Value
+ switch v := v.(type) {
+ case map[string]any:
+ converted = LazyObject(v)
+ default:
+ converted = MustInterfaceToValue(v)
+ }
+ l.cache[string(p0)] = converted
+ return converted.Find(path[1:])
+ }
+ }
+ return nil, errFindNotFound
+}
+
+type object struct {
+ elems map[int]*objectElem
+ keys []*objectElem
+ ground int // number of key and value grounds. Counting is required to support insert's key-value replace.
+ hash int
+ sortGuard sync.Once // Prevents race condition around sorting.
+}
+
+func newobject(n int) *object {
+ var keys []*objectElem
+ if n > 0 {
+ keys = make([]*objectElem, 0, n)
+ }
+ return &object{
+ elems: make(map[int]*objectElem, n),
+ keys: keys,
+ sortGuard: sync.Once{},
+ }
+}
+
+type objectElem struct {
+ key *Term
+ value *Term
+ next *objectElem
+}
+
+// Item is a helper for constructing an tuple containing two Terms
+// representing a key/value pair in an Object.
+func Item(key, value *Term) [2]*Term {
+ return [2]*Term{key, value}
+}
+
+func (obj *object) sortedKeys() []*objectElem {
+ obj.sortGuard.Do(func() {
+ slices.SortFunc(obj.keys, func(a, b *objectElem) int {
+ return a.key.Value.Compare(b.key.Value)
+ })
+ })
+ return obj.keys
+}
+
+// Compare compares obj to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (obj *object) Compare(other Value) int {
+ if x, ok := other.(*lazyObj); ok {
+ other = x.force()
+ }
+ o1 := sortOrder(obj)
+ o2 := sortOrder(other)
+ if o1 < o2 {
+ return -1
+ } else if o2 < o1 {
+ return 1
+ }
+ a := obj
+ b := other.(*object)
+ // Ensure that keys are in canonical sorted order before use!
+ akeys := a.sortedKeys()
+ bkeys := b.sortedKeys()
+ minLen := len(akeys)
+ if len(b.keys) < len(akeys) {
+ minLen = len(bkeys)
+ }
+ for i := range minLen {
+ keysCmp := Compare(akeys[i].key, bkeys[i].key)
+ if keysCmp < 0 {
+ return -1
+ }
+ if keysCmp > 0 {
+ return 1
+ }
+ valA := akeys[i].value
+ valB := bkeys[i].value
+ valCmp := Compare(valA, valB)
+ if valCmp != 0 {
+ return valCmp
+ }
+ }
+ if len(akeys) < len(bkeys) {
+ return -1
+ }
+ if len(bkeys) < len(akeys) {
+ return 1
+ }
+ return 0
+}
+
+// Find returns the value at the key or undefined.
+func (obj *object) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return obj, nil
+ }
+ term := obj.Get(path[0])
+ if term == nil {
+ return nil, errFindNotFound
+ }
+ // Using Find on scalar values costs an allocation (type -> Value conversion)
+ // and since we already have the Value here, we can avoid that.
+ if len(path) == 1 && IsScalar(term.Value) {
+ return term.Value, nil
+ }
+
+ return term.Value.Find(path[1:])
+}
+
+func (obj *object) Insert(k, v *Term) {
+ obj.insert(k, v, true)
+}
+
+// Get returns the value of k in obj if k exists, otherwise nil.
+func (obj *object) Get(k *Term) *Term {
+ if len(obj.elems) == 0 {
+ return nil
+ }
+
+ hash := k.Hash()
+ for curr := obj.elems[hash]; curr != nil; curr = curr.next {
+ // Pointer equality check always fastest, and not too unlikely with interning.
+ if curr.key == k {
+ return curr.value
+ }
+
+ if KeyHashEqual(curr.key.Value, k.Value) {
+ return curr.value
+ }
+ }
+ return nil
+}
+
+func KeyHashEqual(x, y Value) bool {
+ switch x := x.(type) {
+ case Null, Boolean, String, Var:
+ return x == y
+ case Number:
+ if y, ok := y.(Number); ok {
+ return x.Equal(y)
+ }
+ }
+
+ return Compare(x, y) == 0
+}
+
+// Hash returns the hash code for the Value.
+func (obj *object) Hash() int {
+ return obj.hash
+}
+
+// IsGround returns true if all of the Object key/value pairs are ground.
+func (obj *object) IsGround() bool {
+ return obj.ground == 2*len(obj.keys)
+}
+
+// Copy returns a deep copy of obj.
+func (obj *object) Copy() Object {
+ cpy, _ := obj.Map(func(k, v *Term) (*Term, *Term, error) {
+ return k.Copy(), v.Copy(), nil
+ })
+ cpy.(*object).hash = obj.hash
+ return cpy
+}
+
+// Diff returns a new Object that contains only the key/value pairs that exist in obj.
+func (obj *object) Diff(other Object) Object {
+ r := newobject(obj.Len())
+ for _, node := range obj.sortedKeys() {
+ if other.Get(node.key) == nil {
+ r.insert(node.key, node.value, false)
+ }
+ }
+ return r
+}
+
+// Intersect returns a slice of term triplets that represent the intersection of keys
+// between obj and other. For each intersecting key, the values from obj and other are included
+// as the last two terms in the triplet (respectively).
+func (obj *object) Intersect(other Object) [][3]*Term {
+ r := [][3]*Term{}
+ obj.Foreach(func(k, v *Term) {
+ if v2 := other.Get(k); v2 != nil {
+ r = append(r, [3]*Term{k, v, v2})
+ }
+ })
+ return r
+}
+
+// Iter calls the function f for each key-value pair in the object. If f
+// returns an error, iteration stops and the error is returned.
+func (obj *object) Iter(f func(*Term, *Term) error) error {
+ for _, node := range obj.sortedKeys() {
+ if err := f(node.key, node.value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Until calls f for each key-value pair in the object. If f returns
+// true, iteration stops and Until returns true. Otherwise, return
+// false.
+func (obj *object) Until(f func(*Term, *Term) bool) bool {
+ for _, node := range obj.sortedKeys() {
+ if f(node.key, node.value) {
+ return true
+ }
+ }
+ return false
+}
+
+// Foreach calls f for each key-value pair in the object.
+func (obj *object) Foreach(f func(*Term, *Term)) {
+ for _, node := range obj.sortedKeys() {
+ f(node.key, node.value)
+ }
+}
+
+// Map returns a new Object constructed by mapping each element in the object
+// using the function f. If f returns an error, the error is returned by Map.
+// If f return a nil key, the element is skipped.
+func (obj *object) Map(f func(*Term, *Term) (*Term, *Term, error)) (Object, error) {
+ cpy := newobject(obj.Len())
+ for _, node := range obj.sortedKeys() {
+ k, v, err := f(node.key, node.value)
+ if err != nil {
+ return nil, err
+ }
+ if k != nil {
+ cpy.insert(k, v, false)
+ }
+ }
+ return cpy, nil
+}
+
+// Keys returns the keys of obj.
+func (obj *object) Keys() []*Term {
+ keys := make([]*Term, len(obj.keys))
+
+ for i, elem := range obj.sortedKeys() {
+ keys[i] = elem.key
+ }
+
+ return keys
+}
+
+// Returns an iterator over the obj's keys.
+func (obj *object) KeysIterator() ObjectKeysIterator {
+ return newobjectKeysIterator(obj)
+}
+
+// MarshalJSON returns JSON encoded bytes representing obj.
+func (obj *object) MarshalJSON() ([]byte, error) {
+ sl := make([][2]*Term, obj.Len())
+ for i, node := range obj.sortedKeys() {
+ sl[i] = Item(node.key, node.value)
+ }
+ return json.Marshal(sl)
+}
+
+// Merge returns a new Object containing the non-overlapping keys of obj and other. If there are
+// overlapping keys between obj and other, the values of associated with the keys are merged. Only
+// objects can be merged with other objects. If the values cannot be merged, the second turn value
+// will be false.
+func (obj *object) Merge(other Object) (Object, bool) {
+ return obj.MergeWith(other, func(v1, v2 *Term) (*Term, bool) {
+ obj1, ok1 := v1.Value.(Object)
+ obj2, ok2 := v2.Value.(Object)
+ if !ok1 || !ok2 {
+ return nil, true
+ }
+ obj3, ok := obj1.Merge(obj2)
+ if !ok {
+ return nil, true
+ }
+ return NewTerm(obj3), false
+ })
+}
+
+// MergeWith returns a new Object containing the merged keys of obj and other.
+// If there are overlapping keys between obj and other, the conflictResolver
+// is called. The conflictResolver can return a merged value and a boolean
+// indicating if the merge has failed and should stop.
+func (obj *object) MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) {
+ result := NewObject()
+ stop := obj.Until(func(k, v *Term) bool {
+ v2 := other.Get(k)
+ // The key didn't exist in other, keep the original value
+ if v2 == nil {
+ result.Insert(k, v)
+ return false
+ }
+
+ // The key exists in both, resolve the conflict if possible
+ merged, stop := conflictResolver(v, v2)
+ if !stop {
+ result.Insert(k, merged)
+ }
+ return stop
+ })
+
+ if stop {
+ return nil, false
+ }
+
+ // Copy in any values from other for keys that don't exist in obj
+ other.Foreach(func(k, v *Term) {
+ if v2 := obj.Get(k); v2 == nil {
+ result.Insert(k, v)
+ }
+ })
+ return result, true
+}
+
+// Filter returns a new object from values in obj where the keys are
+// found in filter. Array indices for values can be specified as
+// number strings.
+func (obj *object) Filter(filter Object) (Object, error) {
+ filtered, err := filterObject(obj, filter)
+ if err != nil {
+ return nil, err
+ }
+ return filtered.(Object), nil
+}
+
+// Len returns the number of elements in the object.
+func (obj *object) Len() int {
+ return len(obj.keys)
+}
+
+func (obj *object) String() string {
+ sb := sbPool.Get()
+ sb.Grow(obj.Len() * 32)
+
+ defer sbPool.Put(sb)
+
+ sb.WriteByte('{')
+
+ for i, elem := range obj.sortedKeys() {
+ if i > 0 {
+ sb.WriteString(", ")
+ }
+ sb.WriteString(elem.key.String())
+ sb.WriteString(": ")
+ sb.WriteString(elem.value.String())
+ }
+ sb.WriteByte('}')
+
+ return sb.String()
+}
+
+func (*object) get(*Term) *objectElem {
+ return nil
+}
+
+// NOTE(philipc): We assume a many-readers, single-writer model here.
+// This method should NOT be used concurrently, or else we risk data races.
+func (obj *object) insert(k, v *Term, resetSortGuard bool) {
+ hash := k.Hash()
+ head := obj.elems[hash]
+
+ for curr := head; curr != nil; curr = curr.next {
+ if KeyHashEqual(curr.key.Value, k.Value) {
+ if curr.value.IsGround() {
+ obj.ground--
+ }
+ if v.IsGround() {
+ obj.ground++
+ }
+
+ // Update hash based on the new value
+ curr.value = v
+ obj.elems[hash] = curr
+ obj.hash = 0
+ for ehash := range obj.elems {
+ obj.hash += ehash + obj.elems[ehash].value.Hash()
+ }
+
+ return
+ }
+ }
+
+ obj.elems[hash] = &objectElem{key: k, value: v, next: head}
+ // O(1) insertion, but we'll have to re-sort the keys later.
+ obj.keys = append(obj.keys, obj.elems[hash])
+
+ if resetSortGuard {
+ // Reset the sync.Once instance.
+ // See https://github.com/golang/go/issues/25955 for why we do it this way.
+ // Note that this will always be the case when external code calls insert via
+ // Add, or otherwise. Internal code may however benefit from not having to
+ // re-create this when it's known not to be needed.
+ obj.sortGuard = sync.Once{}
+ }
+
+ obj.hash += hash + v.Hash()
+
+ if k.IsGround() {
+ obj.ground++
+ }
+ if v.IsGround() {
+ obj.ground++
+ }
+}
+
+func filterObject(o Value, filter Value) (Value, error) {
+ if (Null{}).Equal(filter) {
+ return o, nil
+ }
+
+ filteredObj, ok := filter.(*object)
+ if !ok {
+ return nil, fmt.Errorf("invalid filter value %q, expected an object", filter)
+ }
+
+ switch v := o.(type) {
+ case String, Number, Boolean, Null:
+ return o, nil
+ case *Array:
+ values := NewArray()
+ for i := range v.Len() {
+ subFilter := filteredObj.Get(InternedIntegerString(i))
+ if subFilter != nil {
+ filteredValue, err := filterObject(v.Elem(i).Value, subFilter.Value)
+ if err != nil {
+ return nil, err
+ }
+ values = values.Append(NewTerm(filteredValue))
+ }
+ }
+ return values, nil
+ case Set:
+ terms := make([]*Term, 0, v.Len())
+ for _, t := range v.Slice() {
+ if filteredObj.Get(t) != nil {
+ filteredValue, err := filterObject(t.Value, filteredObj.Get(t).Value)
+ if err != nil {
+ return nil, err
+ }
+ terms = append(terms, NewTerm(filteredValue))
+ }
+ }
+ return NewSet(terms...), nil
+ case *object:
+ values := NewObject()
+
+ iterObj := v
+ other := filteredObj
+ if v.Len() < filteredObj.Len() {
+ iterObj = filteredObj
+ other = v
+ }
+
+ err := iterObj.Iter(func(key *Term, _ *Term) error {
+ if other.Get(key) != nil {
+ filteredValue, err := filterObject(v.Get(key).Value, filteredObj.Get(key).Value)
+ if err != nil {
+ return err
+ }
+ values.Insert(key, NewTerm(filteredValue))
+ }
+ return nil
+ })
+ return values, err
+ default:
+ return nil, fmt.Errorf("invalid object value type %q", v)
+ }
+}
+
+// NOTE(philipc): The only way to get an ObjectKeyIterator should be
+// from an Object. This ensures that the iterator can have implementation-
+// specific details internally, with no contracts except to the very
+// limited interface.
+type ObjectKeysIterator interface {
+ Next() (*Term, bool)
+}
+
+type objectKeysIterator struct {
+ obj *object
+ numKeys int
+ index int
+}
+
+func newobjectKeysIterator(o *object) ObjectKeysIterator {
+ return &objectKeysIterator{
+ obj: o,
+ numKeys: o.Len(),
+ index: 0,
+ }
+}
+
+func (oki *objectKeysIterator) Next() (*Term, bool) {
+ if oki.index == oki.numKeys || oki.numKeys == 0 {
+ return nil, false
+ }
+ oki.index++
+ return oki.obj.sortedKeys()[oki.index-1].key, true
+}
+
+// ArrayComprehension represents an array comprehension as defined in the language.
+type ArrayComprehension struct {
+ Term *Term `json:"term"`
+ Body Body `json:"body"`
+}
+
+// ArrayComprehensionTerm creates a new Term with an ArrayComprehension value.
+func ArrayComprehensionTerm(term *Term, body Body) *Term {
+ return &Term{
+ Value: &ArrayComprehension{
+ Term: term,
+ Body: body,
+ },
+ }
+}
+
+// Copy returns a deep copy of ac.
+func (ac *ArrayComprehension) Copy() *ArrayComprehension {
+ cpy := *ac
+ cpy.Body = ac.Body.Copy()
+ cpy.Term = ac.Term.Copy()
+ return &cpy
+}
+
+// Equal returns true if ac is equal to other.
+func (ac *ArrayComprehension) Equal(other Value) bool {
+ return Compare(ac, other) == 0
+}
+
+// Compare compares ac to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (ac *ArrayComprehension) Compare(other Value) int {
+ return Compare(ac, other)
+}
+
+// Find returns the current value or a not found error.
+func (ac *ArrayComprehension) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return ac, nil
+ }
+ return nil, errFindNotFound
+}
+
+// Hash returns the hash code of the Value.
+func (ac *ArrayComprehension) Hash() int {
+ return ac.Term.Hash() + ac.Body.Hash()
+}
+
+// IsGround returns true if the Term and Body are ground.
+func (ac *ArrayComprehension) IsGround() bool {
+ return ac.Term.IsGround() && ac.Body.IsGround()
+}
+
+func (ac *ArrayComprehension) String() string {
+ return "[" + ac.Term.String() + " | " + ac.Body.String() + "]"
+}
+
+// ObjectComprehension represents an object comprehension as defined in the language.
+type ObjectComprehension struct {
+ Key *Term `json:"key"`
+ Value *Term `json:"value"`
+ Body Body `json:"body"`
+}
+
+// ObjectComprehensionTerm creates a new Term with an ObjectComprehension value.
+func ObjectComprehensionTerm(key, value *Term, body Body) *Term {
+ return &Term{
+ Value: &ObjectComprehension{
+ Key: key,
+ Value: value,
+ Body: body,
+ },
+ }
+}
+
+// Copy returns a deep copy of oc.
+func (oc *ObjectComprehension) Copy() *ObjectComprehension {
+ cpy := *oc
+ cpy.Body = oc.Body.Copy()
+ cpy.Key = oc.Key.Copy()
+ cpy.Value = oc.Value.Copy()
+ return &cpy
+}
+
+// Equal returns true if oc is equal to other.
+func (oc *ObjectComprehension) Equal(other Value) bool {
+ return Compare(oc, other) == 0
+}
+
+// Compare compares oc to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (oc *ObjectComprehension) Compare(other Value) int {
+ return Compare(oc, other)
+}
+
+// Find returns the current value or a not found error.
+func (oc *ObjectComprehension) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return oc, nil
+ }
+ return nil, errFindNotFound
+}
+
+// Hash returns the hash code of the Value.
+func (oc *ObjectComprehension) Hash() int {
+ return oc.Key.Hash() + oc.Value.Hash() + oc.Body.Hash()
+}
+
+// IsGround returns true if the Key, Value and Body are ground.
+func (oc *ObjectComprehension) IsGround() bool {
+ return oc.Key.IsGround() && oc.Value.IsGround() && oc.Body.IsGround()
+}
+
+func (oc *ObjectComprehension) String() string {
+ return "{" + oc.Key.String() + ": " + oc.Value.String() + " | " + oc.Body.String() + "}"
+}
+
+// SetComprehension represents a set comprehension as defined in the language.
+type SetComprehension struct {
+ Term *Term `json:"term"`
+ Body Body `json:"body"`
+}
+
+// SetComprehensionTerm creates a new Term with an SetComprehension value.
+func SetComprehensionTerm(term *Term, body Body) *Term {
+ return &Term{
+ Value: &SetComprehension{
+ Term: term,
+ Body: body,
+ },
+ }
+}
+
+// Copy returns a deep copy of sc.
+func (sc *SetComprehension) Copy() *SetComprehension {
+ cpy := *sc
+ cpy.Body = sc.Body.Copy()
+ cpy.Term = sc.Term.Copy()
+ return &cpy
+}
+
+// Equal returns true if sc is equal to other.
+func (sc *SetComprehension) Equal(other Value) bool {
+ return Compare(sc, other) == 0
+}
+
+// Compare compares sc to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (sc *SetComprehension) Compare(other Value) int {
+ return Compare(sc, other)
+}
+
+// Find returns the current value or a not found error.
+func (sc *SetComprehension) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return sc, nil
+ }
+ return nil, errFindNotFound
+}
+
+// Hash returns the hash code of the Value.
+func (sc *SetComprehension) Hash() int {
+ return sc.Term.Hash() + sc.Body.Hash()
+}
+
+// IsGround returns true if the Term and Body are ground.
+func (sc *SetComprehension) IsGround() bool {
+ return sc.Term.IsGround() && sc.Body.IsGround()
+}
+
+func (sc *SetComprehension) String() string {
+ return "{" + sc.Term.String() + " | " + sc.Body.String() + "}"
+}
+
+// Call represents as function call in the language.
+type Call []*Term
+
+// CallTerm returns a new Term with a Call value defined by terms. The first
+// term is the operator and the rest are operands.
+func CallTerm(terms ...*Term) *Term {
+ return NewTerm(Call(terms))
+}
+
+// Copy returns a deep copy of c.
+func (c Call) Copy() Call {
+ return termSliceCopy(c)
+}
+
+// Compare compares c to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (c Call) Compare(other Value) int {
+ return Compare(c, other)
+}
+
+// Find returns the current value or a not found error.
+func (Call) Find(Ref) (Value, error) {
+ return nil, errFindNotFound
+}
+
+// Hash returns the hash code for the Value.
+func (c Call) Hash() int {
+ return termSliceHash(c)
+}
+
+// IsGround returns true if the Value is ground.
+func (c Call) IsGround() bool {
+ return termSliceIsGround(c)
+}
+
+// MakeExpr returns an ew Expr from this call.
+func (c Call) MakeExpr(output *Term) *Expr {
+ terms := []*Term(c)
+ return NewExpr(append(terms, output))
+}
+
+func (c Call) String() string {
+ args := make([]string, len(c)-1)
+ for i := 1; i < len(c); i++ {
+ args[i-1] = c[i].String()
+ }
+ return fmt.Sprintf("%v(%v)", c[0], strings.Join(args, ", "))
+}
+
+func termSliceCopy(a []*Term) []*Term {
+ cpy := make([]*Term, len(a))
+ for i := range a {
+ cpy[i] = a[i].Copy()
+ }
+ return cpy
+}
+
+func termSliceEqual(a, b []*Term) bool {
+ if len(a) == len(b) {
+ for i := range a {
+ if !a[i].Equal(b[i]) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+func termSliceHash(a []*Term) int {
+ var hash int
+ for _, v := range a {
+ hash += v.Value.Hash()
+ }
+ return hash
+}
+
+func termSliceIsGround(a []*Term) bool {
+ for _, v := range a {
+ if !v.IsGround() {
+ return false
+ }
+ }
+ return true
+}
+
+// Detect when String() need to use expensive JSON‐escaped form
+func isControlOrBackslash(r rune) bool {
+ return r == '\\' || unicode.IsControl(r)
+}
+
+// NOTE(tsandall): The unmarshalling errors in these functions are not
+// helpful for callers because they do not identify the source of the
+// unmarshalling error. Because OPA doesn't accept JSON describing ASTs
+// from callers, this is acceptable (for now). If that changes in the future,
+// the error messages should be revisited. The current approach focuses
+// on the happy path and treats all errors the same. If better error
+// reporting is needed, the error paths will need to be fleshed out.
+
+func unmarshalBody(b []any) (Body, error) {
+ buf := Body{}
+ for _, e := range b {
+ if m, ok := e.(map[string]any); ok {
+ expr := &Expr{}
+ if err := unmarshalExpr(expr, m); err == nil {
+ buf = append(buf, expr)
+ continue
+ }
+ }
+ goto unmarshal_error
+ }
+ return buf, nil
+unmarshal_error:
+ return nil, errors.New("ast: unable to unmarshal body")
+}
+
+func unmarshalExpr(expr *Expr, v map[string]any) error {
+ if x, ok := v["negated"]; ok {
+ if b, ok := x.(bool); ok {
+ expr.Negated = b
+ } else {
+ return fmt.Errorf("ast: unable to unmarshal negated field with type: %T (expected true or false)", v["negated"])
+ }
+ }
+ if generatedRaw, ok := v["generated"]; ok {
+ if b, ok := generatedRaw.(bool); ok {
+ expr.Generated = b
+ } else {
+ return fmt.Errorf("ast: unable to unmarshal generated field with type: %T (expected true or false)", v["generated"])
+ }
+ }
+
+ if err := unmarshalExprIndex(expr, v); err != nil {
+ return err
+ }
+ switch ts := v["terms"].(type) {
+ case map[string]any:
+ t, err := unmarshalTerm(ts)
+ if err != nil {
+ return err
+ }
+ expr.Terms = t
+ case []any:
+ terms, err := unmarshalTermSlice(ts)
+ if err != nil {
+ return err
+ }
+ expr.Terms = terms
+ default:
+ return fmt.Errorf(`ast: unable to unmarshal terms field with type: %T (expected {"value": ..., "type": ...} or [{"value": ..., "type": ...}, ...])`, v["terms"])
+ }
+ if x, ok := v["with"]; ok {
+ if sl, ok := x.([]any); ok {
+ ws := make([]*With, len(sl))
+ for i := range sl {
+ var err error
+ ws[i], err = unmarshalWith(sl[i])
+ if err != nil {
+ return err
+ }
+ }
+ expr.With = ws
+ }
+ }
+ if loc, ok := v["location"].(map[string]any); ok {
+ expr.Location = &Location{}
+ if err := unmarshalLocation(expr.Location, loc); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func unmarshalLocation(loc *Location, v map[string]any) error {
+ if x, ok := v["file"]; ok {
+ if s, ok := x.(string); ok {
+ loc.File = s
+ } else {
+ return fmt.Errorf("ast: unable to unmarshal file field with type: %T (expected string)", v["file"])
+ }
+ }
+ if x, ok := v["row"]; ok {
+ if n, ok := x.(json.Number); ok {
+ i64, err := n.Int64()
+ if err != nil {
+ return err
+ }
+ loc.Row = int(i64)
+ } else {
+ return fmt.Errorf("ast: unable to unmarshal row field with type: %T (expected number)", v["row"])
+ }
+ }
+ if x, ok := v["col"]; ok {
+ if n, ok := x.(json.Number); ok {
+ i64, err := n.Int64()
+ if err != nil {
+ return err
+ }
+ loc.Col = int(i64)
+ } else {
+ return fmt.Errorf("ast: unable to unmarshal col field with type: %T (expected number)", v["col"])
+ }
+ }
+
+ return nil
+}
+
+func unmarshalExprIndex(expr *Expr, v map[string]any) error {
+ if x, ok := v["index"]; ok {
+ if n, ok := x.(json.Number); ok {
+ i, err := n.Int64()
+ if err == nil {
+ expr.Index = int(i)
+ return nil
+ }
+ }
+ }
+ return fmt.Errorf("ast: unable to unmarshal index field with type: %T (expected integer)", v["index"])
+}
+
+func unmarshalTerm(m map[string]any) (*Term, error) {
+ var term Term
+
+ v, err := unmarshalValue(m)
+ if err != nil {
+ return nil, err
+ }
+ term.Value = v
+
+ if loc, ok := m["location"].(map[string]any); ok {
+ term.Location = &Location{}
+ if err := unmarshalLocation(term.Location, loc); err != nil {
+ return nil, err
+ }
+ }
+
+ return &term, nil
+}
+
+func unmarshalTermSlice(s []any) ([]*Term, error) {
+ buf := []*Term{}
+ for _, x := range s {
+ if m, ok := x.(map[string]any); ok {
+ t, err := unmarshalTerm(m)
+ if err == nil {
+ buf = append(buf, t)
+ continue
+ }
+ return nil, err
+ }
+ return nil, errors.New("ast: unable to unmarshal term")
+ }
+ return buf, nil
+}
+
+func unmarshalTermSliceValue(d map[string]any) ([]*Term, error) {
+ if s, ok := d["value"].([]any); ok {
+ return unmarshalTermSlice(s)
+ }
+ return nil, errors.New(`ast: unable to unmarshal term (expected {"value": [...], "type": ...} where type is one of: ref, array, or set)`)
+}
+
+func unmarshalWith(i any) (*With, error) {
+ if m, ok := i.(map[string]any); ok {
+ tgt, _ := m["target"].(map[string]any)
+ target, err := unmarshalTerm(tgt)
+ if err == nil {
+ val, _ := m["value"].(map[string]any)
+ value, err := unmarshalTerm(val)
+ if err == nil {
+ return &With{
+ Target: target,
+ Value: value,
+ }, nil
+ }
+ return nil, err
+ }
+ return nil, err
+ }
+ return nil, errors.New(`ast: unable to unmarshal with modifier (expected {"target": {...}, "value": {...}})`)
+}
+
+func unmarshalValue(d map[string]any) (Value, error) {
+ v := d["value"]
+ switch d["type"] {
+ case "null":
+ return NullValue, nil
+ case "boolean":
+ if b, ok := v.(bool); ok {
+ return Boolean(b), nil
+ }
+ case "number":
+ if n, ok := v.(json.Number); ok {
+ return Number(n), nil
+ }
+ case "string":
+ if s, ok := v.(string); ok {
+ return String(s), nil
+ }
+ case "var":
+ if s, ok := v.(string); ok {
+ return Var(s), nil
+ }
+ case "ref":
+ if s, err := unmarshalTermSliceValue(d); err == nil {
+ return Ref(s), nil
+ }
+ case "array":
+ if s, err := unmarshalTermSliceValue(d); err == nil {
+ return NewArray(s...), nil
+ }
+ case "set":
+ if s, err := unmarshalTermSliceValue(d); err == nil {
+ return NewSet(s...), nil
+ }
+ case "object":
+ if s, ok := v.([]any); ok {
+ buf := NewObject()
+ for _, x := range s {
+ if i, ok := x.([]any); ok && len(i) == 2 {
+ p, err := unmarshalTermSlice(i)
+ if err == nil {
+ buf.Insert(p[0], p[1])
+ continue
+ }
+ }
+ goto unmarshal_error
+ }
+ return buf, nil
+ }
+ case "arraycomprehension", "setcomprehension":
+ if m, ok := v.(map[string]any); ok {
+ t, ok := m["term"].(map[string]any)
+ if !ok {
+ goto unmarshal_error
+ }
+
+ term, err := unmarshalTerm(t)
+ if err != nil {
+ goto unmarshal_error
+ }
+
+ b, ok := m["body"].([]any)
+ if !ok {
+ goto unmarshal_error
+ }
+
+ body, err := unmarshalBody(b)
+ if err != nil {
+ goto unmarshal_error
+ }
+
+ if d["type"] == "arraycomprehension" {
+ return &ArrayComprehension{Term: term, Body: body}, nil
+ }
+ return &SetComprehension{Term: term, Body: body}, nil
+ }
+ case "objectcomprehension":
+ if m, ok := v.(map[string]any); ok {
+ k, ok := m["key"].(map[string]any)
+ if !ok {
+ goto unmarshal_error
+ }
+
+ key, err := unmarshalTerm(k)
+ if err != nil {
+ goto unmarshal_error
+ }
+
+ v, ok := m["value"].(map[string]any)
+ if !ok {
+ goto unmarshal_error
+ }
+
+ value, err := unmarshalTerm(v)
+ if err != nil {
+ goto unmarshal_error
+ }
+
+ b, ok := m["body"].([]any)
+ if !ok {
+ goto unmarshal_error
+ }
+
+ body, err := unmarshalBody(b)
+ if err != nil {
+ goto unmarshal_error
+ }
+
+ return &ObjectComprehension{Key: key, Value: value, Body: body}, nil
+ }
+ case "call":
+ if s, err := unmarshalTermSliceValue(d); err == nil {
+ return Call(s), nil
+ }
+ }
+unmarshal_error:
+ return nil, errors.New("ast: unable to unmarshal term")
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/transform.go b/vendor/github.com/open-policy-agent/opa/v1/ast/transform.go
new file mode 100644
index 0000000000..197ab6457d
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/transform.go
@@ -0,0 +1,431 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "fmt"
+)
+
+// Transformer defines the interface for transforming AST elements. If the
+// transformer returns nil and does not indicate an error, the AST element will
+// be set to nil and no transformations will be applied to children of the
+// element.
+type Transformer interface {
+ Transform(any) (any, error)
+}
+
+// Transform iterates the AST and calls the Transform function on the
+// Transformer t for x before recursing.
+func Transform(t Transformer, x any) (any, error) {
+
+ if term, ok := x.(*Term); ok {
+ return Transform(t, term.Value)
+ }
+
+ y, err := t.Transform(x)
+ if err != nil {
+ return x, err
+ }
+
+ if y == nil {
+ return nil, nil
+ }
+
+ var ok bool
+ switch y := y.(type) {
+ case *Module:
+ p, err := Transform(t, y.Package)
+ if err != nil {
+ return nil, err
+ }
+ if y.Package, ok = p.(*Package); !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", y.Package, p)
+ }
+ for i := range y.Imports {
+ imp, err := Transform(t, y.Imports[i])
+ if err != nil {
+ return nil, err
+ }
+ if y.Imports[i], ok = imp.(*Import); !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", y.Imports[i], imp)
+ }
+ }
+ for i := range y.Rules {
+ rule, err := Transform(t, y.Rules[i])
+ if err != nil {
+ return nil, err
+ }
+ if y.Rules[i], ok = rule.(*Rule); !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", y.Rules[i], rule)
+ }
+ }
+ for i := range y.Annotations {
+ a, err := Transform(t, y.Annotations[i])
+ if err != nil {
+ return nil, err
+ }
+ if y.Annotations[i], ok = a.(*Annotations); !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", y.Annotations[i], a)
+ }
+ }
+ for i := range y.Comments {
+ comment, err := Transform(t, y.Comments[i])
+ if err != nil {
+ return nil, err
+ }
+ if y.Comments[i], ok = comment.(*Comment); !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", y.Comments[i], comment)
+ }
+ }
+ return y, nil
+ case *Package:
+ ref, err := Transform(t, y.Path)
+ if err != nil {
+ return nil, err
+ }
+ if y.Path, ok = ref.(Ref); !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", y.Path, ref)
+ }
+ return y, nil
+ case *Import:
+ y.Path, err = transformTerm(t, y.Path)
+ if err != nil {
+ return nil, err
+ }
+ if y.Alias, err = transformVar(t, y.Alias); err != nil {
+ return nil, err
+ }
+ return y, nil
+ case *Rule:
+ if y.Head, err = transformHead(t, y.Head); err != nil {
+ return nil, err
+ }
+ if y.Body, err = transformBody(t, y.Body); err != nil {
+ return nil, err
+ }
+ if y.Else != nil {
+ rule, err := Transform(t, y.Else)
+ if err != nil {
+ return nil, err
+ }
+ if y.Else, ok = rule.(*Rule); !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", y.Else, rule)
+ }
+ }
+ return y, nil
+ case *Head:
+ if y.Reference, err = transformRef(t, y.Reference); err != nil {
+ return nil, err
+ }
+ if y.Name, err = transformVar(t, y.Name); err != nil {
+ return nil, err
+ }
+ if y.Args, err = transformArgs(t, y.Args); err != nil {
+ return nil, err
+ }
+ if y.Key != nil {
+ if y.Key, err = transformTerm(t, y.Key); err != nil {
+ return nil, err
+ }
+ }
+ if y.Value != nil {
+ if y.Value, err = transformTerm(t, y.Value); err != nil {
+ return nil, err
+ }
+ }
+ return y, nil
+ case Args:
+ for i := range y {
+ if y[i], err = transformTerm(t, y[i]); err != nil {
+ return nil, err
+ }
+ }
+ return y, nil
+ case Body:
+ for i, e := range y {
+ e, err := Transform(t, e)
+ if err != nil {
+ return nil, err
+ }
+ if y[i], ok = e.(*Expr); !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", y[i], e)
+ }
+ }
+ return y, nil
+ case *Expr:
+ switch ts := y.Terms.(type) {
+ case *SomeDecl:
+ decl, err := Transform(t, ts)
+ if err != nil {
+ return nil, err
+ }
+ if y.Terms, ok = decl.(*SomeDecl); !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", y, decl)
+ }
+ return y, nil
+ case []*Term:
+ for i := range ts {
+ if ts[i], err = transformTerm(t, ts[i]); err != nil {
+ return nil, err
+ }
+ }
+ case *Term:
+ if y.Terms, err = transformTerm(t, ts); err != nil {
+ return nil, err
+ }
+ case *Every:
+ if ts.Key != nil {
+ ts.Key, err = transformTerm(t, ts.Key)
+ if err != nil {
+ return nil, err
+ }
+ }
+ ts.Value, err = transformTerm(t, ts.Value)
+ if err != nil {
+ return nil, err
+ }
+ ts.Domain, err = transformTerm(t, ts.Domain)
+ if err != nil {
+ return nil, err
+ }
+ ts.Body, err = transformBody(t, ts.Body)
+ if err != nil {
+ return nil, err
+ }
+ y.Terms = ts
+ }
+ for i, w := range y.With {
+ w, err := Transform(t, w)
+ if err != nil {
+ return nil, err
+ }
+ if y.With[i], ok = w.(*With); !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", y.With[i], w)
+ }
+ }
+ return y, nil
+ case *With:
+ if y.Target, err = transformTerm(t, y.Target); err != nil {
+ return nil, err
+ }
+ if y.Value, err = transformTerm(t, y.Value); err != nil {
+ return nil, err
+ }
+ return y, nil
+ case Ref:
+ for i, term := range y {
+ if y[i], err = transformTerm(t, term); err != nil {
+ return nil, err
+ }
+ }
+ return y, nil
+ case *object:
+ return y.Map(func(k, v *Term) (*Term, *Term, error) {
+ k, err := transformTerm(t, k)
+ if err != nil {
+ return nil, nil, err
+ }
+ v, err = transformTerm(t, v)
+ if err != nil {
+ return nil, nil, err
+ }
+ return k, v, nil
+ })
+ case *Array:
+ for i := range y.Len() {
+ v, err := transformTerm(t, y.Elem(i))
+ if err != nil {
+ return nil, err
+ }
+ y.set(i, v)
+ }
+ return y, nil
+ case Set:
+ y, err = y.Map(func(term *Term) (*Term, error) {
+ return transformTerm(t, term)
+ })
+ if err != nil {
+ return nil, err
+ }
+ return y, nil
+ case *ArrayComprehension:
+ if y.Term, err = transformTerm(t, y.Term); err != nil {
+ return nil, err
+ }
+ if y.Body, err = transformBody(t, y.Body); err != nil {
+ return nil, err
+ }
+ return y, nil
+ case *ObjectComprehension:
+ if y.Key, err = transformTerm(t, y.Key); err != nil {
+ return nil, err
+ }
+ if y.Value, err = transformTerm(t, y.Value); err != nil {
+ return nil, err
+ }
+ if y.Body, err = transformBody(t, y.Body); err != nil {
+ return nil, err
+ }
+ return y, nil
+ case *SetComprehension:
+ if y.Term, err = transformTerm(t, y.Term); err != nil {
+ return nil, err
+ }
+ if y.Body, err = transformBody(t, y.Body); err != nil {
+ return nil, err
+ }
+ return y, nil
+ case Call:
+ for i := range y {
+ if y[i], err = transformTerm(t, y[i]); err != nil {
+ return nil, err
+ }
+ }
+ return y, nil
+ default:
+ return y, nil
+ }
+}
+
+// TransformRefs calls the function f on all references under x.
+func TransformRefs(x any, f func(Ref) (Value, error)) (any, error) {
+ t := &GenericTransformer{func(x any) (any, error) {
+ if r, ok := x.(Ref); ok {
+ return f(r)
+ }
+ return x, nil
+ }}
+ return Transform(t, x)
+}
+
+// TransformVars calls the function f on all vars under x.
+func TransformVars(x any, f func(Var) (Value, error)) (any, error) {
+ t := &GenericTransformer{func(x any) (any, error) {
+ if v, ok := x.(Var); ok {
+ return f(v)
+ }
+ return x, nil
+ }}
+ return Transform(t, x)
+}
+
+// TransformComprehensions calls the functio nf on all comprehensions under x.
+func TransformComprehensions(x any, f func(any) (Value, error)) (any, error) {
+ t := &GenericTransformer{func(x any) (any, error) {
+ switch x := x.(type) {
+ case *ArrayComprehension:
+ return f(x)
+ case *SetComprehension:
+ return f(x)
+ case *ObjectComprehension:
+ return f(x)
+ }
+ return x, nil
+ }}
+ return Transform(t, x)
+}
+
+// GenericTransformer implements the Transformer interface to provide a utility
+// to transform AST nodes using a closure.
+type GenericTransformer struct {
+ f func(any) (any, error)
+}
+
+// NewGenericTransformer returns a new GenericTransformer that will transform
+// AST nodes using the function f.
+func NewGenericTransformer(f func(x any) (any, error)) *GenericTransformer {
+ return &GenericTransformer{
+ f: f,
+ }
+}
+
+// Transform calls the function f on the GenericTransformer.
+func (t *GenericTransformer) Transform(x any) (any, error) {
+ return t.f(x)
+}
+
+func transformHead(t Transformer, head *Head) (*Head, error) {
+ y, err := Transform(t, head)
+ if err != nil {
+ return nil, err
+ }
+ h, ok := y.(*Head)
+ if !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", head, y)
+ }
+ return h, nil
+}
+
+func transformArgs(t Transformer, args Args) (Args, error) {
+ y, err := Transform(t, args)
+ if err != nil {
+ return nil, err
+ }
+ a, ok := y.(Args)
+ if !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", args, y)
+ }
+ return a, nil
+}
+
+func transformBody(t Transformer, body Body) (Body, error) {
+ y, err := Transform(t, body)
+ if err != nil {
+ return nil, err
+ }
+ r, ok := y.(Body)
+ if !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", body, y)
+ }
+ return r, nil
+}
+
+func transformTerm(t Transformer, term *Term) (*Term, error) {
+ v, err := transformValue(t, term.Value)
+ if err != nil {
+ return nil, err
+ }
+ r := &Term{
+ Value: v,
+ Location: term.Location,
+ }
+ return r, nil
+}
+
+func transformValue(t Transformer, v Value) (Value, error) {
+ v1, err := Transform(t, v)
+ if err != nil {
+ return nil, err
+ }
+ r, ok := v1.(Value)
+ if !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", v, v1)
+ }
+ return r, nil
+}
+
+func transformVar(t Transformer, v Var) (Var, error) {
+ v1, err := Transform(t, v)
+ if err != nil {
+ return "", err
+ }
+ r, ok := v1.(Var)
+ if !ok {
+ return "", fmt.Errorf("illegal transform: %T != %T", v, v1)
+ }
+ return r, nil
+}
+
+func transformRef(t Transformer, r Ref) (Ref, error) {
+ r1, err := Transform(t, r)
+ if err != nil {
+ return nil, err
+ }
+ r2, ok := r1.(Ref)
+ if !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", r, r2)
+ }
+ return r2, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/unify.go b/vendor/github.com/open-policy-agent/opa/v1/ast/unify.go
new file mode 100644
index 0000000000..acbe275c0f
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/unify.go
@@ -0,0 +1,240 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+func isRefSafe(ref Ref, safe VarSet) bool {
+ switch head := ref[0].Value.(type) {
+ case Var:
+ return safe.Contains(head)
+ case Call:
+ return isCallSafe(head, safe)
+ default:
+ for v := range ref[0].Vars() {
+ if !safe.Contains(v) {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+func isCallSafe(call Call, safe VarSet) bool {
+ vis := varVisitorPool.Get().WithParams(SafetyCheckVisitorParams)
+ vis.Walk(call)
+ isSafe := vis.Vars().DiffCount(safe) == 0
+ varVisitorPool.Put(vis)
+
+ return isSafe
+}
+
+// Unify returns a set of variables that will be unified when the equality expression defined by
+// terms a and b is evaluated. The unifier assumes that variables in the VarSet safe are already
+// unified.
+func Unify(safe VarSet, a *Term, b *Term) VarSet {
+ u := &unifier{
+ safe: safe,
+ unified: VarSet{},
+ unknown: map[Var]VarSet{},
+ }
+ u.unify(a, b)
+ return u.unified
+}
+
+type unifier struct {
+ safe VarSet
+ unified VarSet
+ unknown map[Var]VarSet
+}
+
+func (u *unifier) isSafe(x Var) bool {
+ return u.safe.Contains(x) || u.unified.Contains(x)
+}
+
+func (u *unifier) unify(a *Term, b *Term) {
+
+ switch a := a.Value.(type) {
+
+ case Var:
+ switch b := b.Value.(type) {
+ case Var:
+ if u.isSafe(b) {
+ u.markSafe(a)
+ } else if u.isSafe(a) {
+ u.markSafe(b)
+ } else {
+ u.markUnknown(a, b)
+ u.markUnknown(b, a)
+ }
+ case *Array, Object:
+ u.unifyAll(a, b)
+ case Ref:
+ if isRefSafe(b, u.safe) {
+ u.markSafe(a)
+ }
+ case Call:
+ if isCallSafe(b, u.safe) {
+ u.markSafe(a)
+ }
+ default:
+ u.markSafe(a)
+ }
+
+ case Ref:
+ if isRefSafe(a, u.safe) {
+ switch b := b.Value.(type) {
+ case Var:
+ u.markSafe(b)
+ case *Array, Object:
+ u.markAllSafe(b)
+ }
+ }
+
+ case Call:
+ if isCallSafe(a, u.safe) {
+ switch b := b.Value.(type) {
+ case Var:
+ u.markSafe(b)
+ case *Array, Object:
+ u.markAllSafe(b)
+ }
+ }
+
+ case *ArrayComprehension:
+ switch b := b.Value.(type) {
+ case Var:
+ u.markSafe(b)
+ case *Array:
+ u.markAllSafe(b)
+ }
+ case *ObjectComprehension:
+ switch b := b.Value.(type) {
+ case Var:
+ u.markSafe(b)
+ case *object:
+ u.markAllSafe(b)
+ }
+ case *SetComprehension:
+ switch b := b.Value.(type) {
+ case Var:
+ u.markSafe(b)
+ }
+
+ case *Array:
+ switch b := b.Value.(type) {
+ case Var:
+ u.unifyAll(b, a)
+ case *ArrayComprehension, *ObjectComprehension, *SetComprehension:
+ u.markAllSafe(a)
+ case Ref:
+ if isRefSafe(b, u.safe) {
+ u.markAllSafe(a)
+ }
+ case Call:
+ if isCallSafe(b, u.safe) {
+ u.markAllSafe(a)
+ }
+ case *Array:
+ if a.Len() == b.Len() {
+ for i := range a.Len() {
+ u.unify(a.Elem(i), b.Elem(i))
+ }
+ }
+ }
+
+ case *object:
+ switch b := b.Value.(type) {
+ case Var:
+ u.unifyAll(b, a)
+ case Ref:
+ if isRefSafe(b, u.safe) {
+ u.markAllSafe(a)
+ }
+ case Call:
+ if isCallSafe(b, u.safe) {
+ u.markAllSafe(a)
+ }
+ case *object:
+ if a.Len() == b.Len() {
+ _ = a.Iter(func(k, v *Term) error {
+ if v2 := b.Get(k); v2 != nil {
+ u.unify(v, v2)
+ }
+ return nil
+ }) // impossible to return error
+ }
+ }
+
+ default:
+ switch b := b.Value.(type) {
+ case Var:
+ u.markSafe(b)
+ }
+ }
+}
+
+func (u *unifier) markAllSafe(x Value) {
+ vis := varVisitorPool.Get().WithParams(VarVisitorParams{
+ SkipRefHead: true,
+ SkipObjectKeys: true,
+ SkipClosures: true,
+ })
+ vis.Walk(x)
+ for v := range vis.Vars() {
+ u.markSafe(v)
+ }
+ varVisitorPool.Put(vis)
+}
+
+func (u *unifier) markSafe(x Var) {
+ u.unified.Add(x)
+
+ // Add dependencies of 'x' to safe set
+ vs := u.unknown[x]
+ delete(u.unknown, x)
+ for v := range vs {
+ u.markSafe(v)
+ }
+
+ // Add dependants of 'x' to safe set if they have no more
+ // dependencies.
+ for v, deps := range u.unknown {
+ if deps.Contains(x) {
+ delete(deps, x)
+ if len(deps) == 0 {
+ u.markSafe(v)
+ }
+ }
+ }
+}
+
+func (u *unifier) markUnknown(a, b Var) {
+ if _, ok := u.unknown[a]; !ok {
+ u.unknown[a] = NewVarSet(b)
+ } else {
+ u.unknown[a].Add(b)
+ }
+}
+
+func (u *unifier) unifyAll(a Var, b Value) {
+ if u.isSafe(a) {
+ u.markAllSafe(b)
+ } else {
+ vis := varVisitorPool.Get().WithParams(VarVisitorParams{
+ SkipRefHead: true,
+ SkipObjectKeys: true,
+ SkipClosures: true,
+ })
+ vis.Walk(b)
+ unsafe := vis.Vars().Diff(u.safe).Diff(u.unified)
+ if len(unsafe) == 0 {
+ u.markSafe(a)
+ } else {
+ for v := range unsafe {
+ u.markUnknown(a, v)
+ }
+ }
+ varVisitorPool.Put(vis)
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/varset.go b/vendor/github.com/open-policy-agent/opa/v1/ast/varset.go
new file mode 100644
index 0000000000..e5bd52ae8c
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/varset.go
@@ -0,0 +1,121 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "fmt"
+ "slices"
+
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+// VarSet represents a set of variables.
+type VarSet map[Var]struct{}
+
+// NewVarSet returns a new VarSet containing the specified variables.
+func NewVarSet(vs ...Var) VarSet {
+ s := make(VarSet, len(vs))
+ for _, v := range vs {
+ s.Add(v)
+ }
+ return s
+}
+
+// NewVarSet returns a new VarSet containing the specified variables.
+func NewVarSetOfSize(size int) VarSet {
+ return make(VarSet, size)
+}
+
+// Add updates the set to include the variable "v".
+func (s VarSet) Add(v Var) {
+ s[v] = struct{}{}
+}
+
+// Contains returns true if the set contains the variable "v".
+func (s VarSet) Contains(v Var) bool {
+ _, ok := s[v]
+ return ok
+}
+
+// Copy returns a shallow copy of the VarSet.
+func (s VarSet) Copy() VarSet {
+ cpy := NewVarSetOfSize(len(s))
+ for v := range s {
+ cpy.Add(v)
+ }
+ return cpy
+}
+
+// Diff returns a VarSet containing variables in s that are not in vs.
+func (s VarSet) Diff(vs VarSet) VarSet {
+ r := NewVarSetOfSize(s.DiffCount(vs))
+ for v := range s {
+ if !vs.Contains(v) {
+ r.Add(v)
+ }
+ }
+ return r
+}
+
+// DiffCount returns the number of variables in s that are not in vs.
+func (s VarSet) DiffCount(vs VarSet) (i int) {
+ for v := range s {
+ if !vs.Contains(v) {
+ i++
+ }
+ }
+ return
+}
+
+// Equal returns true if s contains exactly the same elements as vs.
+func (s VarSet) Equal(vs VarSet) bool {
+ if len(s) != len(vs) {
+ return false
+ }
+ for v := range s {
+ if !vs.Contains(v) {
+ return false
+ }
+ }
+ return true
+}
+
+// Intersect returns a VarSet containing variables in s that are in vs.
+func (s VarSet) Intersect(vs VarSet) VarSet {
+ i := 0
+ for v := range s {
+ if vs.Contains(v) {
+ i++
+ }
+ }
+ r := NewVarSetOfSize(i)
+ for v := range s {
+ if vs.Contains(v) {
+ r.Add(v)
+ }
+ }
+ return r
+}
+
+// Sorted returns a new sorted slice of vars from s.
+func (s VarSet) Sorted() []Var {
+ sorted := make([]Var, 0, len(s))
+ for v := range s {
+ sorted = append(sorted, v)
+ }
+ slices.SortFunc(sorted, VarCompare)
+ return sorted
+}
+
+// Update merges the other VarSet into this VarSet.
+func (s VarSet) Update(vs VarSet) {
+ for v := range vs {
+ s.Add(v)
+ }
+}
+
+func (s VarSet) String() string {
+ return fmt.Sprintf("%v", util.KeysSorted(s))
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/version_index.json b/vendor/github.com/open-policy-agent/opa/v1/ast/version_index.json
similarity index 98%
rename from vendor/github.com/open-policy-agent/opa/ast/version_index.json
rename to vendor/github.com/open-policy-agent/opa/v1/ast/version_index.json
index 718df220f9..b02f785299 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/version_index.json
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/version_index.json
@@ -497,6 +497,13 @@
"PreRelease": "",
"Metadata": ""
},
+ "internal.test_case": {
+ "Major": 1,
+ "Minor": 2,
+ "Patch": 0,
+ "PreRelease": "",
+ "Metadata": ""
+ },
"intersection": {
"Major": 0,
"Minor": 17,
@@ -532,6 +539,13 @@
"PreRelease": "",
"Metadata": ""
},
+ "io.jwt.verify_eddsa": {
+ "Major": 1,
+ "Minor": 8,
+ "Patch": 0,
+ "PreRelease": "",
+ "Metadata": ""
+ },
"io.jwt.verify_es256": {
"Major": 0,
"Minor": 17,
@@ -1395,6 +1409,20 @@
}
},
"features": {
+ "keywords_in_refs": {
+ "Major": 1,
+ "Minor": 6,
+ "Patch": 0,
+ "PreRelease": "",
+ "Metadata": ""
+ },
+ "rego_v1": {
+ "Major": 1,
+ "Minor": 0,
+ "Patch": 0,
+ "PreRelease": "",
+ "Metadata": ""
+ },
"rego_v1_import": {
"Major": 0,
"Minor": 59,
diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/visit.go b/vendor/github.com/open-policy-agent/opa/v1/ast/visit.go
new file mode 100644
index 0000000000..4ae6569ad7
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/ast/visit.go
@@ -0,0 +1,832 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+// Visitor defines the interface for iterating AST elements. The Visit function
+// can return a Visitor w which will be used to visit the children of the AST
+// element v. If the Visit function returns nil, the children will not be
+// visited.
+// Deprecated: use GenericVisitor or another visitor implementation
+type Visitor interface {
+ Visit(v any) (w Visitor)
+}
+
+// BeforeAndAfterVisitor wraps Visitor to provide hooks for being called before
+// and after the AST has been visited.
+// Deprecated: use GenericVisitor or another visitor implementation
+type BeforeAndAfterVisitor interface {
+ Visitor
+ Before(x any)
+ After(x any)
+}
+
+// Walk iterates the AST by calling the Visit function on the Visitor
+// v for x before recursing.
+// Deprecated: use GenericVisitor.Walk
+func Walk(v Visitor, x any) {
+ if bav, ok := v.(BeforeAndAfterVisitor); !ok {
+ walk(v, x)
+ } else {
+ bav.Before(x)
+ defer bav.After(x)
+ walk(bav, x)
+ }
+}
+
+// WalkBeforeAndAfter iterates the AST by calling the Visit function on the
+// Visitor v for x before recursing.
+// Deprecated: use GenericVisitor.Walk
+func WalkBeforeAndAfter(v BeforeAndAfterVisitor, x any) {
+ Walk(v, x)
+}
+
+func walk(v Visitor, x any) {
+ w := v.Visit(x)
+ if w == nil {
+ return
+ }
+ switch x := x.(type) {
+ case *Module:
+ Walk(w, x.Package)
+ for i := range x.Imports {
+ Walk(w, x.Imports[i])
+ }
+ for i := range x.Rules {
+ Walk(w, x.Rules[i])
+ }
+ for i := range x.Annotations {
+ Walk(w, x.Annotations[i])
+ }
+ for i := range x.Comments {
+ Walk(w, x.Comments[i])
+ }
+ case *Package:
+ Walk(w, x.Path)
+ case *Import:
+ Walk(w, x.Path)
+ Walk(w, x.Alias)
+ case *Rule:
+ Walk(w, x.Head)
+ Walk(w, x.Body)
+ if x.Else != nil {
+ Walk(w, x.Else)
+ }
+ case *Head:
+ Walk(w, x.Name)
+ Walk(w, x.Args)
+ if x.Key != nil {
+ Walk(w, x.Key)
+ }
+ if x.Value != nil {
+ Walk(w, x.Value)
+ }
+ case Body:
+ for i := range x {
+ Walk(w, x[i])
+ }
+ case Args:
+ for i := range x {
+ Walk(w, x[i])
+ }
+ case *Expr:
+ switch ts := x.Terms.(type) {
+ case *Term, *SomeDecl, *Every:
+ Walk(w, ts)
+ case []*Term:
+ for i := range ts {
+ Walk(w, ts[i])
+ }
+ }
+ for i := range x.With {
+ Walk(w, x.With[i])
+ }
+ case *With:
+ Walk(w, x.Target)
+ Walk(w, x.Value)
+ case *Term:
+ Walk(w, x.Value)
+ case Ref:
+ for i := range x {
+ Walk(w, x[i])
+ }
+ case *object:
+ x.Foreach(func(k, vv *Term) {
+ Walk(w, k)
+ Walk(w, vv)
+ })
+ case *Array:
+ x.Foreach(func(t *Term) {
+ Walk(w, t)
+ })
+ case Set:
+ x.Foreach(func(t *Term) {
+ Walk(w, t)
+ })
+ case *ArrayComprehension:
+ Walk(w, x.Term)
+ Walk(w, x.Body)
+ case *ObjectComprehension:
+ Walk(w, x.Key)
+ Walk(w, x.Value)
+ Walk(w, x.Body)
+ case *SetComprehension:
+ Walk(w, x.Term)
+ Walk(w, x.Body)
+ case Call:
+ for i := range x {
+ Walk(w, x[i])
+ }
+ case *Every:
+ if x.Key != nil {
+ Walk(w, x.Key)
+ }
+ Walk(w, x.Value)
+ Walk(w, x.Domain)
+ Walk(w, x.Body)
+ case *SomeDecl:
+ for i := range x.Symbols {
+ Walk(w, x.Symbols[i])
+ }
+ }
+}
+
+// WalkVars calls the function f on all vars under x. If the function f
+// returns true, AST nodes under the last node will not be visited.
+func WalkVars(x any, f func(Var) bool) {
+ vis := &GenericVisitor{func(x any) bool {
+ if v, ok := x.(Var); ok {
+ return f(v)
+ }
+ return false
+ }}
+ vis.Walk(x)
+}
+
+// WalkClosures calls the function f on all closures under x. If the function f
+// returns true, AST nodes under the last node will not be visited.
+func WalkClosures(x any, f func(any) bool) {
+ vis := &GenericVisitor{func(x any) bool {
+ switch x := x.(type) {
+ case *ArrayComprehension, *ObjectComprehension, *SetComprehension, *Every:
+ return f(x)
+ }
+ return false
+ }}
+ vis.Walk(x)
+}
+
+// WalkRefs calls the function f on all references under x. If the function f
+// returns true, AST nodes under the last node will not be visited.
+func WalkRefs(x any, f func(Ref) bool) {
+ vis := &GenericVisitor{func(x any) bool {
+ if r, ok := x.(Ref); ok {
+ return f(r)
+ }
+ return false
+ }}
+ vis.Walk(x)
+}
+
+// WalkTerms calls the function f on all terms under x. If the function f
+// returns true, AST nodes under the last node will not be visited.
+func WalkTerms(x any, f func(*Term) bool) {
+ vis := &GenericVisitor{func(x any) bool {
+ if term, ok := x.(*Term); ok {
+ return f(term)
+ }
+ return false
+ }}
+ vis.Walk(x)
+}
+
+// WalkWiths calls the function f on all with modifiers under x. If the function f
+// returns true, AST nodes under the last node will not be visited.
+func WalkWiths(x any, f func(*With) bool) {
+ vis := &GenericVisitor{func(x any) bool {
+ if w, ok := x.(*With); ok {
+ return f(w)
+ }
+ return false
+ }}
+ vis.Walk(x)
+}
+
+// WalkExprs calls the function f on all expressions under x. If the function f
+// returns true, AST nodes under the last node will not be visited.
+func WalkExprs(x any, f func(*Expr) bool) {
+ vis := &GenericVisitor{func(x any) bool {
+ if r, ok := x.(*Expr); ok {
+ return f(r)
+ }
+ return false
+ }}
+ vis.Walk(x)
+}
+
+// WalkBodies calls the function f on all bodies under x. If the function f
+// returns true, AST nodes under the last node will not be visited.
+func WalkBodies(x any, f func(Body) bool) {
+ vis := &GenericVisitor{func(x any) bool {
+ if b, ok := x.(Body); ok {
+ return f(b)
+ }
+ return false
+ }}
+ vis.Walk(x)
+}
+
+// WalkRules calls the function f on all rules under x. If the function f
+// returns true, AST nodes under the last node will not be visited.
+func WalkRules(x any, f func(*Rule) bool) {
+ vis := &GenericVisitor{func(x any) bool {
+ if r, ok := x.(*Rule); ok {
+ stop := f(r)
+ // NOTE(tsandall): since rules cannot be embedded inside of queries
+ // we can stop early if there is no else block.
+ if stop || r.Else == nil {
+ return true
+ }
+ }
+ return false
+ }}
+ vis.Walk(x)
+}
+
+// WalkNodes calls the function f on all nodes under x. If the function f
+// returns true, AST nodes under the last node will not be visited.
+func WalkNodes(x any, f func(Node) bool) {
+ vis := &GenericVisitor{func(x any) bool {
+ if n, ok := x.(Node); ok {
+ return f(n)
+ }
+ return false
+ }}
+ vis.Walk(x)
+}
+
+// GenericVisitor provides a utility to walk over AST nodes using a
+// closure. If the closure returns true, the visitor will not walk
+// over AST nodes under x.
+type GenericVisitor struct {
+ f func(x any) bool
+}
+
+// NewGenericVisitor returns a new GenericVisitor that will invoke the function
+// f on AST nodes.
+func NewGenericVisitor(f func(x any) bool) *GenericVisitor {
+ return &GenericVisitor{f}
+}
+
+// Walk iterates the AST by calling the function f on the
+// GenericVisitor before recursing. Contrary to the generic Walk, this
+// does not require allocating the visitor from heap.
+func (vis *GenericVisitor) Walk(x any) {
+ if vis.f(x) {
+ return
+ }
+
+ switch x := x.(type) {
+ case *Module:
+ vis.Walk(x.Package)
+ for i := range x.Imports {
+ vis.Walk(x.Imports[i])
+ }
+ for i := range x.Rules {
+ vis.Walk(x.Rules[i])
+ }
+ for i := range x.Annotations {
+ vis.Walk(x.Annotations[i])
+ }
+ for i := range x.Comments {
+ vis.Walk(x.Comments[i])
+ }
+ case *Package:
+ vis.Walk(x.Path)
+ case *Import:
+ vis.Walk(x.Path)
+ vis.Walk(x.Alias)
+ case *Rule:
+ vis.Walk(x.Head)
+ vis.Walk(x.Body)
+ if x.Else != nil {
+ vis.Walk(x.Else)
+ }
+ case *Head:
+ vis.Walk(x.Name)
+ vis.Walk(x.Args)
+ if x.Key != nil {
+ vis.Walk(x.Key)
+ }
+ if x.Value != nil {
+ vis.Walk(x.Value)
+ }
+ case Body:
+ for i := range x {
+ vis.Walk(x[i])
+ }
+ case Args:
+ for i := range x {
+ vis.Walk(x[i])
+ }
+ case *Expr:
+ switch ts := x.Terms.(type) {
+ case *Term, *SomeDecl, *Every:
+ vis.Walk(ts)
+ case []*Term:
+ for i := range ts {
+ vis.Walk(ts[i])
+ }
+ }
+ for i := range x.With {
+ vis.Walk(x.With[i])
+ }
+ case *With:
+ vis.Walk(x.Target)
+ vis.Walk(x.Value)
+ case *Term:
+ vis.Walk(x.Value)
+ case Ref:
+ for i := range x {
+ vis.Walk(x[i])
+ }
+ case *object:
+ x.Foreach(func(k, _ *Term) {
+ vis.Walk(k)
+ vis.Walk(x.Get(k))
+ })
+ case Object:
+ for _, k := range x.Keys() {
+ vis.Walk(k)
+ vis.Walk(x.Get(k))
+ }
+ case *Array:
+ for i := range x.Len() {
+ vis.Walk(x.Elem(i))
+ }
+ case Set:
+ xSlice := x.Slice()
+ for i := range xSlice {
+ vis.Walk(xSlice[i])
+ }
+ case *ArrayComprehension:
+ vis.Walk(x.Term)
+ vis.Walk(x.Body)
+ case *ObjectComprehension:
+ vis.Walk(x.Key)
+ vis.Walk(x.Value)
+ vis.Walk(x.Body)
+ case *SetComprehension:
+ vis.Walk(x.Term)
+ vis.Walk(x.Body)
+ case Call:
+ for i := range x {
+ vis.Walk(x[i])
+ }
+ case *Every:
+ if x.Key != nil {
+ vis.Walk(x.Key)
+ }
+ vis.Walk(x.Value)
+ vis.Walk(x.Domain)
+ vis.Walk(x.Body)
+ case *SomeDecl:
+ for i := range x.Symbols {
+ vis.Walk(x.Symbols[i])
+ }
+ }
+}
+
+// BeforeAfterVisitor provides a utility to walk over AST nodes using
+// closures. If the before closure returns true, the visitor will not
+// walk over AST nodes under x. The after closure is invoked always
+// after visiting a node.
+type BeforeAfterVisitor struct {
+ before func(x any) bool
+ after func(x any)
+}
+
+// NewBeforeAfterVisitor returns a new BeforeAndAfterVisitor that
+// will invoke the functions before and after AST nodes.
+func NewBeforeAfterVisitor(before func(x any) bool, after func(x any)) *BeforeAfterVisitor {
+ return &BeforeAfterVisitor{before, after}
+}
+
+// Walk iterates the AST by calling the functions on the
+// BeforeAndAfterVisitor before and after recursing. Contrary to the
+// generic Walk, this does not require allocating the visitor from
+// heap.
+func (vis *BeforeAfterVisitor) Walk(x any) {
+ defer vis.after(x)
+ if vis.before(x) {
+ return
+ }
+
+ switch x := x.(type) {
+ case *Module:
+ vis.Walk(x.Package)
+ for i := range x.Imports {
+ vis.Walk(x.Imports[i])
+ }
+ for i := range x.Rules {
+ vis.Walk(x.Rules[i])
+ }
+ for i := range x.Annotations {
+ vis.Walk(x.Annotations[i])
+ }
+ for i := range x.Comments {
+ vis.Walk(x.Comments[i])
+ }
+ case *Package:
+ vis.Walk(x.Path)
+ case *Import:
+ vis.Walk(x.Path)
+ vis.Walk(x.Alias)
+ case *Rule:
+ vis.Walk(x.Head)
+ vis.Walk(x.Body)
+ if x.Else != nil {
+ vis.Walk(x.Else)
+ }
+ case *Head:
+ if len(x.Reference) > 0 {
+ vis.Walk(x.Reference)
+ } else {
+ vis.Walk(x.Name)
+ if x.Key != nil {
+ vis.Walk(x.Key)
+ }
+ }
+ vis.Walk(x.Args)
+ if x.Value != nil {
+ vis.Walk(x.Value)
+ }
+ case Body:
+ for i := range x {
+ vis.Walk(x[i])
+ }
+ case Args:
+ for i := range x {
+ vis.Walk(x[i])
+ }
+ case *Expr:
+ switch ts := x.Terms.(type) {
+ case *Term, *SomeDecl, *Every:
+ vis.Walk(ts)
+ case []*Term:
+ for i := range ts {
+ vis.Walk(ts[i])
+ }
+ }
+ for i := range x.With {
+ vis.Walk(x.With[i])
+ }
+ case *With:
+ vis.Walk(x.Target)
+ vis.Walk(x.Value)
+ case *Term:
+ vis.Walk(x.Value)
+ case Ref:
+ for i := range x {
+ vis.Walk(x[i])
+ }
+ case *object:
+ x.Foreach(func(k, _ *Term) {
+ vis.Walk(k)
+ vis.Walk(x.Get(k))
+ })
+ case Object:
+ x.Foreach(func(k, _ *Term) {
+ vis.Walk(k)
+ vis.Walk(x.Get(k))
+ })
+ case *Array:
+ x.Foreach(func(t *Term) {
+ vis.Walk(t)
+ })
+ case Set:
+ xSlice := x.Slice()
+ for i := range xSlice {
+ vis.Walk(xSlice[i])
+ }
+ case *ArrayComprehension:
+ vis.Walk(x.Term)
+ vis.Walk(x.Body)
+ case *ObjectComprehension:
+ vis.Walk(x.Key)
+ vis.Walk(x.Value)
+ vis.Walk(x.Body)
+ case *SetComprehension:
+ vis.Walk(x.Term)
+ vis.Walk(x.Body)
+ case Call:
+ for i := range x {
+ vis.Walk(x[i])
+ }
+ case *Every:
+ if x.Key != nil {
+ vis.Walk(x.Key)
+ }
+ vis.Walk(x.Value)
+ vis.Walk(x.Domain)
+ vis.Walk(x.Body)
+ case *SomeDecl:
+ for i := range x.Symbols {
+ vis.Walk(x.Symbols[i])
+ }
+ }
+}
+
+// VarVisitor walks AST nodes under a given node and collects all encountered
+// variables. The collected variables can be controlled by specifying
+// VarVisitorParams when creating the visitor.
+type VarVisitor struct {
+ params VarVisitorParams
+ vars VarSet
+}
+
+// VarVisitorParams contains settings for a VarVisitor.
+type VarVisitorParams struct {
+ SkipRefHead bool
+ SkipRefCallHead bool
+ SkipObjectKeys bool
+ SkipClosures bool
+ SkipWithTarget bool
+ SkipSets bool
+}
+
+// NewVarVisitor returns a new VarVisitor object.
+func NewVarVisitor() *VarVisitor {
+ return &VarVisitor{
+ vars: NewVarSet(),
+ }
+}
+
+// Clear resets the visitor to its initial state, and returns it for chaining.
+func (vis *VarVisitor) Clear() *VarVisitor {
+ vis.params = VarVisitorParams{}
+ clear(vis.vars)
+
+ return vis
+}
+
+// ClearOrNew returns a new VarVisitor if vis is nil, or else a cleared VarVisitor.
+func (vis *VarVisitor) ClearOrNew() *VarVisitor {
+ if vis == nil {
+ return NewVarVisitor()
+ }
+ return vis.Clear()
+}
+
+// WithParams sets the parameters in params on vis.
+func (vis *VarVisitor) WithParams(params VarVisitorParams) *VarVisitor {
+ vis.params = params
+ return vis
+}
+
+// Add adds a variable v to the visitor's set of variables.
+func (vis *VarVisitor) Add(v Var) {
+ if vis.vars == nil {
+ vis.vars = NewVarSet(v)
+ } else {
+ vis.vars.Add(v)
+ }
+}
+
+// Vars returns a VarSet that contains collected vars.
+func (vis *VarVisitor) Vars() VarSet {
+ return vis.vars
+}
+
+// visit determines if the VarVisitor will recurse into x: if it returns `true`,
+// the visitor will _skip_ that branch of the AST
+func (vis *VarVisitor) visit(v any) bool {
+ if vis.params.SkipObjectKeys {
+ if o, ok := v.(Object); ok {
+ o.Foreach(func(_, v *Term) {
+ vis.Walk(v)
+ })
+ return true
+ }
+ }
+ if vis.params.SkipRefHead {
+ if r, ok := v.(Ref); ok {
+ rSlice := r[1:]
+ for i := range rSlice {
+ vis.Walk(rSlice[i])
+ }
+ return true
+ }
+ }
+ if vis.params.SkipClosures {
+ switch v := v.(type) {
+ case *ArrayComprehension, *ObjectComprehension, *SetComprehension:
+ return true
+ case *Expr:
+ if ev, ok := v.Terms.(*Every); ok {
+ vis.Walk(ev.Domain)
+ // We're _not_ walking ev.Body -- that's the closure here
+ return true
+ }
+ }
+ }
+ if vis.params.SkipWithTarget {
+ if v, ok := v.(*With); ok {
+ vis.Walk(v.Value)
+ return true
+ }
+ }
+ if vis.params.SkipSets {
+ if _, ok := v.(Set); ok {
+ return true
+ }
+ }
+ if vis.params.SkipRefCallHead {
+ switch v := v.(type) {
+ case *Expr:
+ if terms, ok := v.Terms.([]*Term); ok {
+ termSlice := terms[0].Value.(Ref)[1:]
+ for i := range termSlice {
+ vis.Walk(termSlice[i])
+ }
+ for i := 1; i < len(terms); i++ {
+ vis.Walk(terms[i])
+ }
+ for i := range v.With {
+ vis.Walk(v.With[i])
+ }
+ return true
+ }
+ case Call:
+ operator := v[0].Value.(Ref)
+ for i := 1; i < len(operator); i++ {
+ vis.Walk(operator[i])
+ }
+ for i := 1; i < len(v); i++ {
+ vis.Walk(v[i])
+ }
+ return true
+ case *With:
+ if ref, ok := v.Target.Value.(Ref); ok {
+ refSlice := ref[1:]
+ for i := range refSlice {
+ vis.Walk(refSlice[i])
+ }
+ }
+ if ref, ok := v.Value.Value.(Ref); ok {
+ refSlice := ref[1:]
+ for i := range refSlice {
+ vis.Walk(refSlice[i])
+ }
+ } else {
+ vis.Walk(v.Value)
+ }
+ return true
+ }
+ }
+ if v, ok := v.(Var); ok {
+ vis.Add(v)
+ }
+ return false
+}
+
+// Walk iterates the AST by calling the function f on the
+// GenericVisitor before recursing. Contrary to the generic Walk, this
+// does not require allocating the visitor from heap.
+func (vis *VarVisitor) Walk(x any) {
+ if vis.visit(x) {
+ return
+ }
+
+ switch x := x.(type) {
+ case *Module:
+ vis.Walk(x.Package)
+ for i := range x.Imports {
+ vis.Walk(x.Imports[i])
+ }
+ for i := range x.Rules {
+ vis.Walk(x.Rules[i])
+ }
+ for i := range x.Comments {
+ vis.Walk(x.Comments[i])
+ }
+ case *Package:
+ vis.WalkRef(x.Path)
+ case *Import:
+ vis.Walk(x.Path)
+ if x.Alias != "" {
+ vis.Add(x.Alias)
+ }
+ case *Rule:
+ vis.Walk(x.Head)
+ vis.WalkBody(x.Body)
+ if x.Else != nil {
+ vis.Walk(x.Else)
+ }
+ case *Head:
+ if len(x.Reference) > 0 {
+ vis.WalkRef(x.Reference)
+ } else {
+ vis.Add(x.Name)
+ if x.Key != nil {
+ vis.Walk(x.Key)
+ }
+ }
+ vis.WalkArgs(x.Args)
+ if x.Value != nil {
+ vis.Walk(x.Value)
+ }
+ case Body:
+ vis.WalkBody(x)
+ case Args:
+ vis.WalkArgs(x)
+ case *Expr:
+ switch ts := x.Terms.(type) {
+ case *Term, *SomeDecl, *Every:
+ vis.Walk(ts)
+ case []*Term:
+ for i := range ts {
+ vis.Walk(ts[i].Value)
+ }
+ }
+ for i := range x.With {
+ vis.Walk(x.With[i])
+ }
+ case *With:
+ vis.Walk(x.Target.Value)
+ vis.Walk(x.Value.Value)
+ case *Term:
+ vis.Walk(x.Value)
+ case Ref:
+ for i := range x {
+ vis.Walk(x[i].Value)
+ }
+ case *object:
+ x.Foreach(func(k, _ *Term) {
+ vis.Walk(k)
+ vis.Walk(x.Get(k))
+ })
+ case *Array:
+ x.Foreach(func(t *Term) {
+ vis.Walk(t)
+ })
+ case Set:
+ xSlice := x.Slice()
+ for i := range xSlice {
+ vis.Walk(xSlice[i])
+ }
+ case *ArrayComprehension:
+ vis.Walk(x.Term.Value)
+ vis.WalkBody(x.Body)
+ case *ObjectComprehension:
+ vis.Walk(x.Key.Value)
+ vis.Walk(x.Value.Value)
+ vis.WalkBody(x.Body)
+ case *SetComprehension:
+ vis.Walk(x.Term.Value)
+ vis.WalkBody(x.Body)
+ case Call:
+ for i := range x {
+ vis.Walk(x[i].Value)
+ }
+ case *Every:
+ if x.Key != nil {
+ vis.Walk(x.Key.Value)
+ }
+ vis.Walk(x.Value)
+ vis.Walk(x.Domain)
+ vis.WalkBody(x.Body)
+ case *SomeDecl:
+ for i := range x.Symbols {
+ vis.Walk(x.Symbols[i])
+ }
+ }
+}
+
+// WalkArgs exists only to avoid the allocation cost of boxing Args to `any` in the VarVisitor.
+// Use it when you know beforehand that the type to walk is Args.
+func (vis *VarVisitor) WalkArgs(x Args) {
+ for i := range x {
+ vis.Walk(x[i].Value)
+ }
+}
+
+// WalkRef exists only to avoid the allocation cost of boxing Ref to `any` in the VarVisitor.
+// Use it when you know beforehand that the type to walk is a Ref.
+func (vis *VarVisitor) WalkRef(ref Ref) {
+ if vis.params.SkipRefHead {
+ ref = ref[1:]
+ }
+ for _, term := range ref {
+ vis.Walk(term.Value)
+ }
+}
+
+// WalkBody exists only to avoid the allocation cost of boxing Body to `any` in the VarVisitor.
+// Use it when you know beforehand that the type to walk is a Body.
+func (vis *VarVisitor) WalkBody(body Body) {
+ for _, expr := range body {
+ vis.Walk(expr)
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/bundle.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/bundle.go
new file mode 100644
index 0000000000..5b418c360b
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/bundle.go
@@ -0,0 +1,1845 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package bundle implements bundle loading.
+package bundle
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "maps"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "sync"
+
+ "github.com/gobwas/glob"
+ "github.com/open-policy-agent/opa/internal/file/archive"
+ "github.com/open-policy-agent/opa/internal/merge"
+ "github.com/open-policy-agent/opa/v1/ast"
+ astJSON "github.com/open-policy-agent/opa/v1/ast/json"
+ "github.com/open-policy-agent/opa/v1/format"
+ "github.com/open-policy-agent/opa/v1/metrics"
+ "github.com/open-policy-agent/opa/v1/storage"
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+// Common file extensions and file names.
+const (
+ RegoExt = ".rego"
+ WasmFile = "policy.wasm"
+ PlanFile = "plan.json"
+ ManifestExt = ".manifest"
+ SignaturesFile = "signatures.json"
+ patchFile = "patch.json"
+ dataFile = "data.json"
+ yamlDataFile = "data.yaml"
+ ymlDataFile = "data.yml"
+ defaultHashingAlg = "SHA-256"
+ DefaultSizeLimitBytes = (1024 * 1024 * 1024) // limit bundle reads to 1GB to protect against gzip bombs
+ DeltaBundleType = "delta"
+ SnapshotBundleType = "snapshot"
+)
+
+// Bundle represents a loaded bundle. The bundle can contain data and policies.
+type Bundle struct {
+ Signatures SignaturesConfig
+ Manifest Manifest
+ Data map[string]any
+ Modules []ModuleFile
+ Wasm []byte // Deprecated. Use WasmModules instead
+ WasmModules []WasmModuleFile
+ PlanModules []PlanModuleFile
+ Patch Patch
+ Etag string
+ Raw []Raw
+
+ lazyLoadingMode bool
+ sizeLimitBytes int64
+}
+
+// Raw contains raw bytes representing the bundle's content
+type Raw struct {
+ Path string
+ Value []byte
+ module *ModuleFile
+}
+
+// Patch contains an array of objects wherein each object represents the patch operation to be
+// applied to the bundle data.
+type Patch struct {
+ Data []PatchOperation `json:"data,omitempty"`
+}
+
+// PatchOperation models a single patch operation against a document.
+type PatchOperation struct {
+ Op string `json:"op"`
+ Path string `json:"path"`
+ Value any `json:"value"`
+}
+
+// SignaturesConfig represents an array of JWTs that encapsulate the signatures for the bundle.
+type SignaturesConfig struct {
+ Signatures []string `json:"signatures,omitempty"`
+ Plugin string `json:"plugin,omitempty"`
+}
+
+// isEmpty returns if the SignaturesConfig is empty.
+func (s SignaturesConfig) isEmpty() bool {
+ return reflect.DeepEqual(s, SignaturesConfig{})
+}
+
+// DecodedSignature represents the decoded JWT payload.
+type DecodedSignature struct {
+ Files []FileInfo `json:"files"`
+ KeyID string `json:"keyid"` // Deprecated, use kid in the JWT header instead.
+ Scope string `json:"scope"`
+ IssuedAt int64 `json:"iat"`
+ Issuer string `json:"iss"`
+}
+
+// FileInfo contains the hashing algorithm used, resulting digest etc.
+type FileInfo struct {
+ Name string `json:"name"`
+ Hash string `json:"hash"`
+ Algorithm string `json:"algorithm"`
+}
+
+// NewFile returns a new FileInfo.
+func NewFile(name, hash, alg string) FileInfo {
+ return FileInfo{
+ Name: name,
+ Hash: hash,
+ Algorithm: alg,
+ }
+}
+
+// Manifest represents the manifest from a bundle. The manifest may contain
+// metadata such as the bundle revision.
+type Manifest struct {
+ Revision string `json:"revision"`
+ Roots *[]string `json:"roots,omitempty"`
+ WasmResolvers []WasmResolver `json:"wasm,omitempty"`
+ // RegoVersion is the global Rego version for the bundle described by this Manifest.
+ // The Rego version of individual files can be overridden in FileRegoVersions.
+ // We don't use ast.RegoVersion here, as this iota type's order isn't guaranteed to be stable over time.
+ // We use a pointer so that we can support hand-made bundles that don't have an explicit version appropriately.
+ // E.g. in OPA 0.x if --v1-compatible is used when consuming the bundle, and there is no specified version,
+ // we should default to v1; if --v1-compatible isn't used, we should default to v0. In OPA 1.0, no --x-compatible
+ // flag and no explicit bundle version should default to v1.
+ RegoVersion *int `json:"rego_version,omitempty"`
+ // FileRegoVersions is a map from file paths to Rego versions.
+ // This allows individual files to override the global Rego version specified by RegoVersion.
+ FileRegoVersions map[string]int `json:"file_rego_versions,omitempty"`
+ Metadata map[string]any `json:"metadata,omitempty"`
+
+ compiledFileRegoVersions []fileRegoVersion
+}
+
+type fileRegoVersion struct {
+ path glob.Glob
+ version int
+}
+
+// WasmResolver maps a wasm module to an entrypoint ref.
+type WasmResolver struct {
+ Entrypoint string `json:"entrypoint,omitempty"`
+ Module string `json:"module,omitempty"`
+ Annotations []*ast.Annotations `json:"annotations,omitempty"`
+}
+
+// Init initializes the manifest. If you instantiate a manifest
+// manually, call Init to ensure that the roots are set properly.
+func (m *Manifest) Init() {
+ if m.Roots == nil {
+ defaultRoots := []string{""}
+ m.Roots = &defaultRoots
+ }
+}
+
+// AddRoot adds r to the roots of m. This function is idempotent.
+func (m *Manifest) AddRoot(r string) {
+ m.Init()
+ if !RootPathsContain(*m.Roots, r) {
+ *m.Roots = append(*m.Roots, r)
+ }
+}
+
+func (m *Manifest) SetRegoVersion(v ast.RegoVersion) {
+ m.Init()
+ regoVersion := 0
+ if v == ast.RegoV1 {
+ regoVersion = 1
+ }
+ m.RegoVersion = ®oVersion
+}
+
+// Equal returns true if m is semantically equivalent to other.
+func (m Manifest) Equal(other Manifest) bool {
+
+ // This is safe since both are passed by value.
+ m.Init()
+ other.Init()
+
+ if m.Revision != other.Revision {
+ return false
+ }
+
+ if m.RegoVersion == nil && other.RegoVersion != nil {
+ return false
+ }
+ if m.RegoVersion != nil && other.RegoVersion == nil {
+ return false
+ }
+ if m.RegoVersion != nil && other.RegoVersion != nil && *m.RegoVersion != *other.RegoVersion {
+ return false
+ }
+
+ // If both are nil, or both are empty, we consider them equal.
+ if !(len(m.FileRegoVersions) == 0 && len(other.FileRegoVersions) == 0) &&
+ !reflect.DeepEqual(m.FileRegoVersions, other.FileRegoVersions) {
+ return false
+ }
+
+ if !reflect.DeepEqual(m.Metadata, other.Metadata) {
+ return false
+ }
+
+ return m.equalWasmResolversAndRoots(other)
+}
+
+func (m Manifest) Empty() bool {
+ return m.Equal(Manifest{})
+}
+
+// Copy returns a deep copy of the manifest.
+func (m Manifest) Copy() Manifest {
+ m.Init()
+ roots := make([]string, len(*m.Roots))
+ copy(roots, *m.Roots)
+ m.Roots = &roots
+
+ wasmModules := make([]WasmResolver, len(m.WasmResolvers))
+ copy(wasmModules, m.WasmResolvers)
+ m.WasmResolvers = wasmModules
+
+ metadata := m.Metadata
+
+ if metadata != nil {
+ m.Metadata = make(map[string]any)
+ maps.Copy(m.Metadata, metadata)
+ }
+
+ return m
+}
+
+func (m Manifest) String() string {
+ m.Init()
+ if m.RegoVersion != nil {
+ return fmt.Sprintf("",
+ m.Revision, *m.RegoVersion, *m.Roots, m.WasmResolvers, m.Metadata)
+ }
+ return fmt.Sprintf("",
+ m.Revision, *m.Roots, m.WasmResolvers, m.Metadata)
+}
+
+func (m Manifest) rootSet() stringSet {
+ rs := map[string]struct{}{}
+
+ for _, r := range *m.Roots {
+ rs[r] = struct{}{}
+ }
+
+ return stringSet(rs)
+}
+
+func (m Manifest) equalWasmResolversAndRoots(other Manifest) bool {
+ if len(m.WasmResolvers) != len(other.WasmResolvers) {
+ return false
+ }
+
+ for i := range len(m.WasmResolvers) {
+ if !m.WasmResolvers[i].Equal(&other.WasmResolvers[i]) {
+ return false
+ }
+ }
+
+ return m.rootSet().Equal(other.rootSet())
+}
+
+func (wr *WasmResolver) Equal(other *WasmResolver) bool {
+ if wr == nil && other == nil {
+ return true
+ }
+
+ if wr == nil || other == nil {
+ return false
+ }
+
+ if wr.Module != other.Module {
+ return false
+ }
+
+ if wr.Entrypoint != other.Entrypoint {
+ return false
+ }
+
+ annotLen := len(wr.Annotations)
+ if annotLen != len(other.Annotations) {
+ return false
+ }
+
+ for i := range annotLen {
+ if wr.Annotations[i].Compare(other.Annotations[i]) != 0 {
+ return false
+ }
+ }
+
+ return true
+}
+
+type stringSet map[string]struct{}
+
+func (ss stringSet) Equal(other stringSet) bool {
+ if len(ss) != len(other) {
+ return false
+ }
+ for k := range other {
+ if _, ok := ss[k]; !ok {
+ return false
+ }
+ }
+ return true
+}
+
+func (m *Manifest) validateAndInjectDefaults(b Bundle) error {
+
+ m.Init()
+
+ // Validate roots in bundle.
+ roots := *m.Roots
+
+ // Standardize the roots (no starting or trailing slash)
+ for i := range roots {
+ roots[i] = strings.Trim(roots[i], "/")
+ }
+
+ for i := range len(roots) - 1 {
+ for j := i + 1; j < len(roots); j++ {
+ if RootPathsOverlap(roots[i], roots[j]) {
+ return fmt.Errorf("manifest has overlapped roots: '%v' and '%v'", roots[i], roots[j])
+ }
+ }
+ }
+
+ // Validate modules in bundle.
+ for _, module := range b.Modules {
+ found := false
+ if path, err := module.Parsed.Package.Path.Ptr(); err == nil {
+ found = RootPathsContain(roots, path)
+ }
+ if !found {
+ return fmt.Errorf("manifest roots %v do not permit '%v' in module '%v'", roots, module.Parsed.Package, module.Path)
+ }
+ }
+
+ // Build a set of wasm module entrypoints to validate
+ wasmModuleToEps := map[string]string{}
+ seenEps := map[string]struct{}{}
+ for _, wm := range b.WasmModules {
+ wasmModuleToEps[wm.Path] = ""
+ }
+
+ for _, wmConfig := range b.Manifest.WasmResolvers {
+ _, ok := wasmModuleToEps[wmConfig.Module]
+ if !ok {
+ return fmt.Errorf("manifest references wasm module '%s' but the module file does not exist", wmConfig.Module)
+ }
+
+ // Ensure wasm module entrypoint in within bundle roots
+ if !RootPathsContain(roots, wmConfig.Entrypoint) {
+ return fmt.Errorf("manifest roots %v do not permit '%v' entrypoint for wasm module '%v'", roots, wmConfig.Entrypoint, wmConfig.Module)
+ }
+
+ if _, ok := seenEps[wmConfig.Entrypoint]; ok {
+ return fmt.Errorf("entrypoint '%s' cannot be used by more than one wasm module", wmConfig.Entrypoint)
+ }
+ seenEps[wmConfig.Entrypoint] = struct{}{}
+
+ wasmModuleToEps[wmConfig.Module] = wmConfig.Entrypoint
+ }
+
+ // Validate data patches in bundle.
+ for _, patch := range b.Patch.Data {
+ path := strings.Trim(patch.Path, "/")
+ if !RootPathsContain(roots, path) {
+ return fmt.Errorf("manifest roots %v do not permit data patch at path '%s'", roots, path)
+ }
+ }
+
+ if b.lazyLoadingMode {
+ return nil
+ }
+
+ // Validate data in bundle.
+ return dfs(b.Data, "", func(path string, node any) (bool, error) {
+ path = strings.Trim(path, "/")
+ if RootPathsContain(roots, path) {
+ return true, nil
+ }
+
+ if _, ok := node.(map[string]any); ok {
+ for i := range roots {
+ if RootPathsContain(strings.Split(path, "/"), roots[i]) {
+ return false, nil
+ }
+ }
+ }
+ return false, fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, path)
+ })
+}
+
+// ModuleFile represents a single module contained in a bundle.
+type ModuleFile struct {
+ URL string
+ Path string
+ RelativePath string
+ Raw []byte
+ Parsed *ast.Module
+}
+
+// WasmModuleFile represents a single wasm module contained in a bundle.
+type WasmModuleFile struct {
+ URL string
+ Path string
+ Entrypoints []ast.Ref
+ Raw []byte
+}
+
+// PlanModuleFile represents a single plan module contained in a bundle.
+//
+// NOTE(tsandall): currently the plans are just opaque binary blobs. In the
+// future we could inject the entrypoints so that the plans could be executed
+// inside of OPA proper like we do for Wasm modules.
+type PlanModuleFile struct {
+ URL string
+ Path string
+ Raw []byte
+}
+
+var (
+ pluginMtx sync.Mutex
+
+ // The bundle activator to use by default.
+ bundleExtActivator string
+
+ // The function to use for creating a storage.Store for bundles.
+ BundleExtStore func() storage.Store
+)
+
+// RegisterDefaultBundleActivator sets the default bundle activator for OPA to use for bundle activation.
+// The id must already have been registered with RegisterActivator.
+func RegisterDefaultBundleActivator(id string) {
+ pluginMtx.Lock()
+ defer pluginMtx.Unlock()
+
+ bundleExtActivator = id
+}
+
+// RegisterStoreFunc sets the function to use for creating storage for bundles
+// in OPA. If no function is registered, OPA will use situational defaults to
+// decide on what sort of storage.Store to create when bundle storage is
+// needed. Typically the default is inmem.Store.
+func RegisterStoreFunc(s func() storage.Store) {
+ pluginMtx.Lock()
+ defer pluginMtx.Unlock()
+
+ BundleExtStore = s
+}
+
+// HasExtension returns true if a default bundle activator has been set
+// with RegisterDefaultBundleActivator.
+func HasExtension() bool {
+ pluginMtx.Lock()
+ defer pluginMtx.Unlock()
+
+ return bundleExtActivator != ""
+}
+
+// Reader contains the reader to load the bundle from.
+type Reader struct {
+ loader DirectoryLoader
+ includeManifestInData bool
+ metrics metrics.Metrics
+ baseDir string
+ verificationConfig *VerificationConfig
+ skipVerify bool
+ processAnnotations bool
+ capabilities *ast.Capabilities
+ files map[string]FileInfo // files in the bundle signature payload
+ sizeLimitBytes int64
+ etag string
+ lazyLoadingMode bool
+ name string
+ persist bool
+ regoVersion ast.RegoVersion
+ followSymlinks bool
+}
+
+// NewReader is deprecated. Use NewCustomReader instead.
+func NewReader(r io.Reader) *Reader {
+ return NewCustomReader(NewTarballLoader(r))
+}
+
+// NewCustomReader returns a new Reader configured to use the
+// specified DirectoryLoader.
+func NewCustomReader(loader DirectoryLoader) *Reader {
+ nr := Reader{
+ loader: loader,
+ metrics: metrics.New(),
+ files: make(map[string]FileInfo),
+ sizeLimitBytes: DefaultSizeLimitBytes + 1,
+ lazyLoadingMode: HasExtension(),
+ }
+ return &nr
+}
+
+// IncludeManifestInData sets whether the manifest metadata should be
+// included in the bundle's data.
+func (r *Reader) IncludeManifestInData(includeManifestInData bool) *Reader {
+ r.includeManifestInData = includeManifestInData
+ return r
+}
+
+// WithMetrics sets the metrics object to be used while loading bundles
+func (r *Reader) WithMetrics(m metrics.Metrics) *Reader {
+ r.metrics = m
+ return r
+}
+
+// WithBaseDir sets a base directory for file paths of loaded Rego
+// modules. This will *NOT* affect the loaded path of data files.
+func (r *Reader) WithBaseDir(dir string) *Reader {
+ r.baseDir = dir
+ return r
+}
+
+// WithBundleVerificationConfig sets the key configuration used to verify a signed bundle
+func (r *Reader) WithBundleVerificationConfig(config *VerificationConfig) *Reader {
+ r.verificationConfig = config
+ return r
+}
+
+// WithSkipBundleVerification skips verification of a signed bundle
+func (r *Reader) WithSkipBundleVerification(skipVerify bool) *Reader {
+ r.skipVerify = skipVerify
+ return r
+}
+
+// WithProcessAnnotations enables annotation processing during .rego file parsing.
+func (r *Reader) WithProcessAnnotations(yes bool) *Reader {
+ r.processAnnotations = yes
+ return r
+}
+
+// WithCapabilities sets the supported capabilities when loading the files
+func (r *Reader) WithCapabilities(caps *ast.Capabilities) *Reader {
+ r.capabilities = caps
+ return r
+}
+
+// WithJSONOptions sets the JSON options on the parser (now a no-op).
+//
+// Deprecated: Use SetOptions in the json package instead, where a longer description
+// of why this is deprecated also can be found.
+func (r *Reader) WithJSONOptions(*astJSON.Options) *Reader {
+ return r
+}
+
+// WithSizeLimitBytes sets the size limit to apply to files in the bundle. If files are larger
+// than this, an error will be returned by the reader.
+func (r *Reader) WithSizeLimitBytes(n int64) *Reader {
+ r.sizeLimitBytes = n + 1
+ return r
+}
+
+// WithBundleEtag sets the given etag value on the bundle
+func (r *Reader) WithBundleEtag(etag string) *Reader {
+ r.etag = etag
+ return r
+}
+
+// WithBundleName specifies the bundle name
+func (r *Reader) WithBundleName(name string) *Reader {
+ r.name = name
+ return r
+}
+
+func (r *Reader) WithFollowSymlinks(yes bool) *Reader {
+ r.followSymlinks = yes
+ return r
+}
+
+// WithLazyLoadingMode sets the bundle loading mode. If true,
+// bundles will be read in lazy mode. In this mode, data files in the bundle will not be
+// deserialized and the check to validate that the bundle data does not contain paths
+// outside the bundle's roots will not be performed while reading the bundle.
+func (r *Reader) WithLazyLoadingMode(yes bool) *Reader {
+ r.lazyLoadingMode = yes
+ return r
+}
+
+// WithBundlePersistence specifies if the downloaded bundle will eventually be persisted to disk.
+func (r *Reader) WithBundlePersistence(persist bool) *Reader {
+ r.persist = persist
+ return r
+}
+
+func (r *Reader) WithRegoVersion(version ast.RegoVersion) *Reader {
+ r.regoVersion = version
+ return r
+}
+
+func (r *Reader) ParserOptions() ast.ParserOptions {
+ return ast.ParserOptions{
+ ProcessAnnotation: r.processAnnotations,
+ Capabilities: r.capabilities,
+ RegoVersion: r.regoVersion,
+ }
+}
+
+// Read returns a new Bundle loaded from the reader.
+func (r *Reader) Read() (Bundle, error) {
+
+ var bundle Bundle
+ var descriptors []*Descriptor
+ var err error
+ var raw []Raw
+
+ bundle.Signatures, bundle.Patch, descriptors, err = preProcessBundle(r.loader, r.skipVerify, r.sizeLimitBytes)
+ if err != nil {
+ return bundle, err
+ }
+
+ bundle.lazyLoadingMode = r.lazyLoadingMode
+ bundle.sizeLimitBytes = r.sizeLimitBytes
+
+ if bundle.Type() == SnapshotBundleType {
+ err = r.checkSignaturesAndDescriptors(bundle.Signatures)
+ if err != nil {
+ return bundle, err
+ }
+
+ bundle.Data = map[string]any{}
+ }
+
+ var modules []ModuleFile
+ for _, f := range descriptors {
+ buf, err := readFile(f, r.sizeLimitBytes)
+ if err != nil {
+ return bundle, err
+ }
+
+ // verify the file content
+ if bundle.Type() == SnapshotBundleType && !bundle.Signatures.isEmpty() {
+ path := f.Path()
+ if r.baseDir != "" {
+ path = f.URL()
+ }
+ path = strings.TrimPrefix(path, "/")
+
+ // check if the file is to be excluded from bundle verification
+ if r.isFileExcluded(path) {
+ delete(r.files, path)
+ } else {
+ if err = r.verifyBundleFile(path, buf); err != nil {
+ return bundle, err
+ }
+ }
+ }
+
+ // Normalize the paths to use `/` separators
+ path := filepath.ToSlash(f.Path())
+
+ if strings.HasSuffix(path, RegoExt) {
+ fullPath := r.fullPath(path)
+ bs := buf.Bytes()
+
+ // Modules are parsed after we've had a chance to read the manifest
+ mf := ModuleFile{
+ URL: f.URL(),
+ Path: fullPath,
+ RelativePath: path,
+ Raw: bs,
+ }
+ modules = append(modules, mf)
+
+ if r.lazyLoadingMode {
+ p := fullPath
+ if r.name != "" {
+ p = modulePathWithPrefix(r.name, fullPath)
+ }
+
+ raw = append(raw, Raw{Path: p, Value: bs, module: &mf})
+ }
+ } else if filepath.Base(path) == WasmFile {
+ bundle.WasmModules = append(bundle.WasmModules, WasmModuleFile{
+ URL: f.URL(),
+ Path: r.fullPath(path),
+ Raw: buf.Bytes(),
+ })
+ } else if filepath.Base(path) == PlanFile {
+ bundle.PlanModules = append(bundle.PlanModules, PlanModuleFile{
+ URL: f.URL(),
+ Path: r.fullPath(path),
+ Raw: buf.Bytes(),
+ })
+ } else if filepath.Base(path) == dataFile {
+ if r.lazyLoadingMode {
+ raw = append(raw, Raw{Path: path, Value: buf.Bytes()})
+ continue
+ }
+
+ var value any
+
+ r.metrics.Timer(metrics.RegoDataParse).Start()
+ err := util.UnmarshalJSON(buf.Bytes(), &value)
+ r.metrics.Timer(metrics.RegoDataParse).Stop()
+
+ if err != nil {
+ return bundle, fmt.Errorf("bundle load failed on %v: %w", r.fullPath(path), err)
+ }
+
+ if err := insertValue(&bundle, path, value); err != nil {
+ return bundle, err
+ }
+
+ } else if filepath.Base(path) == yamlDataFile || filepath.Base(path) == ymlDataFile {
+ if r.lazyLoadingMode {
+ raw = append(raw, Raw{Path: path, Value: buf.Bytes()})
+ continue
+ }
+
+ var value any
+
+ r.metrics.Timer(metrics.RegoDataParse).Start()
+ err := util.Unmarshal(buf.Bytes(), &value)
+ r.metrics.Timer(metrics.RegoDataParse).Stop()
+
+ if err != nil {
+ return bundle, fmt.Errorf("bundle load failed on %v: %w", r.fullPath(path), err)
+ }
+
+ if err := insertValue(&bundle, path, value); err != nil {
+ return bundle, err
+ }
+
+ } else if strings.HasSuffix(path, ManifestExt) {
+ if err := util.NewJSONDecoder(&buf).Decode(&bundle.Manifest); err != nil {
+ return bundle, fmt.Errorf("bundle load failed on manifest decode: %w", err)
+ }
+ }
+ }
+
+ // Parse modules
+ popts := r.ParserOptions()
+ popts.RegoVersion = bundle.RegoVersion(popts.EffectiveRegoVersion())
+ for _, mf := range modules {
+ modulePopts := popts
+ if regoVersion, err := bundle.RegoVersionForFile(mf.RelativePath, popts.EffectiveRegoVersion()); err != nil {
+ return bundle, err
+ } else if regoVersion != ast.RegoUndefined {
+ // We don't expect ast.RegoUndefined here, but don't override configured rego-version if we do just to be extra protective
+ modulePopts.RegoVersion = regoVersion
+ }
+ r.metrics.Timer(metrics.RegoModuleParse).Start()
+ mf.Parsed, err = ast.ParseModuleWithOpts(mf.Path, util.ByteSliceToString(mf.Raw), modulePopts)
+ r.metrics.Timer(metrics.RegoModuleParse).Stop()
+ if err != nil {
+ return bundle, err
+ }
+ bundle.Modules = append(bundle.Modules, mf)
+ }
+
+ if bundle.Type() == DeltaBundleType {
+ if len(bundle.Data) != 0 {
+ return bundle, errors.New("delta bundle expected to contain only patch file but data files found")
+ }
+
+ if len(bundle.Modules) != 0 {
+ return bundle, errors.New("delta bundle expected to contain only patch file but policy files found")
+ }
+
+ if len(bundle.WasmModules) != 0 {
+ return bundle, errors.New("delta bundle expected to contain only patch file but wasm files found")
+ }
+
+ if r.persist {
+ return bundle, errors.New("'persist' property is true in config. persisting delta bundle to disk is not supported")
+ }
+ }
+
+ // check if the bundle signatures specify any files that weren't found in the bundle
+ if bundle.Type() == SnapshotBundleType && len(r.files) != 0 {
+ extra := []string{}
+ for k := range r.files {
+ extra = append(extra, k)
+ }
+ return bundle, fmt.Errorf("file(s) %v specified in bundle signatures but not found in the target bundle", extra)
+ }
+
+ if err := bundle.Manifest.validateAndInjectDefaults(bundle); err != nil {
+ return bundle, err
+ }
+
+ // Inject the wasm module entrypoint refs into the WasmModuleFile structs
+ epMap := map[string][]string{}
+ for _, r := range bundle.Manifest.WasmResolvers {
+ epMap[r.Module] = append(epMap[r.Module], r.Entrypoint)
+ }
+ for i := range len(bundle.WasmModules) {
+ entrypoints := epMap[bundle.WasmModules[i].Path]
+ for _, entrypoint := range entrypoints {
+ ref, err := ast.PtrRef(ast.DefaultRootDocument, entrypoint)
+ if err != nil {
+ return bundle, fmt.Errorf("failed to parse wasm module entrypoint '%s': %s", entrypoint, err)
+ }
+ bundle.WasmModules[i].Entrypoints = append(bundle.WasmModules[i].Entrypoints, ref)
+ }
+ }
+
+ if r.includeManifestInData {
+ var metadata map[string]any
+
+ b, err := json.Marshal(&bundle.Manifest)
+ if err != nil {
+ return bundle, fmt.Errorf("bundle load failed on manifest marshal: %w", err)
+ }
+
+ err = util.UnmarshalJSON(b, &metadata)
+ if err != nil {
+ return bundle, fmt.Errorf("bundle load failed on manifest unmarshal: %w", err)
+ }
+
+ // For backwards compatibility always write to the old unnamed manifest path
+ // This will *not* be correct if >1 bundle is in use...
+ if err := bundle.insertData(legacyManifestStoragePath, metadata); err != nil {
+ return bundle, fmt.Errorf("bundle load failed on %v: %w", legacyRevisionStoragePath, err)
+ }
+ }
+
+ bundle.Etag = r.etag
+ bundle.Raw = raw
+
+ return bundle, nil
+}
+
+func (r *Reader) isFileExcluded(path string) bool {
+ for _, e := range r.verificationConfig.Exclude {
+ match, _ := filepath.Match(e, path)
+ if match {
+ return true
+ }
+ }
+ return false
+}
+
+func (r *Reader) checkSignaturesAndDescriptors(signatures SignaturesConfig) error {
+ if r.skipVerify {
+ return nil
+ }
+
+ if signatures.isEmpty() && r.verificationConfig != nil && r.verificationConfig.KeyID != "" {
+ return errors.New("bundle missing .signatures.json file")
+ }
+
+ if !signatures.isEmpty() {
+ if r.verificationConfig == nil {
+ return errors.New("verification key not provided")
+ }
+
+ // verify the JWT signatures included in the `.signatures.json` file
+ if err := r.verifyBundleSignature(signatures); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *Reader) verifyBundleSignature(sc SignaturesConfig) error {
+ var err error
+ r.files, err = VerifyBundleSignature(sc, r.verificationConfig)
+ return err
+}
+
+func (r *Reader) verifyBundleFile(path string, data bytes.Buffer) error {
+ return VerifyBundleFile(path, data, r.files)
+}
+
+func (r *Reader) fullPath(path string) string {
+ if r.baseDir != "" {
+ path = filepath.Join(r.baseDir, path)
+ }
+ return path
+}
+
+// Write is deprecated. Use NewWriter instead.
+func Write(w io.Writer, bundle Bundle) error {
+ return NewWriter(w).
+ UseModulePath(true).
+ DisableFormat(true).
+ Write(bundle)
+}
+
+// Writer implements bundle serialization.
+type Writer struct {
+ usePath bool
+ disableFormat bool
+ w io.Writer
+}
+
+// NewWriter returns a bundle writer that writes to w.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ }
+}
+
+// UseModulePath configures the writer to use the module file path instead of the
+// module file URL during serialization. This is for backwards compatibility.
+func (w *Writer) UseModulePath(yes bool) *Writer {
+ w.usePath = yes
+ return w
+}
+
+// DisableFormat configures the writer to just write out raw bytes instead
+// of formatting modules before serialization.
+func (w *Writer) DisableFormat(yes bool) *Writer {
+ w.disableFormat = yes
+ return w
+}
+
+// Write writes the bundle to the writer's output stream.
+func (w *Writer) Write(bundle Bundle) error {
+ gw := gzip.NewWriter(w.w)
+ tw := tar.NewWriter(gw)
+
+ bundleType := bundle.Type()
+
+ if bundleType == SnapshotBundleType {
+ var buf bytes.Buffer
+
+ if err := json.NewEncoder(&buf).Encode(bundle.Data); err != nil {
+ return err
+ }
+
+ if err := archive.WriteFile(tw, "data.json", buf.Bytes()); err != nil {
+ return err
+ }
+
+ for _, module := range bundle.Modules {
+ path := module.URL
+ if w.usePath {
+ path = module.Path
+ }
+
+ if err := archive.WriteFile(tw, path, module.Raw); err != nil {
+ return err
+ }
+ }
+
+ if err := w.writeWasm(tw, bundle); err != nil {
+ return err
+ }
+
+ if err := writeSignatures(tw, bundle); err != nil {
+ return err
+ }
+
+ if err := w.writePlan(tw, bundle); err != nil {
+ return err
+ }
+ } else if bundleType == DeltaBundleType {
+ if err := writePatch(tw, bundle); err != nil {
+ return err
+ }
+ }
+
+ if err := writeManifest(tw, bundle); err != nil {
+ return err
+ }
+
+ if err := tw.Close(); err != nil {
+ return err
+ }
+
+ return gw.Close()
+}
+
+func (w *Writer) writeWasm(tw *tar.Writer, bundle Bundle) error {
+ for _, wm := range bundle.WasmModules {
+ path := wm.URL
+ if w.usePath {
+ path = wm.Path
+ }
+
+ err := archive.WriteFile(tw, path, wm.Raw)
+ if err != nil {
+ return err
+ }
+ }
+
+ if len(bundle.Wasm) > 0 {
+ err := archive.WriteFile(tw, "/"+WasmFile, bundle.Wasm)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (w *Writer) writePlan(tw *tar.Writer, bundle Bundle) error {
+ for _, wm := range bundle.PlanModules {
+ path := wm.URL
+ if w.usePath {
+ path = wm.Path
+ }
+
+ err := archive.WriteFile(tw, path, wm.Raw)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func writeManifest(tw *tar.Writer, bundle Bundle) error {
+
+ if bundle.Manifest.Empty() {
+ return nil
+ }
+
+ var buf bytes.Buffer
+
+ if err := json.NewEncoder(&buf).Encode(bundle.Manifest); err != nil {
+ return err
+ }
+
+ return archive.WriteFile(tw, ManifestExt, buf.Bytes())
+}
+
+func writePatch(tw *tar.Writer, bundle Bundle) error {
+
+ var buf bytes.Buffer
+
+ if err := json.NewEncoder(&buf).Encode(bundle.Patch); err != nil {
+ return err
+ }
+
+ return archive.WriteFile(tw, patchFile, buf.Bytes())
+}
+
+func writeSignatures(tw *tar.Writer, bundle Bundle) error {
+
+ if bundle.Signatures.isEmpty() {
+ return nil
+ }
+
+ bs, err := json.MarshalIndent(bundle.Signatures, "", " ")
+ if err != nil {
+ return err
+ }
+
+ return archive.WriteFile(tw, fmt.Sprintf(".%v", SignaturesFile), bs)
+}
+
+func hashBundleFiles(hash SignatureHasher, b *Bundle) ([]FileInfo, error) {
+
+ files := []FileInfo{}
+
+ bs, err := hash.HashFile(b.Data)
+ if err != nil {
+ return files, err
+ }
+ files = append(files, NewFile(strings.TrimPrefix("data.json", "/"), hex.EncodeToString(bs), defaultHashingAlg))
+
+ if len(b.Wasm) != 0 {
+ bs, err := hash.HashFile(b.Wasm)
+ if err != nil {
+ return files, err
+ }
+ files = append(files, NewFile(strings.TrimPrefix(WasmFile, "/"), hex.EncodeToString(bs), defaultHashingAlg))
+ }
+
+ for _, wasmModule := range b.WasmModules {
+ bs, err := hash.HashFile(wasmModule.Raw)
+ if err != nil {
+ return files, err
+ }
+ files = append(files, NewFile(strings.TrimPrefix(wasmModule.Path, "/"), hex.EncodeToString(bs), defaultHashingAlg))
+ }
+
+ for _, planmodule := range b.PlanModules {
+ bs, err := hash.HashFile(planmodule.Raw)
+ if err != nil {
+ return files, err
+ }
+ files = append(files, NewFile(strings.TrimPrefix(planmodule.Path, "/"), hex.EncodeToString(bs), defaultHashingAlg))
+ }
+
+ // If the manifest is essentially empty, don't add it to the signatures since it
+ // won't be written to the bundle. Otherwise:
+ // parse the manifest into a JSON structure;
+ // then recursively order the fields of all objects alphabetically and then apply
+ // the hash function to result to compute the hash.
+ if !b.Manifest.Empty() {
+ mbs, err := json.Marshal(b.Manifest)
+ if err != nil {
+ return files, err
+ }
+
+ var result map[string]any
+ if err := util.Unmarshal(mbs, &result); err != nil {
+ return files, err
+ }
+
+ bs, err = hash.HashFile(result)
+ if err != nil {
+ return files, err
+ }
+
+ files = append(files, NewFile(strings.TrimPrefix(ManifestExt, "/"), hex.EncodeToString(bs), defaultHashingAlg))
+ }
+
+ return files, err
+}
+
+// FormatModules formats Rego modules
+// Modules will be formatted to comply with [ast.DefaultRegoVersion], but Rego compatibility of individual parsed modules will be respected (e.g. if 'rego.v1' is imported).
+func (b *Bundle) FormatModules(useModulePath bool) error {
+ return b.FormatModulesForRegoVersion(ast.DefaultRegoVersion, true, useModulePath)
+}
+
+// FormatModulesForRegoVersion formats Rego modules to comply with a given Rego version
+func (b *Bundle) FormatModulesForRegoVersion(version ast.RegoVersion, preserveModuleRegoVersion bool, useModulePath bool) error {
+ return b.FormatModulesWithOptions(BundleFormatOptions{
+ RegoVersion: version,
+ PreserveModuleRegoVersion: preserveModuleRegoVersion,
+ UseModulePath: useModulePath,
+ })
+}
+
+type BundleFormatOptions struct {
+ RegoVersion ast.RegoVersion
+ Capabilities *ast.Capabilities
+ PreserveModuleRegoVersion bool
+ UseModulePath bool
+}
+
+// FormatModulesWithOptions formats Rego modules with the given options.
+func (b *Bundle) FormatModulesWithOptions(opts BundleFormatOptions) error {
+ var err error
+
+ for i, module := range b.Modules {
+ fmtOpts := format.Opts{
+ RegoVersion: opts.RegoVersion,
+ Capabilities: opts.Capabilities,
+ }
+
+ if module.Parsed != nil {
+ fmtOpts.ParserOptions = &ast.ParserOptions{
+ RegoVersion: module.Parsed.RegoVersion(),
+ }
+ if opts.PreserveModuleRegoVersion {
+ fmtOpts.RegoVersion = module.Parsed.RegoVersion()
+ }
+ }
+
+ if fmtOpts.Capabilities == nil {
+ fmtOpts.Capabilities = ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(fmtOpts.RegoVersion))
+ }
+
+ if module.Raw == nil {
+ module.Raw, err = format.AstWithOpts(module.Parsed, fmtOpts)
+ if err != nil {
+ return err
+ }
+ } else {
+ p := module.URL
+ if opts.UseModulePath {
+ p = module.Path
+ }
+
+ module.Raw, err = format.SourceWithOpts(p, module.Raw, fmtOpts)
+ if err != nil {
+ return err
+ }
+ }
+ b.Modules[i].Raw = module.Raw
+ }
+ return nil
+}
+
+// GenerateSignature generates the signature for the given bundle.
+func (b *Bundle) GenerateSignature(signingConfig *SigningConfig, keyID string, useModulePath bool) error {
+
+ hash, err := NewSignatureHasher(HashingAlgorithm(defaultHashingAlg))
+ if err != nil {
+ return err
+ }
+
+ files := []FileInfo{}
+
+ for _, module := range b.Modules {
+ bytes, err := hash.HashFile(module.Raw)
+ if err != nil {
+ return err
+ }
+
+ path := module.URL
+ if useModulePath {
+ path = module.Path
+ }
+ files = append(files, NewFile(strings.TrimPrefix(path, "/"), hex.EncodeToString(bytes), defaultHashingAlg))
+ }
+
+ result, err := hashBundleFiles(hash, b)
+ if err != nil {
+ return err
+ }
+ files = append(files, result...)
+
+ // generate signed token
+ token, err := GenerateSignedToken(files, signingConfig, keyID)
+ if err != nil {
+ return err
+ }
+
+ if b.Signatures.isEmpty() {
+ b.Signatures = SignaturesConfig{}
+ }
+
+ if signingConfig.Plugin != "" {
+ b.Signatures.Plugin = signingConfig.Plugin
+ }
+
+ b.Signatures.Signatures = []string{token}
+
+ return nil
+}
+
+// ParsedModules returns a map of parsed modules with names that are
+// unique and human readable for the given a bundle name.
+func (b *Bundle) ParsedModules(bundleName string) map[string]*ast.Module {
+
+ mods := make(map[string]*ast.Module, len(b.Modules))
+
+ for _, mf := range b.Modules {
+ mods[modulePathWithPrefix(bundleName, mf.Path)] = mf.Parsed
+ }
+
+ return mods
+}
+
+func (b *Bundle) RegoVersion(def ast.RegoVersion) ast.RegoVersion {
+ if v := b.Manifest.RegoVersion; v != nil {
+ if *v == 0 {
+ return ast.RegoV0
+ } else if *v == 1 {
+ return ast.RegoV1
+ }
+ }
+ return def
+}
+
+func (b *Bundle) SetRegoVersion(v ast.RegoVersion) {
+ b.Manifest.SetRegoVersion(v)
+}
+
+// RegoVersionForFile returns the rego-version for the specified file path.
+// If there is no defined version for the given path, the default version def is returned.
+// If the version does not correspond to ast.RegoV0 or ast.RegoV1, an error is returned.
+func (b *Bundle) RegoVersionForFile(path string, def ast.RegoVersion) (ast.RegoVersion, error) {
+ version, err := b.Manifest.numericRegoVersionForFile(path)
+ if err != nil {
+ return def, err
+ } else if version == nil {
+ return def, nil
+ } else if *version == 0 {
+ return ast.RegoV0, nil
+ } else if *version == 1 {
+ return ast.RegoV1, nil
+ }
+ return def, fmt.Errorf("unknown bundle rego-version %d for file '%s'", *version, path)
+}
+
+func (m *Manifest) RegoVersionForFile(path string) (ast.RegoVersion, error) {
+ v, err := m.numericRegoVersionForFile(path)
+ if err != nil {
+ return ast.RegoUndefined, err
+ }
+
+ if v == nil {
+ return ast.RegoUndefined, nil
+ }
+
+ return ast.RegoVersionFromInt(*v), nil
+}
+
+func (m *Manifest) numericRegoVersionForFile(path string) (*int, error) {
+ var version *int
+
+ if len(m.FileRegoVersions) != len(m.compiledFileRegoVersions) {
+ m.compiledFileRegoVersions = make([]fileRegoVersion, 0, len(m.FileRegoVersions))
+ for pattern, v := range m.FileRegoVersions {
+ compiled, err := glob.Compile(pattern)
+ if err != nil {
+ return nil, fmt.Errorf("failed to compile glob pattern %s: %s", pattern, err)
+ }
+ m.compiledFileRegoVersions = append(m.compiledFileRegoVersions, fileRegoVersion{compiled, v})
+ }
+ }
+
+ for _, fv := range m.compiledFileRegoVersions {
+ if fv.path.Match(path) {
+ version = &fv.version
+ break
+ }
+ }
+
+ if version == nil {
+ version = m.RegoVersion
+ }
+ return version, nil
+}
+
+// Equal returns true if this bundle's contents equal the other bundle's
+// contents.
+func (b Bundle) Equal(other Bundle) bool {
+ if !reflect.DeepEqual(b.Data, other.Data) {
+ return false
+ }
+
+ if len(b.Modules) != len(other.Modules) {
+ return false
+ }
+ for i := range b.Modules {
+ // To support bundles built from rootless filesystems we ignore a "/" prefix
+ // for URLs and Paths, such that "/file" and "file" are equivalent
+ if strings.TrimPrefix(b.Modules[i].URL, string(filepath.Separator)) !=
+ strings.TrimPrefix(other.Modules[i].URL, string(filepath.Separator)) {
+ return false
+ }
+ if strings.TrimPrefix(b.Modules[i].Path, string(filepath.Separator)) !=
+ strings.TrimPrefix(other.Modules[i].Path, string(filepath.Separator)) {
+ return false
+ }
+ if !b.Modules[i].Parsed.Equal(other.Modules[i].Parsed) {
+ return false
+ }
+ if !bytes.Equal(b.Modules[i].Raw, other.Modules[i].Raw) {
+ return false
+ }
+ }
+ if (b.Wasm == nil && other.Wasm != nil) || (b.Wasm != nil && other.Wasm == nil) {
+ return false
+ }
+
+ return bytes.Equal(b.Wasm, other.Wasm)
+}
+
+// Copy returns a deep copy of the bundle.
+func (b Bundle) Copy() Bundle {
+
+ // Copy data.
+ var x any = b.Data
+
+ if err := util.RoundTrip(&x); err != nil {
+ panic(err)
+ }
+
+ if x != nil {
+ b.Data = x.(map[string]any)
+ }
+
+ // Copy modules.
+ for i := range b.Modules {
+ bs := make([]byte, len(b.Modules[i].Raw))
+ copy(bs, b.Modules[i].Raw)
+ b.Modules[i].Raw = bs
+ b.Modules[i].Parsed = b.Modules[i].Parsed.Copy()
+ }
+
+ // Copy manifest.
+ b.Manifest = b.Manifest.Copy()
+
+ return b
+}
+
+func (b *Bundle) insertData(key []string, value any) error {
+ // Build an object with the full structure for the value
+ obj, err := mktree(key, value)
+ if err != nil {
+ return err
+ }
+
+ // Merge the new data in with the current bundle data object
+ merged, ok := merge.InterfaceMaps(b.Data, obj)
+ if !ok {
+ return fmt.Errorf("failed to insert data file from path %s", filepath.Join(key...))
+ }
+
+ b.Data = merged
+
+ return nil
+}
+
+func (b *Bundle) readData(key []string) *any {
+
+ if len(key) == 0 {
+ if len(b.Data) == 0 {
+ return nil
+ }
+ var result any = b.Data
+ return &result
+ }
+
+ node := b.Data
+
+ for i := range len(key) - 1 {
+
+ child, ok := node[key[i]]
+ if !ok {
+ return nil
+ }
+
+ childObj, ok := child.(map[string]any)
+ if !ok {
+ return nil
+ }
+
+ node = childObj
+ }
+
+ child, ok := node[key[len(key)-1]]
+ if !ok {
+ return nil
+ }
+
+ return &child
+}
+
+// Type returns the type of the bundle.
+func (b *Bundle) Type() string {
+ if len(b.Patch.Data) != 0 {
+ return DeltaBundleType
+ }
+ return SnapshotBundleType
+}
+
+func mktree(path []string, value any) (map[string]any, error) {
+ if len(path) == 0 {
+ // For 0 length path the value is the full tree.
+ obj, ok := value.(map[string]any)
+ if !ok {
+ return nil, errors.New("root value must be object")
+ }
+ return obj, nil
+ }
+
+ dir := map[string]any{}
+ for i := len(path) - 1; i > 0; i-- {
+ dir[path[i]] = value
+ value = dir
+ dir = map[string]any{}
+ }
+ dir[path[0]] = value
+
+ return dir, nil
+}
+
+// Merge accepts a set of bundles and merges them into a single result bundle. If there are
+// any conflicts during the merge (e.g., with roots) an error is returned. The result bundle
+// will have an empty revision except in the special case where a single bundle is provided
+// (and in that case the bundle is just returned unmodified.)
+func Merge(bundles []*Bundle) (*Bundle, error) {
+ return MergeWithRegoVersion(bundles, ast.DefaultRegoVersion, false)
+}
+
+// MergeWithRegoVersion creates a merged bundle from the provided bundles, similar to Merge.
+// If more than one bundle is provided, the rego version of the result bundle is set to the provided regoVersion.
+// Any Rego files in a bundle of conflicting rego version will be marked in the result's manifest with the rego version
+// of its original bundle. If the Rego file already had an overriding rego version, it will be preserved.
+// If a single bundle is provided, it will retain any rego version information it already had. If it has none, the
+// provided regoVersion will be applied to it.
+// If usePath is true, per-file rego-versions will be calculated using the file's ModuleFile.Path; otherwise, the file's
+// ModuleFile.URL will be used.
+func MergeWithRegoVersion(bundles []*Bundle, regoVersion ast.RegoVersion, usePath bool) (*Bundle, error) {
+
+ if len(bundles) == 0 {
+ return nil, errors.New("expected at least one bundle")
+ }
+
+ if regoVersion == ast.RegoUndefined {
+ regoVersion = ast.DefaultRegoVersion
+ }
+
+ if len(bundles) == 1 {
+ result := bundles[0]
+ // We respect the bundle rego-version, defaulting to the provided rego version if not set.
+ result.SetRegoVersion(result.RegoVersion(regoVersion))
+ fileRegoVersions, err := bundleRegoVersions(result, result.RegoVersion(regoVersion), usePath)
+ if err != nil {
+ return nil, err
+ }
+ result.Manifest.FileRegoVersions = fileRegoVersions
+ return result, nil
+ }
+
+ var roots []string
+ var result Bundle
+
+ for _, b := range bundles {
+
+ if b.Manifest.Roots == nil {
+ return nil, errors.New("bundle manifest not initialized")
+ }
+
+ roots = append(roots, *b.Manifest.Roots...)
+
+ result.Modules = append(result.Modules, b.Modules...)
+
+ for _, root := range *b.Manifest.Roots {
+ key := strings.Split(root, "/")
+ if val := b.readData(key); val != nil {
+ if err := result.insertData(key, *val); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ result.Manifest.WasmResolvers = append(result.Manifest.WasmResolvers, b.Manifest.WasmResolvers...)
+ result.WasmModules = append(result.WasmModules, b.WasmModules...)
+ result.PlanModules = append(result.PlanModules, b.PlanModules...)
+
+ if b.Manifest.RegoVersion != nil || len(b.Manifest.FileRegoVersions) > 0 {
+ if result.Manifest.FileRegoVersions == nil {
+ result.Manifest.FileRegoVersions = map[string]int{}
+ }
+
+ fileRegoVersions, err := bundleRegoVersions(b, regoVersion, usePath)
+ if err != nil {
+ return nil, err
+ }
+ maps.Copy(result.Manifest.FileRegoVersions, fileRegoVersions)
+ }
+ }
+
+ // We respect the bundle rego-version, defaulting to the provided rego version if not set.
+ result.SetRegoVersion(result.RegoVersion(regoVersion))
+
+ if result.Data == nil {
+ result.Data = map[string]any{}
+ }
+
+ result.Manifest.Roots = &roots
+
+ if err := result.Manifest.validateAndInjectDefaults(result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func bundleRegoVersions(bundle *Bundle, regoVersion ast.RegoVersion, usePath bool) (map[string]int, error) {
+ fileRegoVersions := map[string]int{}
+
+ // we drop the bundle-global rego versions and record individual rego versions for each module.
+ for _, m := range bundle.Modules {
+ // We fetch rego-version by the path relative to the bundle root, as the complete path of the module might
+ // contain the path between OPA working directory and the bundle root.
+ v, err := bundle.RegoVersionForFile(bundleRelativePath(m, usePath), bundle.RegoVersion(regoVersion))
+ if err != nil {
+ return nil, err
+ }
+
+ // only record the rego version if it's different from the one applied globally to the result bundle
+ if v != ast.RegoUndefined {
+ if regoVersion == ast.RegoUndefined {
+ // We store the rego version by the absolute path to the bundle root, as this will be the - possibly new - path
+ // to the module inside the merged bundle.
+ fileRegoVersions[bundleAbsolutePath(m, usePath)] = v.Int()
+ } else {
+ vInt := v.Int()
+ gVInt := regoVersion.Int()
+ if vInt != gVInt {
+ fileRegoVersions[bundleAbsolutePath(m, usePath)] = vInt
+ }
+ }
+ }
+ }
+
+ return fileRegoVersions, nil
+}
+
+func bundleRelativePath(m ModuleFile, usePath bool) string {
+ p := m.RelativePath
+ if p == "" {
+ if usePath {
+ p = m.Path
+ } else {
+ p = m.URL
+ }
+ }
+ return p
+}
+
+func bundleAbsolutePath(m ModuleFile, usePath bool) string {
+ var p string
+ if usePath {
+ p = m.Path
+ } else {
+ p = m.URL
+ }
+ if !path.IsAbs(p) {
+ p = "/" + p
+ }
+ return path.Clean(p)
+}
+
+// RootPathsOverlap takes in two bundle root paths and returns true if they overlap.
+func RootPathsOverlap(pathA string, pathB string) bool {
+ a := rootPathSegments(pathA)
+ b := rootPathSegments(pathB)
+ return rootContains(a, b) || rootContains(b, a)
+}
+
+// RootPathsContain takes a set of bundle root paths and returns true if the path is contained.
+func RootPathsContain(roots []string, path string) bool {
+ segments := rootPathSegments(path)
+ for i := range roots {
+ if rootContains(rootPathSegments(roots[i]), segments) {
+ return true
+ }
+ }
+ return false
+}
+
+func rootPathSegments(path string) []string {
+ return strings.Split(path, "/")
+}
+
+func rootContains(root []string, other []string) bool {
+
+ // A single segment, empty string root always contains the other.
+ if len(root) == 1 && root[0] == "" {
+ return true
+ }
+
+ if len(root) > len(other) {
+ return false
+ }
+
+ for j := range root {
+ if root[j] != other[j] {
+ return false
+ }
+ }
+
+ return true
+}
+
+func insertValue(b *Bundle, path string, value any) error {
+ if err := b.insertData(getNormalizedPath(path), value); err != nil {
+ return fmt.Errorf("bundle load failed on %v: %w", path, err)
+ }
+ return nil
+}
+
+func getNormalizedPath(path string) []string {
+ // Remove leading / and . characters from the directory path. If the bundle
+ // was written with OPA then the paths will contain a leading slash. On the
+ // other hand, if the path is empty, filepath.Dir will return '.'.
+ // Note: filepath.Dir can return paths with '\' separators, always use
+ // filepath.ToSlash to keep them normalized.
+ dirpath := strings.TrimLeft(normalizePath(filepath.Dir(path)), "/.")
+ var key []string
+ if dirpath != "" {
+ key = strings.Split(dirpath, "/")
+ }
+ return key
+}
+
+func dfs(value any, path string, fn func(string, any) (bool, error)) error {
+ if stop, err := fn(path, value); err != nil {
+ return err
+ } else if stop {
+ return nil
+ }
+ obj, ok := value.(map[string]any)
+ if !ok {
+ return nil
+ }
+ for key := range obj {
+ if err := dfs(obj[key], path+"/"+key, fn); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func modulePathWithPrefix(bundleName string, modulePath string) string {
+ // Default prefix is just the bundle name
+ prefix := bundleName
+
+ // Bundle names are sometimes just file paths, some of which
+ // are full urls (file:///foo/). Parse these and only use the path.
+ parsed, err := url.Parse(bundleName)
+ if err == nil {
+ prefix = filepath.Join(parsed.Host, parsed.Path)
+ }
+
+ // Note: filepath.Join can return paths with '\' separators, always use
+ // filepath.ToSlash to keep them normalized.
+ return normalizePath(filepath.Join(prefix, modulePath))
+}
+
+// IsStructuredDoc checks if the file name equals a structured file extension ex. ".json"
+func IsStructuredDoc(name string) bool {
+ return filepath.Base(name) == dataFile || filepath.Base(name) == yamlDataFile ||
+ filepath.Base(name) == SignaturesFile || filepath.Base(name) == ManifestExt
+}
+
+func preProcessBundle(loader DirectoryLoader, skipVerify bool, sizeLimitBytes int64) (SignaturesConfig, Patch, []*Descriptor, error) {
+ descriptors := []*Descriptor{}
+ var signatures SignaturesConfig
+ var patch Patch
+
+ for {
+ f, err := loader.NextFile()
+ if err == io.EOF {
+ break
+ }
+
+ if err != nil {
+ return signatures, patch, nil, fmt.Errorf("bundle read failed: %w", err)
+ }
+
+ // check for the signatures file
+ if !skipVerify && strings.HasSuffix(f.Path(), SignaturesFile) {
+ buf, err := readFile(f, sizeLimitBytes)
+ if err != nil {
+ return signatures, patch, nil, err
+ }
+
+ if err := util.NewJSONDecoder(&buf).Decode(&signatures); err != nil {
+ return signatures, patch, nil, fmt.Errorf("bundle load failed on signatures decode: %w", err)
+ }
+ } else if !strings.HasSuffix(f.Path(), SignaturesFile) {
+ descriptors = append(descriptors, f)
+
+ if filepath.Base(f.Path()) == patchFile {
+
+ var b bytes.Buffer
+ tee := io.TeeReader(f.reader, &b)
+ f.reader = tee
+
+ buf, err := readFile(f, sizeLimitBytes)
+ if err != nil {
+ return signatures, patch, nil, err
+ }
+
+ if err := util.NewJSONDecoder(&buf).Decode(&patch); err != nil {
+ return signatures, patch, nil, fmt.Errorf("bundle load failed on patch decode: %w", err)
+ }
+
+ f.reader = &b
+ }
+ }
+ }
+ return signatures, patch, descriptors, nil
+}
+
+func readFile(f *Descriptor, sizeLimitBytes int64) (bytes.Buffer, error) {
+ // Case for pre-loaded byte buffers, like those from the tarballLoader.
+ if bb, ok := f.reader.(*bytes.Buffer); ok {
+ _ = f.Close() // always close, even on error
+
+ if int64(bb.Len()) >= sizeLimitBytes {
+ return *bb, fmt.Errorf("bundle file '%v' size (%d bytes) exceeded max size (%v bytes)",
+ strings.TrimPrefix(f.Path(), "/"), bb.Len(), sizeLimitBytes-1)
+ }
+
+ return *bb, nil
+ }
+
+ // Case for *lazyFile readers:
+ if lf, ok := f.reader.(*lazyFile); ok {
+ var buf bytes.Buffer
+ if lf.file == nil {
+ var err error
+ if lf.file, err = os.Open(lf.path); err != nil {
+ return buf, fmt.Errorf("failed to open file %s: %w", f.path, err)
+ }
+ }
+ // Bail out if we can't read the whole file-- there's nothing useful we can do at that point!
+ fileSize, _ := fstatFileSize(lf.file)
+ if fileSize > sizeLimitBytes {
+ return buf, fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(f.Path(), "/"), fileSize, sizeLimitBytes-1)
+ }
+ // Prealloc the buffer for the file read.
+ buffer := make([]byte, fileSize)
+ _, err := io.ReadFull(lf.file, buffer)
+ if err != nil {
+ return buf, err
+ }
+ _ = lf.file.Close() // always close, even on error
+
+ // Note(philipc): Replace the lazyFile reader in the *Descriptor with a
+ // pointer to the wrapping bytes.Buffer, so that we don't re-read the
+ // file on disk again by accident.
+ buf = *bytes.NewBuffer(buffer)
+ f.reader = &buf
+ return buf, nil
+ }
+
+ // Fallback case:
+ var buf bytes.Buffer
+ n, err := f.Read(&buf, sizeLimitBytes)
+ _ = f.Close() // always close, even on error
+
+ if err != nil && err != io.EOF {
+ return buf, err
+ } else if err == nil && n >= sizeLimitBytes {
+ return buf, fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(f.Path(), "/"), n, sizeLimitBytes-1)
+ }
+
+ return buf, nil
+}
+
+// Takes an already open file handle and invokes the os.Stat system call on it
+// to determine the file's size. Passes any errors from *File.Stat on up to the
+// caller.
+func fstatFileSize(f *os.File) (int64, error) {
+ fileInfo, err := f.Stat()
+ if err != nil {
+ return 0, err
+ }
+ return fileInfo.Size(), nil
+}
+
+func normalizePath(p string) string {
+ return filepath.ToSlash(p)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/file.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/file.go
new file mode 100644
index 0000000000..d008c3d44c
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/file.go
@@ -0,0 +1,517 @@
+package bundle
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/open-policy-agent/opa/v1/loader/filter"
+
+ "github.com/open-policy-agent/opa/v1/storage"
+)
+
+const maxSizeLimitBytesErrMsg = "bundle file %s size (%d bytes) exceeds configured size_limit_bytes (%d bytes)"
+
+// Descriptor contains information about a file and
+// can be used to read the file contents.
+type Descriptor struct {
+ url string
+ path string
+ reader io.Reader
+ closer io.Closer
+ closeOnce *sync.Once
+}
+
+// lazyFile defers reading the file until the first call of Read
+type lazyFile struct {
+ path string
+ file *os.File
+}
+
+// newLazyFile creates a new instance of lazyFile
+func newLazyFile(path string) *lazyFile {
+ return &lazyFile{path: path}
+}
+
+// Read implements io.Reader. It will check if the file has been opened
+// and open it if it has not before attempting to read using the file's
+// read method
+func (f *lazyFile) Read(b []byte) (int, error) {
+ var err error
+
+ if f.file == nil {
+ if f.file, err = os.Open(f.path); err != nil {
+ return 0, fmt.Errorf("failed to open file %s: %w", f.path, err)
+ }
+ }
+
+ return f.file.Read(b)
+}
+
+// Close closes the lazy file if it has been opened using the file's
+// close method
+func (f *lazyFile) Close() error {
+ if f.file != nil {
+ return f.file.Close()
+ }
+
+ return nil
+}
+
+func NewDescriptor(url, path string, reader io.Reader) *Descriptor {
+ return &Descriptor{
+ url: url,
+ path: path,
+ reader: reader,
+ }
+}
+
+func (d *Descriptor) WithCloser(closer io.Closer) *Descriptor {
+ d.closer = closer
+ d.closeOnce = new(sync.Once)
+ return d
+}
+
+// Path returns the path of the file.
+func (d *Descriptor) Path() string {
+ return d.path
+}
+
+// URL returns the url of the file.
+func (d *Descriptor) URL() string {
+ return d.url
+}
+
+// Read will read all the contents from the file the Descriptor refers to
+// into the dest writer up n bytes. Will return an io.EOF error
+// if EOF is encountered before n bytes are read.
+func (d *Descriptor) Read(dest io.Writer, n int64) (int64, error) {
+ n, err := io.CopyN(dest, d.reader, n)
+ return n, err
+}
+
+// Close the file, on some Loader implementations this might be a no-op.
+// It should *always* be called regardless of file.
+func (d *Descriptor) Close() error {
+ var err error
+ if d.closer != nil {
+ d.closeOnce.Do(func() {
+ err = d.closer.Close()
+ })
+ }
+ return err
+}
+
+type PathFormat int64
+
+const (
+ Chrooted PathFormat = iota
+ SlashRooted
+ Passthrough
+)
+
+// DirectoryLoader defines an interface which can be used to load
+// files from a directory by iterating over each one in the tree.
+type DirectoryLoader interface {
+ // NextFile must return io.EOF if there is no next value. The returned
+ // descriptor should *always* be closed when no longer needed.
+ NextFile() (*Descriptor, error)
+ WithFilter(filter filter.LoaderFilter) DirectoryLoader
+ WithPathFormat(PathFormat) DirectoryLoader
+ WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader
+ WithFollowSymlinks(followSymlinks bool) DirectoryLoader
+}
+
+type dirLoader struct {
+ root string
+ files []string
+ idx int
+ filter filter.LoaderFilter
+ pathFormat PathFormat
+ maxSizeLimitBytes int64
+ followSymlinks bool
+}
+
+// Normalize root directory, ex "./src/bundle" -> "src/bundle"
+// We don't need an absolute path, but this makes the joined/trimmed
+// paths more uniform.
+func normalizeRootDirectory(root string) string {
+ if len(root) > 1 {
+ if root[0] == '.' && root[1] == filepath.Separator {
+ if len(root) == 2 {
+ root = root[:1] // "./" -> "."
+ } else {
+ root = root[2:] // remove leading "./"
+ }
+ }
+ }
+ return root
+}
+
+// NewDirectoryLoader returns a basic DirectoryLoader implementation
+// that will load files from a given root directory path.
+func NewDirectoryLoader(root string) DirectoryLoader {
+ d := dirLoader{
+ root: normalizeRootDirectory(root),
+ pathFormat: Chrooted,
+ }
+ return &d
+}
+
+// WithFilter specifies the filter object to use to filter files while loading bundles
+func (d *dirLoader) WithFilter(filter filter.LoaderFilter) DirectoryLoader {
+ d.filter = filter
+ return d
+}
+
+// WithPathFormat specifies how a path is formatted in a Descriptor
+func (d *dirLoader) WithPathFormat(pathFormat PathFormat) DirectoryLoader {
+ d.pathFormat = pathFormat
+ return d
+}
+
+// WithSizeLimitBytes specifies the maximum size of any file in the directory to read
+func (d *dirLoader) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader {
+ d.maxSizeLimitBytes = sizeLimitBytes
+ return d
+}
+
+// WithFollowSymlinks specifies whether to follow symlinks when loading files from the directory
+func (d *dirLoader) WithFollowSymlinks(followSymlinks bool) DirectoryLoader {
+ d.followSymlinks = followSymlinks
+ return d
+}
+
+func formatPath(fileName string, root string, pathFormat PathFormat) string {
+ switch pathFormat {
+ case SlashRooted:
+ if !strings.HasPrefix(fileName, string(filepath.Separator)) {
+ return string(filepath.Separator) + fileName
+ }
+ return fileName
+ case Chrooted:
+ // Trim off the root directory and return path as if chrooted
+ result := strings.TrimPrefix(fileName, filepath.FromSlash(root))
+ if root == "." && filepath.Base(fileName) == ManifestExt {
+ result = fileName
+ }
+ if !strings.HasPrefix(result, string(filepath.Separator)) {
+ result = string(filepath.Separator) + result
+ }
+ return result
+ case Passthrough:
+ fallthrough
+ default:
+ return fileName
+ }
+}
+
+// NextFile iterates to the next file in the directory tree
+// and returns a file Descriptor for the file.
+func (d *dirLoader) NextFile() (*Descriptor, error) {
+ // build a list of all files we will iterate over and read, but only one time
+ if d.files == nil {
+ d.files = []string{}
+ err := filepath.Walk(d.root, func(path string, info os.FileInfo, _ error) error {
+ if info == nil {
+ return nil
+ }
+
+ if info.Mode().IsRegular() {
+ if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) {
+ return nil
+ }
+ if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes {
+ return fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(path, "/"), info.Size(), d.maxSizeLimitBytes)
+ }
+ d.files = append(d.files, path)
+ } else if d.followSymlinks && info.Mode().Type()&fs.ModeSymlink == fs.ModeSymlink {
+ if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) {
+ return nil
+ }
+ if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes {
+ return fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(path, "/"), info.Size(), d.maxSizeLimitBytes)
+ }
+ d.files = append(d.files, path)
+ } else if info.Mode().IsDir() {
+ if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) {
+ return filepath.SkipDir
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to list files: %w", err)
+ }
+ }
+
+ // If done reading files then just return io.EOF
+ // errors for each NextFile() call
+ if d.idx >= len(d.files) {
+ return nil, io.EOF
+ }
+
+ fileName := d.files[d.idx]
+ d.idx++
+ fh := newLazyFile(fileName)
+
+ cleanedPath := formatPath(fileName, d.root, d.pathFormat)
+ f := NewDescriptor(filepath.Join(d.root, cleanedPath), cleanedPath, fh).WithCloser(fh)
+ return f, nil
+}
+
+type tarballLoader struct {
+ baseURL string
+ r io.Reader
+ tr *tar.Reader
+ files []file
+ idx int
+ filter filter.LoaderFilter
+ skipDir map[string]struct{}
+ pathFormat PathFormat
+ maxSizeLimitBytes int64
+}
+
+type file struct {
+ name string
+ reader io.Reader
+ path storage.Path
+ raw []byte
+}
+
+// NewTarballLoader is deprecated. Use NewTarballLoaderWithBaseURL instead.
+func NewTarballLoader(r io.Reader) DirectoryLoader {
+ l := tarballLoader{
+ r: r,
+ pathFormat: Passthrough,
+ }
+ return &l
+}
+
+// NewTarballLoaderWithBaseURL returns a new DirectoryLoader that reads
+// files out of a gzipped tar archive. The file URLs will be prefixed
+// with the baseURL.
+func NewTarballLoaderWithBaseURL(r io.Reader, baseURL string) DirectoryLoader {
+ l := tarballLoader{
+ baseURL: strings.TrimSuffix(baseURL, "/"),
+ r: r,
+ pathFormat: Passthrough,
+ }
+ return &l
+}
+
+// WithFilter specifies the filter object to use to filter files while loading bundles
+func (t *tarballLoader) WithFilter(filter filter.LoaderFilter) DirectoryLoader {
+ t.filter = filter
+ return t
+}
+
+// WithPathFormat specifies how a path is formatted in a Descriptor
+func (t *tarballLoader) WithPathFormat(pathFormat PathFormat) DirectoryLoader {
+ t.pathFormat = pathFormat
+ return t
+}
+
+// WithSizeLimitBytes specifies the maximum size of any file in the tarball to read
+func (t *tarballLoader) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader {
+ t.maxSizeLimitBytes = sizeLimitBytes
+ return t
+}
+
+// WithFollowSymlinks is a no-op for tarballLoader
+func (t *tarballLoader) WithFollowSymlinks(_ bool) DirectoryLoader {
+ return t
+}
+
+// NextFile iterates to the next file in the directory tree
+// and returns a file Descriptor for the file.
+func (t *tarballLoader) NextFile() (*Descriptor, error) {
+ if t.tr == nil {
+ gr, err := gzip.NewReader(t.r)
+ if err != nil {
+ return nil, fmt.Errorf("archive read failed: %w", err)
+ }
+
+ t.tr = tar.NewReader(gr)
+ }
+
+ if t.files == nil {
+ t.files = []file{}
+
+ if t.skipDir == nil {
+ t.skipDir = map[string]struct{}{}
+ }
+
+ for {
+ header, err := t.tr.Next()
+
+ if err == io.EOF {
+ break
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ // Keep iterating on the archive until we find a normal file
+ if header.Typeflag == tar.TypeReg {
+
+ if t.filter != nil {
+
+ if t.filter(filepath.ToSlash(header.Name), header.FileInfo(), getdepth(header.Name, false)) {
+ continue
+ }
+
+ basePath := strings.Trim(filepath.Dir(filepath.ToSlash(header.Name)), "/")
+
+ // check if the directory is to be skipped
+ if _, ok := t.skipDir[basePath]; ok {
+ continue
+ }
+
+ match := false
+ for p := range t.skipDir {
+ if strings.HasPrefix(basePath, p) {
+ match = true
+ break
+ }
+ }
+
+ if match {
+ continue
+ }
+ }
+
+ if t.maxSizeLimitBytes > 0 && header.Size > t.maxSizeLimitBytes {
+ return nil, fmt.Errorf(maxSizeLimitBytesErrMsg, header.Name, header.Size, t.maxSizeLimitBytes)
+ }
+
+ f := file{name: header.Name}
+
+ // Note(philipc): We rely on the previous size check in this loop for safety.
+ buf := bytes.NewBuffer(make([]byte, 0, header.Size))
+ if _, err := io.Copy(buf, t.tr); err != nil {
+ return nil, fmt.Errorf("failed to copy file %s: %w", header.Name, err)
+ }
+
+ f.reader = buf
+
+ t.files = append(t.files, f)
+ } else if header.Typeflag == tar.TypeDir {
+ cleanedPath := filepath.ToSlash(header.Name)
+ if t.filter != nil && t.filter(cleanedPath, header.FileInfo(), getdepth(header.Name, true)) {
+ t.skipDir[strings.Trim(cleanedPath, "/")] = struct{}{}
+ }
+ }
+ }
+ }
+
+ // If done reading files then just return io.EOF
+ // errors for each NextFile() call
+ if t.idx >= len(t.files) {
+ return nil, io.EOF
+ }
+
+ f := t.files[t.idx]
+ t.idx++
+
+ cleanedPath := formatPath(f.name, "", t.pathFormat)
+ d := NewDescriptor(filepath.Join(t.baseURL, cleanedPath), cleanedPath, f.reader)
+ return d, nil
+}
+
+// Next implements the storage.Iterator interface.
+// It iterates to the next policy or data file in the directory tree
+// and returns a storage.Update for the file.
+func (it *iterator) Next() (*storage.Update, error) {
+ if it.files == nil {
+ it.files = []file{}
+
+ for _, item := range it.raw {
+ f := file{name: item.Path}
+
+ p, err := getFileStoragePath(f.name)
+ if err != nil {
+ return nil, err
+ }
+
+ f.path = p
+
+ f.raw = item.Value
+
+ it.files = append(it.files, f)
+ }
+
+ sortFilePathAscend(it.files)
+ }
+
+ // If done reading files then just return io.EOF
+ // errors for each NextFile() call
+ if it.idx >= len(it.files) {
+ return nil, io.EOF
+ }
+
+ f := it.files[it.idx]
+ it.idx++
+
+ var isPolicy bool
+ if strings.HasSuffix(f.name, RegoExt) {
+ isPolicy = true
+ }
+
+ return &storage.Update{
+ Path: f.path,
+ Value: f.raw,
+ IsPolicy: isPolicy,
+ }, nil
+}
+
+type iterator struct {
+ raw []Raw
+ files []file
+ idx int
+}
+
+func NewIterator(raw []Raw) storage.Iterator {
+ it := iterator{
+ raw: raw,
+ }
+ return &it
+}
+
+func sortFilePathAscend(files []file) {
+ sort.Slice(files, func(i, j int) bool {
+ return len(files[i].path) < len(files[j].path)
+ })
+}
+
+func getdepth(path string, isDir bool) int {
+ if isDir {
+ cleanedPath := strings.Trim(filepath.ToSlash(path), "/")
+ return len(strings.Split(cleanedPath, "/"))
+ }
+
+ basePath := strings.Trim(filepath.Dir(filepath.ToSlash(path)), "/")
+ return len(strings.Split(basePath, "/"))
+}
+
+func getFileStoragePath(path string) (storage.Path, error) {
+ fpath := strings.TrimLeft(normalizePath(filepath.Dir(path)), "/.")
+ if strings.HasSuffix(path, RegoExt) {
+ fpath = strings.Trim(normalizePath(path), "/")
+ }
+
+ p, ok := storage.ParsePathEscaped("/" + fpath)
+ if !ok {
+ return nil, fmt.Errorf("storage path invalid: %v", path)
+ }
+ return p, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/filefs.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/filefs.go
new file mode 100644
index 0000000000..7ab3de989c
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/filefs.go
@@ -0,0 +1,143 @@
+//go:build go1.16
+// +build go1.16
+
+package bundle
+
+import (
+ "fmt"
+ "io"
+ "io/fs"
+ "path/filepath"
+ "sync"
+
+ "github.com/open-policy-agent/opa/v1/loader/filter"
+)
+
+const (
+ defaultFSLoaderRoot = "."
+)
+
+type dirLoaderFS struct {
+ sync.Mutex
+ filesystem fs.FS
+ files []string
+ idx int
+ filter filter.LoaderFilter
+ root string
+ pathFormat PathFormat
+ maxSizeLimitBytes int64
+ followSymlinks bool
+}
+
+// NewFSLoader returns a basic DirectoryLoader implementation
+// that will load files from a fs.FS interface
+func NewFSLoader(filesystem fs.FS) (DirectoryLoader, error) {
+ return NewFSLoaderWithRoot(filesystem, defaultFSLoaderRoot), nil
+}
+
+// NewFSLoaderWithRoot returns a basic DirectoryLoader implementation
+// that will load files from a fs.FS interface at the supplied root
+func NewFSLoaderWithRoot(filesystem fs.FS, root string) DirectoryLoader {
+ d := dirLoaderFS{
+ filesystem: filesystem,
+ root: normalizeRootDirectory(root),
+ pathFormat: Chrooted,
+ }
+
+ return &d
+}
+
+func (d *dirLoaderFS) walkDir(path string, dirEntry fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if dirEntry != nil {
+ info, err := dirEntry.Info()
+ if err != nil {
+ return err
+ }
+
+ if dirEntry.Type().IsRegular() {
+ if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) {
+ return nil
+ }
+
+ if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes {
+ return fmt.Errorf("file %s size %d exceeds limit of %d", path, info.Size(), d.maxSizeLimitBytes)
+ }
+
+ d.files = append(d.files, path)
+ } else if dirEntry.Type()&fs.ModeSymlink != 0 && d.followSymlinks {
+ if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) {
+ return nil
+ }
+
+ if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes {
+ return fmt.Errorf("file %s size %d exceeds limit of %d", path, info.Size(), d.maxSizeLimitBytes)
+ }
+
+ d.files = append(d.files, path)
+ } else if dirEntry.Type().IsDir() {
+ if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) {
+ return fs.SkipDir
+ }
+ }
+ }
+ return nil
+}
+
+// WithFilter specifies the filter object to use to filter files while loading bundles
+func (d *dirLoaderFS) WithFilter(filter filter.LoaderFilter) DirectoryLoader {
+ d.filter = filter
+ return d
+}
+
+// WithPathFormat specifies how a path is formatted in a Descriptor
+func (d *dirLoaderFS) WithPathFormat(pathFormat PathFormat) DirectoryLoader {
+ d.pathFormat = pathFormat
+ return d
+}
+
+// WithSizeLimitBytes specifies the maximum size of any file in the filesystem directory to read
+func (d *dirLoaderFS) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader {
+ d.maxSizeLimitBytes = sizeLimitBytes
+ return d
+}
+
+func (d *dirLoaderFS) WithFollowSymlinks(followSymlinks bool) DirectoryLoader {
+ d.followSymlinks = followSymlinks
+ return d
+}
+
+// NextFile iterates to the next file in the directory tree
+// and returns a file Descriptor for the file.
+func (d *dirLoaderFS) NextFile() (*Descriptor, error) {
+ d.Lock()
+ defer d.Unlock()
+
+ if d.files == nil {
+ err := fs.WalkDir(d.filesystem, d.root, d.walkDir)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list files: %w", err)
+ }
+ }
+
+ // If done reading files then just return io.EOF
+ // errors for each NextFile() call
+ if d.idx >= len(d.files) {
+ return nil, io.EOF
+ }
+
+ fileName := d.files[d.idx]
+ d.idx++
+
+ fh, err := d.filesystem.Open(fileName)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open file %s: %w", fileName, err)
+ }
+
+ cleanedPath := formatPath(fileName, d.root, d.pathFormat)
+ f := NewDescriptor(cleanedPath, cleanedPath, fh).WithCloser(fh)
+ return f, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/hash.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/hash.go
new file mode 100644
index 0000000000..5a62d2dc00
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/hash.go
@@ -0,0 +1,136 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package bundle
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/json"
+ "fmt"
+ "hash"
+ "io"
+ "strings"
+
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+// HashingAlgorithm represents a subset of hashing algorithms implemented in Go
+type HashingAlgorithm string
+
+// Supported values for HashingAlgorithm
+const (
+ MD5 HashingAlgorithm = "MD5"
+ SHA1 HashingAlgorithm = "SHA-1"
+ SHA224 HashingAlgorithm = "SHA-224"
+ SHA256 HashingAlgorithm = "SHA-256"
+ SHA384 HashingAlgorithm = "SHA-384"
+ SHA512 HashingAlgorithm = "SHA-512"
+ SHA512224 HashingAlgorithm = "SHA-512-224"
+ SHA512256 HashingAlgorithm = "SHA-512-256"
+)
+
+// String returns the string representation of a HashingAlgorithm
+func (alg HashingAlgorithm) String() string {
+ return string(alg)
+}
+
+// SignatureHasher computes a signature digest for a file with (structured or unstructured) data and policy
+type SignatureHasher interface {
+ HashFile(v any) ([]byte, error)
+}
+
+type hasher struct {
+ h func() hash.Hash // hash function factory
+}
+
+// NewSignatureHasher returns a signature hasher suitable for a particular hashing algorithm
+func NewSignatureHasher(alg HashingAlgorithm) (SignatureHasher, error) {
+ h := &hasher{}
+
+ switch alg {
+ case MD5:
+ h.h = md5.New
+ case SHA1:
+ h.h = sha1.New
+ case SHA224:
+ h.h = sha256.New224
+ case SHA256:
+ h.h = sha256.New
+ case SHA384:
+ h.h = sha512.New384
+ case SHA512:
+ h.h = sha512.New
+ case SHA512224:
+ h.h = sha512.New512_224
+ case SHA512256:
+ h.h = sha512.New512_256
+ default:
+ return nil, fmt.Errorf("unsupported hashing algorithm: %s", alg)
+ }
+
+ return h, nil
+}
+
+// HashFile hashes the file content, JSON or binary, both in golang native format.
+func (h *hasher) HashFile(v any) ([]byte, error) {
+ hf := h.h()
+ walk(v, hf)
+ return hf.Sum(nil), nil
+}
+
+// walk hashes the file content, JSON or binary, both in golang native format.
+//
+// Computation for unstructured documents is a hash of the document.
+//
+// Computation for the types of structured JSON document is as follows:
+//
+// object: Hash {, then each key (in alphabetical order) and digest of the value, then comma (between items) and finally }.
+//
+// array: Hash [, then digest of the value, then comma (between items) and finally ].
+func walk(v any, h io.Writer) {
+
+ switch x := v.(type) {
+ case map[string]any:
+ _, _ = h.Write([]byte("{"))
+
+ for i, key := range util.KeysSorted(x) {
+ if i > 0 {
+ _, _ = h.Write([]byte(","))
+ }
+
+ _, _ = h.Write(encodePrimitive(key))
+ _, _ = h.Write([]byte(":"))
+ walk(x[key], h)
+ }
+
+ _, _ = h.Write([]byte("}"))
+ case []any:
+ _, _ = h.Write([]byte("["))
+
+ for i, e := range x {
+ if i > 0 {
+ _, _ = h.Write([]byte(","))
+ }
+ walk(e, h)
+ }
+
+ _, _ = h.Write([]byte("]"))
+ case []byte:
+ _, _ = h.Write(x)
+ default:
+ _, _ = h.Write(encodePrimitive(x))
+ }
+}
+
+func encodePrimitive(v any) []byte {
+ var buf bytes.Buffer
+ encoder := json.NewEncoder(&buf)
+ encoder.SetEscapeHTML(false)
+ _ = encoder.Encode(v)
+ return []byte(strings.Trim(buf.String(), "\n"))
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/keys.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/keys.go
new file mode 100644
index 0000000000..f16fe37fc7
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/keys.go
@@ -0,0 +1,173 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package bundle provide helpers that assist in creating the verification and signing key configuration
+package bundle
+
+import (
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/open-policy-agent/opa/v1/keys"
+
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+const (
+ defaultTokenSigningAlg = "RS256"
+)
+
+// KeyConfig holds the keys used to sign or verify bundles and tokens
+// Moved to own package, alias kept for backwards compatibility
+type KeyConfig = keys.Config
+
+// VerificationConfig represents the key configuration used to verify a signed bundle
+type VerificationConfig struct {
+ PublicKeys map[string]*KeyConfig
+ KeyID string `json:"keyid"`
+ Scope string `json:"scope"`
+ Exclude []string `json:"exclude_files"`
+}
+
+// NewVerificationConfig return a new VerificationConfig
+func NewVerificationConfig(keys map[string]*KeyConfig, id, scope string, exclude []string) *VerificationConfig {
+ return &VerificationConfig{
+ PublicKeys: keys,
+ KeyID: id,
+ Scope: scope,
+ Exclude: exclude,
+ }
+}
+
+// ValidateAndInjectDefaults validates the config and inserts default values
+func (vc *VerificationConfig) ValidateAndInjectDefaults(keys map[string]*KeyConfig) error {
+ vc.PublicKeys = keys
+
+ if vc.KeyID != "" {
+ found := false
+ for key := range keys {
+ if key == vc.KeyID {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ return fmt.Errorf("key id %s not found", vc.KeyID)
+ }
+ }
+ return nil
+}
+
+// GetPublicKey returns the public key corresponding to the given key id
+func (vc *VerificationConfig) GetPublicKey(id string) (*KeyConfig, error) {
+ var kc *KeyConfig
+ var ok bool
+
+ if kc, ok = vc.PublicKeys[id]; !ok {
+ return nil, fmt.Errorf("verification key corresponding to ID %v not found", id)
+ }
+ return kc, nil
+}
+
+// SigningConfig represents the key configuration used to generate a signed bundle
+type SigningConfig struct {
+ Plugin string
+ Key string
+ Algorithm string
+ ClaimsPath string
+}
+
+// NewSigningConfig return a new SigningConfig
+func NewSigningConfig(key, alg, claimsPath string) *SigningConfig {
+ if alg == "" {
+ alg = defaultTokenSigningAlg
+ }
+
+ return &SigningConfig{
+ Plugin: defaultSignerID,
+ Key: key,
+ Algorithm: alg,
+ ClaimsPath: claimsPath,
+ }
+}
+
+// WithPlugin sets the signing plugin in the signing config
+func (s *SigningConfig) WithPlugin(plugin string) *SigningConfig {
+ if plugin != "" {
+ s.Plugin = plugin
+ }
+ return s
+}
+
+// GetPrivateKey returns the private key or secret from the signing config
+func (s *SigningConfig) GetPrivateKey() (any, error) {
+ var keyData string
+
+ alg, ok := jwa.LookupSignatureAlgorithm(s.Algorithm)
+ if !ok {
+ return nil, fmt.Errorf("unknown signature algorithm: %s", s.Algorithm)
+ }
+
+ // Check if the key looks like PEM data first (starts with -----BEGIN)
+ if strings.HasPrefix(s.Key, "-----BEGIN") {
+ keyData = s.Key
+ } else {
+ // Try to read as a file path
+ if _, err := os.Stat(s.Key); err == nil {
+ bs, err := os.ReadFile(s.Key)
+ if err != nil {
+ return nil, err
+ }
+ keyData = string(bs)
+ } else if os.IsNotExist(err) {
+ // Not a file, treat as raw key data
+ keyData = s.Key
+ } else {
+ return nil, err
+ }
+ }
+
+ // For HMAC algorithms, return the key as bytes
+ if alg == jwa.HS256() || alg == jwa.HS384() || alg == jwa.HS512() {
+ return []byte(keyData), nil
+ }
+
+ // For RSA/ECDSA algorithms, parse the PEM-encoded key
+ block, _ := pem.Decode([]byte(keyData))
+ if block == nil {
+ return nil, errors.New("failed to parse PEM block containing the key")
+ }
+
+ switch block.Type {
+ case "RSA PRIVATE KEY":
+ return x509.ParsePKCS1PrivateKey(block.Bytes)
+ case "PRIVATE KEY":
+ return x509.ParsePKCS8PrivateKey(block.Bytes)
+ case "EC PRIVATE KEY":
+ return x509.ParseECPrivateKey(block.Bytes)
+ default:
+ return nil, fmt.Errorf("unsupported key type: %s", block.Type)
+ }
+}
+
+// GetClaims returns the claims by reading the file specified in the signing config
+func (s *SigningConfig) GetClaims() (map[string]any, error) {
+ var claims map[string]any
+
+ bs, err := os.ReadFile(s.ClaimsPath)
+ if err != nil {
+ return claims, err
+ }
+
+ if err := util.UnmarshalJSON(bs, &claims); err != nil {
+ return claims, err
+ }
+ return claims, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/sign.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/sign.go
new file mode 100644
index 0000000000..30640731ef
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/sign.go
@@ -0,0 +1,130 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package bundle provide helpers that assist in the creating a signed bundle
+package bundle
+
+import (
+ "fmt"
+
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jwk"
+ "github.com/lestrrat-go/jwx/v3/jwt"
+)
+
+const defaultSignerID = "_default"
+
+var signers map[string]Signer
+
+// Signer is the interface expected for implementations that generate bundle signatures.
+type Signer interface {
+ GenerateSignedToken([]FileInfo, *SigningConfig, string) (string, error)
+}
+
+// GenerateSignedToken will retrieve the Signer implementation based on the Plugin specified
+// in SigningConfig, and call its implementation of GenerateSignedToken. The signer generates
+// a signed token given the list of files to be included in the payload and the bundle
+// signing config. The keyID if non-empty, represents the value for the "keyid" claim in the token.
+func GenerateSignedToken(files []FileInfo, sc *SigningConfig, keyID string) (string, error) {
+ var plugin string
+ // for backwards compatibility, check if there is no plugin specified, and use default
+ if sc.Plugin == "" {
+ plugin = defaultSignerID
+ } else {
+ plugin = sc.Plugin
+ }
+ signer, err := GetSigner(plugin)
+ if err != nil {
+ return "", err
+ }
+ return signer.GenerateSignedToken(files, sc, keyID)
+}
+
+// DefaultSigner is the default bundle signing implementation. It signs bundles by generating
+// a JWT and signing it using a locally-accessible private key.
+type DefaultSigner struct{}
+
+// GenerateSignedToken generates a signed token given the list of files to be
+// included in the payload and the bundle signing config. The keyID if non-empty,
+// represents the value for the "keyid" claim in the token
+func (*DefaultSigner) GenerateSignedToken(files []FileInfo, sc *SigningConfig, keyID string) (string, error) {
+ token, err := generateToken(files, sc, keyID)
+ if err != nil {
+ return "", err
+ }
+
+ privateKey, err := sc.GetPrivateKey()
+ if err != nil {
+ return "", err
+ }
+
+ // Parse the algorithm string to jwa.SignatureAlgorithm
+ alg, ok := jwa.LookupSignatureAlgorithm(sc.Algorithm)
+ if !ok {
+ return "", fmt.Errorf("unknown signature algorithm: %s", sc.Algorithm)
+ }
+
+ // In order to sign the token with a kid, we need a key ID _on_ the key
+ // (note: we might be able to make this more efficient if we just load
+ // the key as a JWK from the start)
+ jwkKey, err := jwk.Import(privateKey)
+ if err != nil {
+ return "", fmt.Errorf("failed to import private key: %w", err)
+ }
+ if err := jwkKey.Set(jwk.KeyIDKey, keyID); err != nil {
+ return "", fmt.Errorf("failed to set key ID on JWK: %w", err)
+ }
+
+ // Since v3.0.6, jwx will take the fast path for signing the token if
+ // there's exactly one WithKey in the options with no sub-options
+ signed, err := jwt.Sign(token, jwt.WithKey(alg, jwkKey))
+ if err != nil {
+ return "", err
+ }
+ return string(signed), nil
+}
+
+func generateToken(files []FileInfo, sc *SigningConfig, keyID string) (jwt.Token, error) {
+ tb := jwt.NewBuilder()
+ tb.Claim("files", files)
+
+ if sc.ClaimsPath != "" {
+ claims, err := sc.GetClaims()
+ if err != nil {
+ return nil, err
+ }
+
+ for k, v := range claims {
+ tb.Claim(k, v)
+ }
+ } else if keyID != "" {
+ // keyid claim is deprecated but include it for backwards compatibility.
+ tb.Claim("keyid", keyID)
+ }
+ return tb.Build()
+}
+
+// GetSigner returns the Signer registered under the given id
+func GetSigner(id string) (Signer, error) {
+ signer, ok := signers[id]
+ if !ok {
+ return nil, fmt.Errorf("no signer exists under id %s", id)
+ }
+ return signer, nil
+}
+
+// RegisterSigner registers a Signer under the given id
+func RegisterSigner(id string, s Signer) error {
+ if id == defaultSignerID {
+ return fmt.Errorf("signer id %s is reserved, use a different id", id)
+ }
+ signers[id] = s
+ return nil
+}
+
+func init() {
+ signers = map[string]Signer{
+ defaultSignerID: &DefaultSigner{},
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/store.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/store.go
new file mode 100644
index 0000000000..992bf78f63
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/store.go
@@ -0,0 +1,1251 @@
+// Copyright 2019 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package bundle
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "maps"
+ "path/filepath"
+ "slices"
+ "sort"
+ "strings"
+ "sync"
+
+ iCompiler "github.com/open-policy-agent/opa/internal/compiler"
+ "github.com/open-policy-agent/opa/internal/json/patch"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/metrics"
+ "github.com/open-policy-agent/opa/v1/storage"
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+const defaultActivatorID = "_default"
+
+var (
+ activators = map[string]Activator{
+ defaultActivatorID: &DefaultActivator{},
+ }
+ activatorMtx sync.Mutex
+)
+
+// BundlesBasePath is the storage path used for storing bundle metadata
+var BundlesBasePath = storage.MustParsePath("/system/bundles")
+
+var ModulesInfoBasePath = storage.MustParsePath("/system/modules")
+
+// Note: As needed these helpers could be memoized.
+
+// ManifestStoragePath is the storage path used for the given named bundle manifest.
+func ManifestStoragePath(name string) storage.Path {
+ return append(BundlesBasePath, name, "manifest")
+}
+
+// EtagStoragePath is the storage path used for the given named bundle etag.
+func EtagStoragePath(name string) storage.Path {
+ return append(BundlesBasePath, name, "etag")
+}
+
+func namedBundlePath(name string) storage.Path {
+ return append(BundlesBasePath, name)
+}
+
+func rootsPath(name string) storage.Path {
+ return append(BundlesBasePath, name, "manifest", "roots")
+}
+
+func revisionPath(name string) storage.Path {
+ return append(BundlesBasePath, name, "manifest", "revision")
+}
+
+func wasmModulePath(name string) storage.Path {
+ return append(BundlesBasePath, name, "wasm")
+}
+
+func wasmEntrypointsPath(name string) storage.Path {
+ return append(BundlesBasePath, name, "manifest", "wasm")
+}
+
+func metadataPath(name string) storage.Path {
+ return append(BundlesBasePath, name, "manifest", "metadata")
+}
+
+func moduleRegoVersionPath(id string) storage.Path {
+ return append(ModulesInfoBasePath, strings.Trim(id, "/"), "rego_version")
+}
+
+func moduleInfoPath(id string) storage.Path {
+ return append(ModulesInfoBasePath, strings.Trim(id, "/"))
+}
+
+func read(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (any, error) {
+ value, err := store.Read(ctx, txn, path)
+ if err != nil {
+ if storage.IsNotFound(err) {
+ return nil, &storage.Error{
+ Code: storage.NotFoundErr,
+ Message: strings.TrimPrefix(path.String(), "/system") + ": document does not exist",
+ }
+ }
+ return nil, err
+ }
+
+ if astValue, ok := value.(ast.Value); ok {
+ value, err = ast.JSON(astValue)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return value, nil
+}
+
+// ReadBundleNamesFromStore will return a list of bundle names which have had their metadata stored.
+func ReadBundleNamesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) ([]string, error) {
+ value, err := read(ctx, store, txn, BundlesBasePath)
+ if err != nil {
+ return nil, err
+ }
+
+ bundleMap, ok := value.(map[string]any)
+ if !ok {
+ return nil, errors.New("corrupt manifest roots")
+ }
+
+ bundles := make([]string, len(bundleMap))
+ idx := 0
+ for name := range bundleMap {
+ bundles[idx] = name
+ idx++
+ }
+ return bundles, nil
+}
+
+// WriteManifestToStore will write the manifest into the storage. This function is called when
+// the bundle is activated.
+func WriteManifestToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string, manifest Manifest) error {
+ return write(ctx, store, txn, ManifestStoragePath(name), manifest)
+}
+
+// WriteEtagToStore will write the bundle etag into the storage. This function is called when the bundle is activated.
+func WriteEtagToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name, etag string) error {
+ return write(ctx, store, txn, EtagStoragePath(name), etag)
+}
+
+func write(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path, value any) error {
+ if err := util.RoundTrip(&value); err != nil {
+ return err
+ }
+
+ var dir []string
+ if len(path) > 1 {
+ dir = path[:len(path)-1]
+ }
+
+ if err := storage.MakeDir(ctx, store, txn, dir); err != nil {
+ return err
+ }
+
+ return store.Write(ctx, txn, storage.AddOp, path, value)
+}
+
+// EraseManifestFromStore will remove the manifest from storage. This function is called
+// when the bundle is deactivated.
+func EraseManifestFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error {
+ path := namedBundlePath(name)
+ err := store.Write(ctx, txn, storage.RemoveOp, path, nil)
+ return suppressNotFound(err)
+}
+
+// eraseBundleEtagFromStore will remove the bundle etag from storage. This function is called
+// when the bundle is deactivated.
+func eraseBundleEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error {
+ path := EtagStoragePath(name)
+ err := store.Write(ctx, txn, storage.RemoveOp, path, nil)
+ return suppressNotFound(err)
+}
+
+func suppressNotFound(err error) error {
+ if err == nil || storage.IsNotFound(err) {
+ return nil
+ }
+ return err
+}
+
+func writeWasmModulesToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string, b *Bundle) error {
+ basePath := wasmModulePath(name)
+ for _, wm := range b.WasmModules {
+ path := append(basePath, wm.Path)
+ err := write(ctx, store, txn, path, base64.StdEncoding.EncodeToString(wm.Raw))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func eraseWasmModulesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error {
+ path := wasmModulePath(name)
+
+ err := store.Write(ctx, txn, storage.RemoveOp, path, nil)
+ return suppressNotFound(err)
+}
+
+func eraseModuleRegoVersionsFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, modules []string) error {
+ for _, module := range modules {
+ err := store.Write(ctx, txn, storage.RemoveOp, moduleInfoPath(module), nil)
+ if err := suppressNotFound(err); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ReadWasmMetadataFromStore will read Wasm module resolver metadata from the store.
+func ReadWasmMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) ([]WasmResolver, error) {
+ path := wasmEntrypointsPath(name)
+ value, err := read(ctx, store, txn, path)
+ if err != nil {
+ return nil, err
+ }
+
+ bs, err := json.Marshal(value)
+ if err != nil {
+ return nil, errors.New("corrupt wasm manifest data")
+ }
+
+ var wasmMetadata []WasmResolver
+
+ err = util.UnmarshalJSON(bs, &wasmMetadata)
+ if err != nil {
+ return nil, errors.New("corrupt wasm manifest data")
+ }
+
+ return wasmMetadata, nil
+}
+
+// ReadWasmModulesFromStore will write Wasm module resolver metadata from the store.
+func ReadWasmModulesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string][]byte, error) {
+ path := wasmModulePath(name)
+ value, err := read(ctx, store, txn, path)
+ if err != nil {
+ return nil, err
+ }
+
+ encodedModules, ok := value.(map[string]any)
+ if !ok {
+ return nil, errors.New("corrupt wasm modules")
+ }
+
+ rawModules := map[string][]byte{}
+ for path, enc := range encodedModules {
+ encStr, ok := enc.(string)
+ if !ok {
+ return nil, errors.New("corrupt wasm modules")
+ }
+ bs, err := base64.StdEncoding.DecodeString(encStr)
+ if err != nil {
+ return nil, err
+ }
+ rawModules[path] = bs
+ }
+ return rawModules, nil
+}
+
+// ReadBundleRootsFromStore returns the roots in the specified bundle.
+// If the bundle is not activated, this function will return
+// storage NotFound error.
+func ReadBundleRootsFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) ([]string, error) {
+ value, err := read(ctx, store, txn, rootsPath(name))
+ if err != nil {
+ return nil, err
+ }
+
+ sl, ok := value.([]any)
+ if !ok {
+ return nil, errors.New("corrupt manifest roots")
+ }
+
+ roots := make([]string, len(sl))
+
+ for i := range sl {
+ roots[i], ok = sl[i].(string)
+ if !ok {
+ return nil, errors.New("corrupt manifest root")
+ }
+ }
+
+ return roots, nil
+}
+
+// ReadBundleRevisionFromStore returns the revision in the specified bundle.
+// If the bundle is not activated, this function will return
+// storage NotFound error.
+func ReadBundleRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (string, error) {
+ return readRevisionFromStore(ctx, store, txn, revisionPath(name))
+}
+
+func readRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (string, error) {
+ value, err := read(ctx, store, txn, path)
+ if err != nil {
+ return "", err
+ }
+
+ str, ok := value.(string)
+ if !ok {
+ return "", errors.New("corrupt manifest revision")
+ }
+
+ return str, nil
+}
+
+// ReadBundleMetadataFromStore returns the metadata in the specified bundle.
+// If the bundle is not activated, this function will return
+// storage NotFound error.
+func ReadBundleMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string]any, error) {
+ return readMetadataFromStore(ctx, store, txn, metadataPath(name))
+}
+
+func readMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (map[string]any, error) {
+ value, err := read(ctx, store, txn, path)
+ if err != nil {
+ return nil, suppressNotFound(err)
+ }
+
+ data, ok := value.(map[string]any)
+ if !ok {
+ return nil, errors.New("corrupt manifest metadata")
+ }
+
+ return data, nil
+}
+
+// ReadBundleEtagFromStore returns the etag for the specified bundle.
+// If the bundle is not activated, this function will return
+// storage NotFound error.
+func ReadBundleEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (string, error) {
+ return readEtagFromStore(ctx, store, txn, EtagStoragePath(name))
+}
+
+func readEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (string, error) {
+ value, err := read(ctx, store, txn, path)
+ if err != nil {
+ return "", err
+ }
+
+ str, ok := value.(string)
+ if !ok {
+ return "", errors.New("corrupt bundle etag")
+ }
+
+ return str, nil
+}
+
+// Activator is the interface expected for implementations that activate bundles.
+type Activator interface {
+ Activate(*ActivateOpts) error
+}
+
+// ActivateOpts defines options for the Activate API call.
+type ActivateOpts struct {
+ Ctx context.Context
+ Store storage.Store
+ Txn storage.Transaction
+ TxnCtx *storage.Context
+ Compiler *ast.Compiler
+ Metrics metrics.Metrics
+ Bundles map[string]*Bundle // Optional
+ ExtraModules map[string]*ast.Module // Optional
+ AuthorizationDecisionRef ast.Ref
+ ParserOptions ast.ParserOptions
+ Plugin string
+
+ legacy bool
+}
+
+type DefaultActivator struct{}
+
+func (*DefaultActivator) Activate(opts *ActivateOpts) error {
+ opts.legacy = false
+ return activateBundles(opts)
+}
+
+// Activate the bundle(s) by loading into the given Store. This will load policies, data, and record
+// the manifest in storage. The compiler provided will have had the polices compiled on it.
+func Activate(opts *ActivateOpts) error {
+ plugin := opts.Plugin
+
+ // For backwards compatibility, check if there is no plugin specified, and use default.
+ if plugin == "" {
+ // Invoke extension activator if supplied. Otherwise, use default.
+ if HasExtension() {
+ plugin = bundleExtActivator
+ } else {
+ plugin = defaultActivatorID
+ }
+ }
+
+ activator, err := GetActivator(plugin)
+ if err != nil {
+ return err
+ }
+
+ return activator.Activate(opts)
+}
+
+// DeactivateOpts defines options for the Deactivate API call
+type DeactivateOpts struct {
+ Ctx context.Context
+ Store storage.Store
+ Txn storage.Transaction
+ BundleNames map[string]struct{}
+ ParserOptions ast.ParserOptions
+}
+
+// Deactivate the bundle(s). This will erase associated data, policies, and the manifest entry from the store.
+func Deactivate(opts *DeactivateOpts) error {
+ erase := map[string]struct{}{}
+ for name := range opts.BundleNames {
+ roots, err := ReadBundleRootsFromStore(opts.Ctx, opts.Store, opts.Txn, name)
+ if suppressNotFound(err) != nil {
+ return err
+ }
+ for _, root := range roots {
+ erase[root] = struct{}{}
+ }
+ }
+ _, err := eraseBundles(opts.Ctx, opts.Store, opts.Txn, opts.ParserOptions, opts.BundleNames, erase)
+ return err
+}
+
+func activateBundles(opts *ActivateOpts) error {
+
+ // Build collections of bundle names, modules, and roots to erase
+ erase := map[string]struct{}{}
+ names := map[string]struct{}{}
+ deltaBundles := map[string]*Bundle{}
+ snapshotBundles := map[string]*Bundle{}
+
+ for name, b := range opts.Bundles {
+ if b.Type() == DeltaBundleType {
+ deltaBundles[name] = b
+ } else {
+ snapshotBundles[name] = b
+ names[name] = struct{}{}
+
+ roots, err := ReadBundleRootsFromStore(opts.Ctx, opts.Store, opts.Txn, name)
+ if suppressNotFound(err) != nil {
+ return err
+ }
+ for _, root := range roots {
+ erase[root] = struct{}{}
+ }
+
+ // Erase data at new roots to prepare for writing the new data
+ for _, root := range *b.Manifest.Roots {
+ erase[root] = struct{}{}
+ }
+ }
+ }
+
+ // Before changing anything make sure the roots don't collide with any
+ // other bundles that already are activated or other bundles being activated.
+ err := hasRootsOverlap(opts.Ctx, opts.Store, opts.Txn, opts.Bundles)
+ if err != nil {
+ return err
+ }
+
+ if len(deltaBundles) != 0 {
+ err := activateDeltaBundles(opts, deltaBundles)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Erase data and policies at new + old roots, and remove the old
+ // manifests before activating a new snapshot bundle.
+ remaining, err := eraseBundles(opts.Ctx, opts.Store, opts.Txn, opts.ParserOptions, names, erase)
+ if err != nil {
+ return err
+ }
+
+ // Validate data in bundle does not contain paths outside the bundle's roots.
+ for _, b := range snapshotBundles {
+
+ if b.lazyLoadingMode {
+
+ for _, item := range b.Raw {
+ path := filepath.ToSlash(item.Path)
+
+ if filepath.Base(path) == dataFile || filepath.Base(path) == yamlDataFile {
+ var val map[string]json.RawMessage
+ err = util.Unmarshal(item.Value, &val)
+ if err == nil {
+ err = doDFS(val, filepath.Dir(strings.Trim(path, "/")), *b.Manifest.Roots)
+ if err != nil {
+ return err
+ }
+ } else {
+ // Build an object for the value
+ p := getNormalizedPath(path)
+
+ if len(p) == 0 {
+ return errors.New("root value must be object")
+ }
+
+ // verify valid YAML or JSON value
+ var x any
+ err := util.Unmarshal(item.Value, &x)
+ if err != nil {
+ return err
+ }
+
+ value := item.Value
+ dir := map[string]json.RawMessage{}
+ for i := len(p) - 1; i > 0; i-- {
+ dir[p[i]] = value
+
+ bs, err := json.Marshal(dir)
+ if err != nil {
+ return err
+ }
+
+ value = bs
+ dir = map[string]json.RawMessage{}
+ }
+ dir[p[0]] = value
+
+ err = doDFS(dir, filepath.Dir(strings.Trim(path, "/")), *b.Manifest.Roots)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Compile the modules all at once to avoid having to re-do work.
+ remainingAndExtra := make(map[string]*ast.Module)
+ maps.Copy(remainingAndExtra, remaining)
+ maps.Copy(remainingAndExtra, opts.ExtraModules)
+
+ err = compileModules(opts.Compiler, opts.Metrics, snapshotBundles, remainingAndExtra, opts.legacy, opts.AuthorizationDecisionRef)
+ if err != nil {
+ return err
+ }
+
+ if err := writeDataAndModules(opts.Ctx, opts.Store, opts.Txn, opts.TxnCtx, snapshotBundles, opts.legacy, opts.ParserOptions.RegoVersion); err != nil {
+ return err
+ }
+
+ if err := ast.CheckPathConflicts(opts.Compiler, storage.NonEmpty(opts.Ctx, opts.Store, opts.Txn)); len(err) > 0 {
+ return err
+ }
+
+ for name, b := range snapshotBundles {
+ if err := writeManifestToStore(opts, name, b.Manifest); err != nil {
+ return err
+ }
+
+ if err := writeEtagToStore(opts, name, b.Etag); err != nil {
+ return err
+ }
+
+ if err := writeWasmModulesToStore(opts.Ctx, opts.Store, opts.Txn, name, b); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func doDFS(obj map[string]json.RawMessage, path string, roots []string) error {
+ if len(roots) == 1 && roots[0] == "" {
+ return nil
+ }
+
+ for key := range obj {
+
+ newPath := filepath.Join(strings.Trim(path, "/"), key)
+
+ // Note: filepath.Join can return paths with '\' separators, always use
+ // filepath.ToSlash to keep them normalized.
+ newPath = strings.TrimLeft(normalizePath(newPath), "/.")
+
+ contains := false
+ prefix := false
+ if RootPathsContain(roots, newPath) {
+ contains = true
+ } else {
+ for i := range roots {
+ if strings.HasPrefix(strings.Trim(roots[i], "/"), newPath) {
+ prefix = true
+ break
+ }
+ }
+ }
+
+ if !contains && !prefix {
+ return fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, newPath)
+ }
+
+ if contains {
+ continue
+ }
+
+ var next map[string]json.RawMessage
+ err := util.Unmarshal(obj[key], &next)
+ if err != nil {
+ return fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, newPath)
+ }
+
+ if err := doDFS(next, newPath, roots); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func activateDeltaBundles(opts *ActivateOpts, bundles map[string]*Bundle) error {
+
+ // Check that the manifest roots and wasm resolvers in the delta bundle
+ // match with those currently in the store
+ for name, b := range bundles {
+ value, err := opts.Store.Read(opts.Ctx, opts.Txn, ManifestStoragePath(name))
+ if err != nil {
+ if storage.IsNotFound(err) {
+ continue
+ }
+ return err
+ }
+
+ manifest, err := valueToManifest(value)
+ if err != nil {
+ return fmt.Errorf("corrupt manifest data: %w", err)
+ }
+
+ if !b.Manifest.equalWasmResolversAndRoots(manifest) {
+ return fmt.Errorf("delta bundle '%s' has wasm resolvers or manifest roots that are different from those in the store", name)
+ }
+ }
+
+ for _, b := range bundles {
+ err := applyPatches(opts.Ctx, opts.Store, opts.Txn, b.Patch.Data)
+ if err != nil {
+ return err
+ }
+ }
+
+ if err := ast.CheckPathConflicts(opts.Compiler, storage.NonEmpty(opts.Ctx, opts.Store, opts.Txn)); len(err) > 0 {
+ return err
+ }
+
+ for name, b := range bundles {
+ if err := writeManifestToStore(opts, name, b.Manifest); err != nil {
+ return err
+ }
+
+ if err := writeEtagToStore(opts, name, b.Etag); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func valueToManifest(v any) (Manifest, error) {
+ if astV, ok := v.(ast.Value); ok {
+ var err error
+ v, err = ast.JSON(astV)
+ if err != nil {
+ return Manifest{}, err
+ }
+ }
+
+ var manifest Manifest
+
+ bs, err := json.Marshal(v)
+ if err != nil {
+ return Manifest{}, err
+ }
+
+ err = util.UnmarshalJSON(bs, &manifest)
+ if err != nil {
+ return Manifest{}, err
+ }
+
+ return manifest, nil
+}
+
+// erase bundles by name and roots. This will clear all policies and data at its roots and remove its
+// manifest from storage.
+func eraseBundles(ctx context.Context, store storage.Store, txn storage.Transaction, parserOpts ast.ParserOptions, names map[string]struct{}, roots map[string]struct{}) (map[string]*ast.Module, error) {
+
+ if err := eraseData(ctx, store, txn, roots); err != nil {
+ return nil, err
+ }
+
+ remaining, removed, err := erasePolicies(ctx, store, txn, parserOpts, roots)
+ if err != nil {
+ return nil, err
+ }
+
+ for name := range names {
+ if err := EraseManifestFromStore(ctx, store, txn, name); suppressNotFound(err) != nil {
+ return nil, err
+ }
+
+ if err := LegacyEraseManifestFromStore(ctx, store, txn); suppressNotFound(err) != nil {
+ return nil, err
+ }
+
+ if err := eraseBundleEtagFromStore(ctx, store, txn, name); suppressNotFound(err) != nil {
+ return nil, err
+ }
+
+ if err := eraseWasmModulesFromStore(ctx, store, txn, name); suppressNotFound(err) != nil {
+ return nil, err
+ }
+ }
+
+ err = eraseModuleRegoVersionsFromStore(ctx, store, txn, removed)
+ if err != nil {
+ return nil, err
+ }
+
+ return remaining, nil
+}
+
+func eraseData(ctx context.Context, store storage.Store, txn storage.Transaction, roots map[string]struct{}) error {
+ for root := range roots {
+ path, ok := storage.ParsePathEscaped("/" + root)
+ if !ok {
+ return fmt.Errorf("manifest root path invalid: %v", root)
+ }
+
+ if len(path) > 0 {
+ if err := store.Write(ctx, txn, storage.RemoveOp, path, nil); suppressNotFound(err) != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+type moduleInfo struct {
+ RegoVersion ast.RegoVersion `json:"rego_version"`
+}
+
+func readModuleInfoFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) (map[string]moduleInfo, error) {
+ value, err := read(ctx, store, txn, ModulesInfoBasePath)
+ if suppressNotFound(err) != nil {
+ return nil, err
+ }
+
+ if value == nil {
+ return nil, nil
+ }
+
+ if m, ok := value.(map[string]any); ok {
+ versions := make(map[string]moduleInfo, len(m))
+
+ for k, v := range m {
+ if m0, ok := v.(map[string]any); ok {
+ if ver, ok := m0["rego_version"]; ok {
+ if vs, ok := ver.(json.Number); ok {
+ i, err := vs.Int64()
+ if err != nil {
+ return nil, errors.New("corrupt rego version")
+ }
+ versions[k] = moduleInfo{RegoVersion: ast.RegoVersionFromInt(int(i))}
+ }
+ }
+ }
+ }
+ return versions, nil
+ }
+
+ return nil, errors.New("corrupt rego version")
+}
+
+func erasePolicies(ctx context.Context, store storage.Store, txn storage.Transaction, parserOpts ast.ParserOptions, roots map[string]struct{}) (map[string]*ast.Module, []string, error) {
+
+ ids, err := store.ListPolicies(ctx, txn)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ modulesInfo, err := readModuleInfoFromStore(ctx, store, txn)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to read module info from store: %w", err)
+ }
+
+ getRegoVersion := func(modId string) (ast.RegoVersion, bool) {
+ info, ok := modulesInfo[modId]
+ if !ok {
+ return ast.RegoUndefined, false
+ }
+ return info.RegoVersion, true
+ }
+
+ remaining := map[string]*ast.Module{}
+ var removed []string
+
+ for _, id := range ids {
+ bs, err := store.GetPolicy(ctx, txn, id)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ parserOptsCpy := parserOpts
+ if regoVersion, ok := getRegoVersion(id); ok {
+ parserOptsCpy.RegoVersion = regoVersion
+ }
+
+ module, err := ast.ParseModuleWithOpts(id, string(bs), parserOptsCpy)
+ if err != nil {
+ return nil, nil, err
+ }
+ path, err := module.Package.Path.Ptr()
+ if err != nil {
+ return nil, nil, err
+ }
+ deleted := false
+ for root := range roots {
+ if RootPathsContain([]string{root}, path) {
+ if err := store.DeletePolicy(ctx, txn, id); err != nil {
+ return nil, nil, err
+ }
+ deleted = true
+ break
+ }
+ }
+
+ if deleted {
+ removed = append(removed, id)
+ } else {
+ remaining[id] = module
+ }
+ }
+
+ return remaining, removed, nil
+}
+
+func writeManifestToStore(opts *ActivateOpts, name string, manifest Manifest) error {
+ // Always write manifests to the named location. If the plugin is in the older style config
+ // then also write to the old legacy unnamed location.
+ if err := WriteManifestToStore(opts.Ctx, opts.Store, opts.Txn, name, manifest); err != nil {
+ return err
+ }
+
+ if opts.legacy {
+ if err := LegacyWriteManifestToStore(opts.Ctx, opts.Store, opts.Txn, manifest); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func writeEtagToStore(opts *ActivateOpts, name, etag string) error {
+ if err := WriteEtagToStore(opts.Ctx, opts.Store, opts.Txn, name, etag); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func writeModuleRegoVersionToStore(ctx context.Context, store storage.Store, txn storage.Transaction, b *Bundle,
+ mf ModuleFile, storagePath string, runtimeRegoVersion ast.RegoVersion) error {
+
+ var regoVersion ast.RegoVersion
+ if mf.Parsed != nil {
+ regoVersion = mf.Parsed.RegoVersion()
+ }
+
+ if regoVersion == ast.RegoUndefined {
+ var err error
+ regoVersion, err = b.RegoVersionForFile(mf.Path, runtimeRegoVersion)
+ if err != nil {
+ return fmt.Errorf("failed to get rego version for module '%s' in bundle: %w", mf.Path, err)
+ }
+ }
+
+ if regoVersion != ast.RegoUndefined && regoVersion != runtimeRegoVersion {
+ if err := write(ctx, store, txn, moduleRegoVersionPath(storagePath), regoVersion.Int()); err != nil {
+ return fmt.Errorf("failed to write rego version for module '%s': %w", storagePath, err)
+ }
+ }
+ return nil
+}
+
+func writeDataAndModules(ctx context.Context, store storage.Store, txn storage.Transaction, txnCtx *storage.Context, bundles map[string]*Bundle, legacy bool, runtimeRegoVersion ast.RegoVersion) error {
+ params := storage.WriteParams
+ params.Context = txnCtx
+
+ for name, b := range bundles {
+ if len(b.Raw) == 0 {
+ // Write data from each new bundle into the store. Only write under the
+ // roots contained in their manifest.
+ if err := writeData(ctx, store, txn, *b.Manifest.Roots, b.Data); err != nil {
+ return err
+ }
+
+ for _, mf := range b.Modules {
+ var path string
+
+ // For backwards compatibility, in legacy mode, upsert policies to
+ // the unprefixed path.
+ if legacy {
+ path = mf.Path
+ } else {
+ path = modulePathWithPrefix(name, mf.Path)
+ }
+
+ if err := store.UpsertPolicy(ctx, txn, path, mf.Raw); err != nil {
+ return err
+ }
+
+ if err := writeModuleRegoVersionToStore(ctx, store, txn, b, mf, path, runtimeRegoVersion); err != nil {
+ return err
+ }
+ }
+ } else {
+ params.BasePaths = *b.Manifest.Roots
+
+ err := store.Truncate(ctx, txn, params, NewIterator(b.Raw))
+ if err != nil {
+ return fmt.Errorf("store truncate failed for bundle '%s': %v", name, err)
+ }
+
+ for _, f := range b.Raw {
+ if strings.HasSuffix(f.Path, RegoExt) {
+ p, err := getFileStoragePath(f.Path)
+ if err != nil {
+ return fmt.Errorf("failed get storage path for module '%s' in bundle '%s': %w", f.Path, name, err)
+ }
+
+ if m := f.module; m != nil {
+ // 'f.module.Path' contains the module's path as it relates to the bundle root, and can be used for looking up the rego-version.
+ // 'f.Path' can differ, based on how the bundle reader was initialized.
+ if err := writeModuleRegoVersionToStore(ctx, store, txn, b, *m, p.String(), runtimeRegoVersion); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+func writeData(ctx context.Context, store storage.Store, txn storage.Transaction, roots []string, data map[string]any) error {
+ for _, root := range roots {
+ path, ok := storage.ParsePathEscaped("/" + root)
+ if !ok {
+ return fmt.Errorf("manifest root path invalid: %v", root)
+ }
+ if value, ok := lookup(path, data); ok {
+ if len(path) > 0 {
+ if err := storage.MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil {
+ return err
+ }
+ }
+ if err := store.Write(ctx, txn, storage.AddOp, path, value); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func compileModules(compiler *ast.Compiler, m metrics.Metrics, bundles map[string]*Bundle, extraModules map[string]*ast.Module, legacy bool, authorizationDecisionRef ast.Ref) error {
+
+ m.Timer(metrics.RegoModuleCompile).Start()
+ defer m.Timer(metrics.RegoModuleCompile).Stop()
+
+ modules := map[string]*ast.Module{}
+
+ // preserve any modules already on the compiler
+ maps.Copy(modules, compiler.Modules)
+
+ // preserve any modules passed in from the store
+ maps.Copy(modules, extraModules)
+
+ // include all the new bundle modules
+ for bundleName, b := range bundles {
+ if legacy {
+ for _, mf := range b.Modules {
+ modules[mf.Path] = mf.Parsed
+ }
+ } else {
+ maps.Copy(modules, b.ParsedModules(bundleName))
+ }
+ }
+
+ if compiler.Compile(modules); compiler.Failed() {
+ return compiler.Errors
+ }
+
+ if authorizationDecisionRef.Equal(ast.EmptyRef()) {
+ return nil
+ }
+
+ return iCompiler.VerifyAuthorizationPolicySchema(compiler, authorizationDecisionRef)
+}
+
+func writeModules(ctx context.Context, store storage.Store, txn storage.Transaction, compiler *ast.Compiler, m metrics.Metrics, bundles map[string]*Bundle, extraModules map[string]*ast.Module, legacy bool) error {
+
+ m.Timer(metrics.RegoModuleCompile).Start()
+ defer m.Timer(metrics.RegoModuleCompile).Stop()
+
+ modules := map[string]*ast.Module{}
+
+ // preserve any modules already on the compiler
+ maps.Copy(modules, compiler.Modules)
+
+ // preserve any modules passed in from the store
+ maps.Copy(modules, extraModules)
+
+ // include all the new bundle modules
+ for bundleName, b := range bundles {
+ if legacy {
+ for _, mf := range b.Modules {
+ modules[mf.Path] = mf.Parsed
+ }
+ } else {
+ maps.Copy(modules, b.ParsedModules(bundleName))
+ }
+ }
+
+ if compiler.Compile(modules); compiler.Failed() {
+ return compiler.Errors
+ }
+ for bundleName, b := range bundles {
+ for _, mf := range b.Modules {
+ var path string
+
+ // For backwards compatibility, in legacy mode, upsert policies to
+ // the unprefixed path.
+ if legacy {
+ path = mf.Path
+ } else {
+ path = modulePathWithPrefix(bundleName, mf.Path)
+ }
+
+ if err := store.UpsertPolicy(ctx, txn, path, mf.Raw); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func lookup(path storage.Path, data map[string]any) (any, bool) {
+ if len(path) == 0 {
+ return data, true
+ }
+ for i := range len(path) - 1 {
+ value, ok := data[path[i]]
+ if !ok {
+ return nil, false
+ }
+ obj, ok := value.(map[string]any)
+ if !ok {
+ return nil, false
+ }
+ data = obj
+ }
+ value, ok := data[path[len(path)-1]]
+ return value, ok
+}
+
+func hasRootsOverlap(ctx context.Context, store storage.Store, txn storage.Transaction, newBundles map[string]*Bundle) error {
+ storeBundles, err := ReadBundleNamesFromStore(ctx, store, txn)
+ if suppressNotFound(err) != nil {
+ return err
+ }
+
+ allRoots := map[string][]string{}
+ bundlesWithEmptyRoots := map[string]bool{}
+
+ // Build a map of roots for existing bundles already in the system
+ for _, name := range storeBundles {
+ roots, err := ReadBundleRootsFromStore(ctx, store, txn, name)
+ if suppressNotFound(err) != nil {
+ return err
+ }
+ allRoots[name] = roots
+ if slices.Contains(roots, "") {
+ bundlesWithEmptyRoots[name] = true
+ }
+ }
+
+ // Add in any bundles that are being activated, overwrite existing roots
+ // with new ones where bundles are in both groups.
+ for name, bundle := range newBundles {
+ allRoots[name] = *bundle.Manifest.Roots
+ if slices.Contains(*bundle.Manifest.Roots, "") {
+ bundlesWithEmptyRoots[name] = true
+ }
+ }
+
+ // Now check for each new bundle if it conflicts with any of the others
+ collidingBundles := map[string]bool{}
+ conflictSet := map[string]bool{}
+ for name, bundle := range newBundles {
+ for otherBundle, otherRoots := range allRoots {
+ if name == otherBundle {
+ // Skip the current bundle being checked
+ continue
+ }
+
+ // Compare the "new" roots with other existing (or a different bundles new roots)
+ for _, newRoot := range *bundle.Manifest.Roots {
+ for _, otherRoot := range otherRoots {
+ if !RootPathsOverlap(newRoot, otherRoot) {
+ continue
+ }
+
+ collidingBundles[name] = true
+ collidingBundles[otherBundle] = true
+
+ // Different message required if the roots are same
+ if newRoot == otherRoot {
+ conflictSet[fmt.Sprintf("root %s is in multiple bundles", newRoot)] = true
+ } else {
+ paths := []string{newRoot, otherRoot}
+ sort.Strings(paths)
+ conflictSet[fmt.Sprintf("%s overlaps %s", paths[0], paths[1])] = true
+ }
+ }
+ }
+ }
+ }
+
+ if len(collidingBundles) == 0 {
+ return nil
+ }
+
+ bundleNames := strings.Join(util.KeysSorted(collidingBundles), ", ")
+
+ if len(bundlesWithEmptyRoots) > 0 {
+ return fmt.Errorf(
+ "bundles [%s] have overlapping roots and cannot be activated simultaneously because bundle(s) [%s] specify empty root paths ('') which overlap with any other bundle root",
+ bundleNames,
+ strings.Join(util.KeysSorted(bundlesWithEmptyRoots), ", "),
+ )
+ }
+
+ return fmt.Errorf("detected overlapping roots in manifests for these bundles: [%s] (%s)", bundleNames, strings.Join(util.KeysSorted(conflictSet), ", "))
+}
+
+func applyPatches(ctx context.Context, store storage.Store, txn storage.Transaction, patches []PatchOperation) error {
+ for _, pat := range patches {
+
+ // construct patch path
+ path, ok := patch.ParsePatchPathEscaped("/" + strings.Trim(pat.Path, "/"))
+ if !ok {
+ return errors.New("error parsing patch path")
+ }
+
+ var op storage.PatchOp
+ switch pat.Op {
+ case "upsert":
+ op = storage.AddOp
+
+ _, err := store.Read(ctx, txn, path[:len(path)-1])
+ if err != nil {
+ if !storage.IsNotFound(err) {
+ return err
+ }
+
+ if err := storage.MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil {
+ return err
+ }
+ }
+ case "remove":
+ op = storage.RemoveOp
+ case "replace":
+ op = storage.ReplaceOp
+ default:
+ return fmt.Errorf("bad patch operation: %v", pat.Op)
+ }
+
+ // apply the patch
+ if err := store.Write(ctx, txn, op, path, pat.Value); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Helpers for the older single (unnamed) bundle style manifest storage.
+
+// LegacyManifestStoragePath is the older unnamed bundle path for manifests to be stored.
+// Deprecated: Use ManifestStoragePath and named bundles instead.
+var legacyManifestStoragePath = storage.MustParsePath("/system/bundle/manifest")
+var legacyRevisionStoragePath = append(legacyManifestStoragePath, "revision")
+
+// LegacyWriteManifestToStore will write the bundle manifest to the older single (unnamed) bundle manifest location.
+// Deprecated: Use WriteManifestToStore and named bundles instead.
+func LegacyWriteManifestToStore(ctx context.Context, store storage.Store, txn storage.Transaction, manifest Manifest) error {
+ return write(ctx, store, txn, legacyManifestStoragePath, manifest)
+}
+
+// LegacyEraseManifestFromStore will erase the bundle manifest from the older single (unnamed) bundle manifest location.
+// Deprecated: Use WriteManifestToStore and named bundles instead.
+func LegacyEraseManifestFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) error {
+ err := store.Write(ctx, txn, storage.RemoveOp, legacyManifestStoragePath, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// LegacyReadRevisionFromStore will read the bundle manifest revision from the older single (unnamed) bundle manifest location.
+// Deprecated: Use ReadBundleRevisionFromStore and named bundles instead.
+func LegacyReadRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) (string, error) {
+ return readRevisionFromStore(ctx, store, txn, legacyRevisionStoragePath)
+}
+
+// ActivateLegacy calls Activate for the bundles but will also write their manifest to the older unnamed store location.
+// Deprecated: Use Activate with named bundles instead.
+func ActivateLegacy(opts *ActivateOpts) error {
+ opts.legacy = true
+ return activateBundles(opts)
+}
+
+// GetActivator returns the Activator registered under the given id
+func GetActivator(id string) (Activator, error) {
+ activator, ok := activators[id]
+
+ if !ok {
+ return nil, fmt.Errorf("no activator exists under id %s", id)
+ }
+
+ return activator, nil
+}
+
+// RegisterActivator registers a bundle Activator under the given id.
+// The id value can later be referenced in ActivateOpts.Plugin to specify
+// which activator should be used for that bundle activation operation.
+// Note: This must be called *before* RegisterDefaultBundleActivator.
+func RegisterActivator(id string, a Activator) {
+ activatorMtx.Lock()
+ defer activatorMtx.Unlock()
+
+ if id == defaultActivatorID {
+ panic("cannot use reserved activator id, use a different id")
+ }
+
+ activators[id] = a
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/verify.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/verify.go
new file mode 100644
index 0000000000..82e308b49e
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/verify.go
@@ -0,0 +1,290 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package bundle provide helpers that assist in the bundle signature verification process
+package bundle
+
+import (
+ "bytes"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jws/jwsbb"
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+// parseVerificationKey converts a string key to the appropriate type for jws.Verify
+func parseVerificationKey(keyData string, alg jwa.SignatureAlgorithm) (any, error) {
+ // For HMAC algorithms, return the key as bytes
+ if alg == jwa.HS256() || alg == jwa.HS384() || alg == jwa.HS512() {
+ return []byte(keyData), nil
+ }
+
+ // For RSA/ECDSA algorithms, try to parse as PEM first
+ block, _ := pem.Decode([]byte(keyData))
+ if block != nil {
+ switch block.Type {
+ case "RSA PUBLIC KEY":
+ return x509.ParsePKCS1PublicKey(block.Bytes)
+ case "PUBLIC KEY":
+ return x509.ParsePKIXPublicKey(block.Bytes)
+ case "RSA PRIVATE KEY":
+ return x509.ParsePKCS1PrivateKey(block.Bytes)
+ case "PRIVATE KEY":
+ return x509.ParsePKCS8PrivateKey(block.Bytes)
+ case "EC PRIVATE KEY":
+ return x509.ParseECPrivateKey(block.Bytes)
+ case "CERTIFICATE":
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ return cert.PublicKey, nil
+ }
+ }
+
+ return nil, errors.New("failed to parse PEM block containing the key")
+}
+
+const defaultVerifierID = "_default"
+
+var verifiers map[string]Verifier
+
+// Verifier is the interface expected for implementations that verify bundle signatures.
+type Verifier interface {
+ VerifyBundleSignature(SignaturesConfig, *VerificationConfig) (map[string]FileInfo, error)
+}
+
+// VerifyBundleSignature will retrieve the Verifier implementation based
+// on the Plugin specified in SignaturesConfig, and call its implementation
+// of VerifyBundleSignature. VerifyBundleSignature verifies the bundle signature
+// using the given public keys or secret. If a signature is verified, it keeps
+// track of the files specified in the JWT payload
+func VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) {
+ // default implementation does not return a nil for map, so don't
+ // do it here either
+ files := make(map[string]FileInfo)
+ var plugin string
+ // for backwards compatibility, check if there is no plugin specified, and use default
+ if sc.Plugin == "" {
+ plugin = defaultVerifierID
+ } else {
+ plugin = sc.Plugin
+ }
+ verifier, err := GetVerifier(plugin)
+ if err != nil {
+ return files, err
+ }
+ return verifier.VerifyBundleSignature(sc, bvc)
+}
+
+// DefaultVerifier is the default bundle verification implementation. It verifies bundles by checking
+// the JWT signature using a locally-accessible public key.
+type DefaultVerifier struct{}
+
+// VerifyBundleSignature verifies the bundle signature using the given public keys or secret.
+// If a signature is verified, it keeps track of the files specified in the JWT payload
+func (*DefaultVerifier) VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) {
+ files := make(map[string]FileInfo)
+
+ if len(sc.Signatures) == 0 {
+ return files, errors.New(".signatures.json: missing JWT (expected exactly one)")
+ }
+
+ if len(sc.Signatures) > 1 {
+ return files, errors.New(".signatures.json: multiple JWTs not supported (expected exactly one)")
+ }
+
+ for _, token := range sc.Signatures {
+ payload, err := verifyJWTSignature(token, bvc)
+ if err != nil {
+ return files, err
+ }
+
+ for _, file := range payload.Files {
+ files[file.Name] = file
+ }
+ }
+ return files, nil
+}
+
+func verifyJWTSignature(token string, bvc *VerificationConfig) (*DecodedSignature, error) {
+ tokbytes := []byte(token)
+ hdrb64, payloadb64, signatureb64, err := jwsbb.SplitCompact(tokbytes)
+ if err != nil {
+ return nil, fmt.Errorf("failed to split compact JWT: %w", err)
+ }
+
+ // check for the id of the key to use for JWT signature verification
+ // first in the OPA config. If not found, then check the JWT kid.
+ keyID := bvc.KeyID
+ if keyID == "" {
+ // Use jwsbb.Header to access into the "kid" header field, which we will
+ // use to determine the key to use for verification.
+ hdr := jwsbb.HeaderParseCompact(hdrb64)
+ v, err := jwsbb.HeaderGetString(hdr, "kid")
+ switch {
+ case err == nil:
+ // err == nils means we found the key ID in the header
+ keyID = v
+ case errors.Is(err, jwsbb.ErrHeaderNotFound()):
+ // no "kid" in the header. no op.
+ default:
+ // some other error occurred while trying to extract the key ID
+ return nil, fmt.Errorf("failed to extract key ID from headers: %w", err)
+ }
+ }
+
+ // Because we want to fallback to ds.KeyID when we can't find the
+ // keyID, we need to parse the payload here already.
+ //
+ // (lestrrat) Whoa, you're going to trust the payload before you
+ // verify the signature? Even if it's for backwrds compatibility,
+ // Is this OK?
+ decoder := base64.RawURLEncoding
+ payload := make([]byte, decoder.DecodedLen(len(payloadb64)))
+ if _, err := decoder.Decode(payload, payloadb64); err != nil {
+ return nil, fmt.Errorf("failed to base64 decode JWT payload: %w", err)
+ }
+
+ var ds DecodedSignature
+ if err := json.Unmarshal(payload, &ds); err != nil {
+ return nil, err
+ }
+
+ // If header has no key id, check the deprecated key claim.
+ if keyID == "" {
+ keyID = ds.KeyID
+ }
+
+ // If we still don't have a keyID, we cannot proceed
+ if keyID == "" {
+ return nil, errors.New("verification key ID is empty")
+ }
+
+ // now that we have the keyID, fetch the actual key
+ keyConfig, err := bvc.GetPublicKey(keyID)
+ if err != nil {
+ return nil, err
+ }
+
+ alg, ok := jwa.LookupSignatureAlgorithm(keyConfig.Algorithm)
+ if !ok {
+ return nil, fmt.Errorf("unknown signature algorithm: %s", keyConfig.Algorithm)
+ }
+
+ // Parse the key into the appropriate type
+ parsedKey, err := parseVerificationKey(keyConfig.Key, alg)
+ if err != nil {
+ return nil, err
+ }
+
+ signature := make([]byte, decoder.DecodedLen(len(signatureb64)))
+ if _, err = decoder.Decode(signature, signatureb64); err != nil {
+ return nil, fmt.Errorf("failed to base64 decode JWT signature: %w", err)
+ }
+
+ signbuf := make([]byte, len(hdrb64)+1+len(payloadb64))
+ copy(signbuf, hdrb64)
+ signbuf[len(hdrb64)] = '.'
+ copy(signbuf[len(hdrb64)+1:], payloadb64)
+
+ if err := jwsbb.Verify(parsedKey, alg.String(), signbuf, signature); err != nil {
+ return nil, fmt.Errorf("failed to verify JWT signature: %w", err)
+ }
+
+ // verify the scope
+ scope := bvc.Scope
+ if scope == "" {
+ scope = keyConfig.Scope
+ }
+
+ if ds.Scope != scope {
+ return nil, errors.New("scope mismatch")
+ }
+ return &ds, nil
+}
+
+// VerifyBundleFile verifies the hash of a file in the bundle matches to that provided in the bundle's signature
+func VerifyBundleFile(path string, data bytes.Buffer, files map[string]FileInfo) error {
+ var file FileInfo
+ var ok bool
+
+ if file, ok = files[path]; !ok {
+ return fmt.Errorf("file %v not included in bundle signature", path)
+ }
+
+ if file.Algorithm == "" {
+ return fmt.Errorf("no hashing algorithm provided for file %v", path)
+ }
+
+ hash, err := NewSignatureHasher(HashingAlgorithm(file.Algorithm))
+ if err != nil {
+ return err
+ }
+
+ // hash the file content
+ // For unstructured files, hash the byte stream of the file
+ // For structured files, read the byte stream and parse into a JSON structure;
+ // then recursively order the fields of all objects alphabetically and then apply
+ // the hash function to result to compute the hash. This ensures that the digital signature is
+ // independent of whitespace and other non-semantic JSON features.
+ var value any
+ if IsStructuredDoc(path) {
+ err := util.Unmarshal(data.Bytes(), &value)
+ if err != nil {
+ return err
+ }
+ } else {
+ value = data.Bytes()
+ }
+
+ bs, err := hash.HashFile(value)
+ if err != nil {
+ return err
+ }
+
+ // compare file hash with same file in the JWT payloads
+ fb, err := hex.DecodeString(file.Hash)
+ if err != nil {
+ return err
+ }
+
+ if !bytes.Equal(fb, bs) {
+ return fmt.Errorf("%v: digest mismatch (want: %x, got: %x)", path, fb, bs)
+ }
+
+ delete(files, path)
+ return nil
+}
+
+// GetVerifier returns the Verifier registered under the given id
+func GetVerifier(id string) (Verifier, error) {
+ verifier, ok := verifiers[id]
+ if !ok {
+ return nil, fmt.Errorf("no verifier exists under id %s", id)
+ }
+ return verifier, nil
+}
+
+// RegisterVerifier registers a Verifier under the given id
+func RegisterVerifier(id string, v Verifier) error {
+ if id == defaultVerifierID {
+ return fmt.Errorf("verifier id %s is reserved, use a different id", id)
+ }
+ verifiers[id] = v
+ return nil
+}
+
+func init() {
+ verifiers = map[string]Verifier{
+ defaultVerifierID: &DefaultVerifier{},
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/capabilities/capabilities.go b/vendor/github.com/open-policy-agent/opa/v1/capabilities/capabilities.go
new file mode 100644
index 0000000000..5b0bb1ea52
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/capabilities/capabilities.go
@@ -0,0 +1,18 @@
+// Copyright 2021 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+//go:build go1.16
+// +build go1.16
+
+package capabilities
+
+import (
+ v0 "github.com/open-policy-agent/opa/capabilities"
+)
+
+// FS contains the embedded capabilities/ directory of the built version,
+// which has all the capabilities of previous versions:
+// "v0.18.0.json" contains the capabilities JSON of version v0.18.0, etc
+
+var FS = v0.FS
diff --git a/vendor/github.com/open-policy-agent/opa/config/config.go b/vendor/github.com/open-policy-agent/opa/v1/config/config.go
similarity index 60%
rename from vendor/github.com/open-policy-agent/opa/config/config.go
rename to vendor/github.com/open-policy-agent/opa/v1/config/config.go
index 87ab109113..1912d1f38c 100644
--- a/vendor/github.com/open-policy-agent/opa/config/config.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/config/config.go
@@ -7,19 +7,74 @@ package config
import (
"encoding/json"
+ "errors"
"fmt"
+ "maps"
"os"
"path/filepath"
"reflect"
"sort"
"strings"
- "github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/internal/ref"
- "github.com/open-policy-agent/opa/util"
- "github.com/open-policy-agent/opa/version"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/util"
+ "github.com/open-policy-agent/opa/v1/version"
)
+// ServerConfig represents the different server configuration options.
+type ServerConfig struct {
+ Metrics json.RawMessage `json:"metrics,omitempty"`
+
+ Encoding json.RawMessage `json:"encoding,omitempty"`
+ Decoding json.RawMessage `json:"decoding,omitempty"`
+}
+
+// Clone creates a deep copy of ServerConfig.
+func (s *ServerConfig) Clone() *ServerConfig {
+ if s == nil {
+ return nil
+ }
+
+ clone := &ServerConfig{}
+
+ if s.Encoding != nil {
+ clone.Encoding = make(json.RawMessage, len(s.Encoding))
+ copy(clone.Encoding, s.Encoding)
+ }
+ if s.Decoding != nil {
+ clone.Decoding = make(json.RawMessage, len(s.Decoding))
+ copy(clone.Decoding, s.Decoding)
+ }
+ if s.Metrics != nil {
+ clone.Metrics = make(json.RawMessage, len(s.Metrics))
+ copy(clone.Metrics, s.Metrics)
+ }
+
+ return clone
+}
+
+// StorageConfig represents Config's storage options.
+type StorageConfig struct {
+ Disk json.RawMessage `json:"disk,omitempty"`
+}
+
+// Clone creates a deep copy of StorageConfig.
+func (s *StorageConfig) Clone() *StorageConfig {
+ if s == nil {
+ return nil
+ }
+
+ clone := &StorageConfig{}
+
+ if s.Disk != nil {
+ clone.Disk = make(json.RawMessage, len(s.Disk))
+ copy(clone.Disk, s.Disk)
+ }
+
+ return clone
+}
+
// Config represents the configuration file that OPA can be started with.
type Config struct {
Services json.RawMessage `json:"services,omitempty"`
@@ -37,15 +92,9 @@ type Config struct {
NDBuiltinCache bool `json:"nd_builtin_cache,omitempty"`
PersistenceDirectory *string `json:"persistence_directory,omitempty"`
DistributedTracing json.RawMessage `json:"distributed_tracing,omitempty"`
- Server *struct {
- Encoding json.RawMessage `json:"encoding,omitempty"`
- Decoding json.RawMessage `json:"decoding,omitempty"`
- Metrics json.RawMessage `json:"metrics,omitempty"`
- } `json:"server,omitempty"`
- Storage *struct {
- Disk json.RawMessage `json:"disk,omitempty"`
- } `json:"storage,omitempty"`
- Extra map[string]json.RawMessage `json:"-"`
+ Server *ServerConfig `json:"server,omitempty"`
+ Storage *StorageConfig `json:"storage,omitempty"`
+ Extra map[string]json.RawMessage `json:"-"`
}
// ParseConfig returns a valid Config object with defaults injected. The id
@@ -98,7 +147,7 @@ func (c Config) PluginNames() (result []string) {
// PluginsEnabled returns true if one or more plugin features are enabled.
//
-// Deprecated. Use PluginNames instead.
+// Deprecated: Use PluginNames instead.
func (c Config) PluginsEnabled() bool {
return c.Bundle != nil || c.Bundles != nil || c.DecisionLogs != nil || c.Status != nil || len(c.Plugins) > 0
}
@@ -121,38 +170,6 @@ func (c Config) NDBuiltinCacheEnabled() bool {
return c.NDBuiltinCache
}
-func (c *Config) validateAndInjectDefaults(id string) error {
-
- if c.DefaultDecision == nil {
- s := defaultDecisionPath
- c.DefaultDecision = &s
- }
-
- _, err := ref.ParseDataPath(*c.DefaultDecision)
- if err != nil {
- return err
- }
-
- if c.DefaultAuthorizationDecision == nil {
- s := defaultAuthorizationDecisionPath
- c.DefaultAuthorizationDecision = &s
- }
-
- _, err = ref.ParseDataPath(*c.DefaultAuthorizationDecision)
- if err != nil {
- return err
- }
-
- if c.Labels == nil {
- c.Labels = map[string]string{}
- }
-
- c.Labels["id"] = id
- c.Labels["version"] = version.Version
-
- return nil
-}
-
// GetPersistenceDirectory returns the configured persistence directory, or $PWD/.opa if none is configured
func (c Config) GetPersistenceDirectory() (string, error) {
if c.PersistenceDirectory == nil {
@@ -167,13 +184,13 @@ func (c Config) GetPersistenceDirectory() (string, error) {
// ActiveConfig returns OPA's active configuration
// with the credentials and crypto keys removed
-func (c *Config) ActiveConfig() (interface{}, error) {
+func (c *Config) ActiveConfig() (any, error) {
bs, err := json.Marshal(c)
if err != nil {
return nil, err
}
- var result map[string]interface{}
+ var result map[string]any
if err := util.UnmarshalJSON(bs, &result); err != nil {
return nil, err
}
@@ -196,11 +213,128 @@ func (c *Config) ActiveConfig() (interface{}, error) {
return result, nil
}
-func removeServiceCredentials(x interface{}) error {
+// Clone creates a deep copy of the Config struct
+func (c *Config) Clone() *Config {
+ if c == nil {
+ return nil
+ }
+
+ clone := &Config{
+ NDBuiltinCache: c.NDBuiltinCache,
+ Server: c.Server.Clone(),
+ Storage: c.Storage.Clone(),
+ Labels: maps.Clone(c.Labels),
+ }
+
+ if c.Services != nil {
+ clone.Services = make(json.RawMessage, len(c.Services))
+ copy(clone.Services, c.Services)
+ }
+ if c.Discovery != nil {
+ clone.Discovery = make(json.RawMessage, len(c.Discovery))
+ copy(clone.Discovery, c.Discovery)
+ }
+ if c.Bundle != nil {
+ clone.Bundle = make(json.RawMessage, len(c.Bundle))
+ copy(clone.Bundle, c.Bundle)
+ }
+ if c.Bundles != nil {
+ clone.Bundles = make(json.RawMessage, len(c.Bundles))
+ copy(clone.Bundles, c.Bundles)
+ }
+ if c.DecisionLogs != nil {
+ clone.DecisionLogs = make(json.RawMessage, len(c.DecisionLogs))
+ copy(clone.DecisionLogs, c.DecisionLogs)
+ }
+ if c.Status != nil {
+ clone.Status = make(json.RawMessage, len(c.Status))
+ copy(clone.Status, c.Status)
+ }
+ if c.Keys != nil {
+ clone.Keys = make(json.RawMessage, len(c.Keys))
+ copy(clone.Keys, c.Keys)
+ }
+ if c.Caching != nil {
+ clone.Caching = make(json.RawMessage, len(c.Caching))
+ copy(clone.Caching, c.Caching)
+ }
+ if c.DistributedTracing != nil {
+ clone.DistributedTracing = make(json.RawMessage, len(c.DistributedTracing))
+ copy(clone.DistributedTracing, c.DistributedTracing)
+ }
+
+ if c.DefaultDecision != nil {
+ s := *c.DefaultDecision
+ clone.DefaultDecision = &s
+ }
+ if c.DefaultAuthorizationDecision != nil {
+ s := *c.DefaultAuthorizationDecision
+ clone.DefaultAuthorizationDecision = &s
+ }
+ if c.PersistenceDirectory != nil {
+ s := *c.PersistenceDirectory
+ clone.PersistenceDirectory = &s
+ }
+
+ if c.Plugins != nil {
+ clone.Plugins = make(map[string]json.RawMessage, len(c.Plugins))
+ for k, v := range c.Plugins {
+ if v != nil {
+ clone.Plugins[k] = make(json.RawMessage, len(v))
+ copy(clone.Plugins[k], v)
+ }
+ }
+ }
+
+ if c.Extra != nil {
+ clone.Extra = make(map[string]json.RawMessage, len(c.Extra))
+ for k, v := range c.Extra {
+ if v != nil {
+ clone.Extra[k] = make(json.RawMessage, len(v))
+ copy(clone.Extra[k], v)
+ }
+ }
+ }
+
+ return clone
+}
+
+func (c *Config) validateAndInjectDefaults(id string) error {
+ if c.DefaultDecision == nil {
+ s := defaultDecisionPath
+ c.DefaultDecision = &s
+ }
+
+ _, err := ref.ParseDataPath(*c.DefaultDecision)
+ if err != nil {
+ return err
+ }
+
+ if c.DefaultAuthorizationDecision == nil {
+ s := defaultAuthorizationDecisionPath
+ c.DefaultAuthorizationDecision = &s
+ }
+
+ _, err = ref.ParseDataPath(*c.DefaultAuthorizationDecision)
+ if err != nil {
+ return err
+ }
+
+ if c.Labels == nil {
+ c.Labels = map[string]string{}
+ }
+
+ c.Labels["id"] = id
+ c.Labels["version"] = version.Version
+
+ return nil
+}
+
+func removeServiceCredentials(x any) error {
switch x := x.(type) {
case nil:
return nil
- case []interface{}:
+ case []any:
for _, v := range x {
err := removeKey(v, "credentials")
if err != nil {
@@ -208,7 +342,7 @@ func removeServiceCredentials(x interface{}) error {
}
}
- case map[string]interface{}:
+ case map[string]any:
for _, v := range x {
err := removeKey(v, "credentials")
if err != nil {
@@ -222,11 +356,11 @@ func removeServiceCredentials(x interface{}) error {
return nil
}
-func removeCryptoKeys(x interface{}) error {
+func removeCryptoKeys(x any) error {
switch x := x.(type) {
case nil:
return nil
- case map[string]interface{}:
+ case map[string]any:
for _, v := range x {
err := removeKey(v, "key", "private_key")
if err != nil {
@@ -240,10 +374,10 @@ func removeCryptoKeys(x interface{}) error {
return nil
}
-func removeKey(x interface{}, keys ...string) error {
- val, ok := x.(map[string]interface{})
+func removeKey(x any, keys ...string) error {
+ val, ok := x.(map[string]any)
if !ok {
- return fmt.Errorf("type assertion error")
+ return errors.New("type assertion error")
}
for _, key := range keys {
diff --git a/vendor/github.com/open-policy-agent/opa/format/format.go b/vendor/github.com/open-policy-agent/opa/v1/format/format.go
similarity index 51%
rename from vendor/github.com/open-policy-agent/opa/format/format.go
rename to vendor/github.com/open-policy-agent/opa/v1/format/format.go
index 43e5594669..75514d39c0 100644
--- a/vendor/github.com/open-policy-agent/opa/format/format.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/format/format.go
@@ -7,15 +7,31 @@ package format
import (
"bytes"
+ "errors"
"fmt"
"regexp"
+ "slices"
"sort"
"strings"
"unicode"
- "github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/internal/future"
- "github.com/open-policy-agent/opa/types"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/types"
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+// defaultLocationFile is the file name used in `Ast()` for terms
+// without a location, as could happen when pretty-printing the
+// results of partial eval.
+const defaultLocationFile = "__format_default__"
+
+var (
+ elseVar ast.Value = ast.Var("else")
+
+ expandedConst = ast.NewBody(ast.NewExpr(ast.InternedTerm(true)))
+ commentsSlicePool = util.NewSlicePool[*ast.Comment](50)
+ varRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$")
)
// Opts lets you control the code formatting via `AstWithOpts()`.
@@ -31,30 +47,50 @@ type Opts struct {
// ParserOptions is the parser options used when parsing the module to be formatted.
ParserOptions *ast.ParserOptions
+
+ // DropV0Imports instructs the formatter to drop all v0 imports from the module; i.e. 'rego.v1' and 'future.keywords' imports.
+ // Imports are only removed if [Opts.RegoVersion] makes them redundant.
+ DropV0Imports bool
+
+ // SkipDefensiveCopying, if true, will avoid deep-copying the AST before formatting it.
+ // This is true by default for all Source* functions, but false by default for Ast* functions,
+ // as some formatting operations may otherwise mutate the AST.
+ SkipDefensiveCopying bool
+
+ Capabilities *ast.Capabilities
}
-// defaultLocationFile is the file name used in `Ast()` for terms
-// without a location, as could happen when pretty-printing the
-// results of partial eval.
-const defaultLocationFile = "__format_default__"
+func (o Opts) effectiveRegoVersion() ast.RegoVersion {
+ if o.RegoVersion == ast.RegoUndefined {
+ return ast.DefaultRegoVersion
+ }
+ return o.RegoVersion
+}
// Source formats a Rego source file. The bytes provided must describe a complete
// Rego module. If they don't, Source will return an error resulting from the attempt
// to parse the bytes.
func Source(filename string, src []byte) ([]byte, error) {
- return SourceWithOpts(filename, src, Opts{})
+ return SourceWithOpts(filename, src, Opts{SkipDefensiveCopying: true})
}
func SourceWithOpts(filename string, src []byte, opts Opts) ([]byte, error) {
+ regoVersion := opts.effectiveRegoVersion()
+
var parserOpts ast.ParserOptions
if opts.ParserOptions != nil {
parserOpts = *opts.ParserOptions
- } else {
- if opts.RegoVersion == ast.RegoV1 {
- // If the rego version is V1, we need to parse it as such, to allow for future keywords not being imported.
- // Otherwise, we'll default to the default rego-version.
- parserOpts.RegoVersion = ast.RegoV1
- }
+ } else if regoVersion == ast.RegoV1 {
+ // If the rego version is V1, we need to parse it as such, to allow for future keywords not being imported.
+ // Otherwise, we'll default to the default rego-version.
+ parserOpts.RegoVersion = ast.RegoV1
+ }
+
+ // Copying the node does not make sense when both input and output are bytes.
+ opts.SkipDefensiveCopying = true
+
+ if parserOpts.RegoVersion == ast.RegoUndefined {
+ parserOpts.RegoVersion = ast.DefaultRegoVersion
}
module, err := ast.ParseModuleWithOpts(filename, string(src), parserOpts)
@@ -62,15 +98,15 @@ func SourceWithOpts(filename string, src []byte, opts Opts) ([]byte, error) {
return nil, err
}
- if opts.RegoVersion == ast.RegoV0CompatV1 || opts.RegoVersion == ast.RegoV1 {
+ if regoVersion == ast.RegoV0CompatV1 || regoVersion == ast.RegoV1 {
checkOpts := ast.NewRegoCheckOptions()
// The module is parsed as v0, so we need to disable checks that will be automatically amended by the AstWithOpts call anyways.
checkOpts.RequireIfKeyword = false
checkOpts.RequireContainsKeyword = false
checkOpts.RequireRuleBodyOrValue = false
- errors := ast.CheckRegoV1WithOptions(module, checkOpts)
- if len(errors) > 0 {
- return nil, errors
+ errs := ast.CheckRegoV1WithOptions(module, checkOpts)
+ if len(errs) > 0 {
+ return nil, errs
}
}
@@ -83,8 +119,8 @@ func SourceWithOpts(filename string, src []byte, opts Opts) ([]byte, error) {
}
// MustAst is a helper function to format a Rego AST element. If any errors
-// occurs this function will panic. This is mostly used for test
-func MustAst(x interface{}) []byte {
+// occur this function will panic. This is mostly used for test
+func MustAst(x any) []byte {
bs, err := Ast(x)
if err != nil {
panic(err)
@@ -93,8 +129,8 @@ func MustAst(x interface{}) []byte {
}
// MustAstWithOpts is a helper function to format a Rego AST element. If any errors
-// occurs this function will panic. This is mostly used for test
-func MustAstWithOpts(x interface{}, opts Opts) []byte {
+// occur this function will panic. This is mostly used for test
+func MustAstWithOpts(x any, opts Opts) []byte {
bs, err := AstWithOpts(x, opts)
if err != nil {
panic(err)
@@ -105,7 +141,7 @@ func MustAstWithOpts(x interface{}, opts Opts) []byte {
// Ast formats a Rego AST element. If the passed value is not a valid AST
// element, Ast returns nil and an error. If AST nodes are missing locations
// an arbitrary location will be used.
-func Ast(x interface{}) ([]byte, error) {
+func Ast(x any) ([]byte, error) {
return AstWithOpts(x, Opts{})
}
@@ -127,7 +163,12 @@ type fmtOpts struct {
refHeads bool
regoV1 bool
+ regoV1Imported bool
futureKeywords []string
+
+ // If true, the formatter will retain keywords in refs, e.g. `p.not ` instead of `p["not"]`.
+ // The format of the original ref is preserved, so `p["not"]` will still be formatted as `p["not"]`.
+ allowKeywordsInRefs bool
}
func (o fmtOpts) keywords() []string {
@@ -138,11 +179,13 @@ func (o fmtOpts) keywords() []string {
return append(kws, o.futureKeywords...)
}
-func AstWithOpts(x interface{}, opts Opts) ([]byte, error) {
+func AstWithOpts(x any, opts Opts) ([]byte, error) {
// The node has to be deep copied because it may be mutated below. Alternatively,
// we could avoid the copy by checking if mutation will occur first. For now,
// since format is not latency sensitive, just deep copy in all cases.
- x = ast.Copy(x)
+ if !opts.SkipDefensiveCopying {
+ x = ast.Copy(x)
+ }
wildcards := map[ast.Var]*ast.Term{}
@@ -154,12 +197,22 @@ func AstWithOpts(x interface{}, opts Opts) ([]byte, error) {
o := fmtOpts{}
- if opts.RegoVersion == ast.RegoV0CompatV1 || opts.RegoVersion == ast.RegoV1 {
+ regoVersion := opts.effectiveRegoVersion()
+ if regoVersion == ast.RegoV0CompatV1 || regoVersion == ast.RegoV1 {
o.regoV1 = true
o.ifs = true
o.contains = true
}
+ capabilities := opts.Capabilities
+ if capabilities == nil {
+ capabilities = ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(opts.effectiveRegoVersion()))
+ }
+ o.allowKeywordsInRefs = capabilities.ContainsFeature(ast.FeatureKeywordsInRefs)
+
+ memberRef := ast.Member.Ref()
+ memberWithKeyRef := ast.MemberWithKey.Ref()
+
// Preprocess the AST. Set any required defaults and calculate
// values required for printing the formatted output.
ast.WalkNodes(x, func(x ast.Node) bool {
@@ -173,7 +226,7 @@ func AstWithOpts(x interface{}, opts Opts) ([]byte, error) {
case *ast.Expr:
switch {
- case n.IsCall() && ast.Member.Ref().Equal(n.Operator()) || ast.MemberWithKey.Ref().Equal(n.Operator()):
+ case n.IsCall() && memberRef.Equal(n.Operator()) || memberWithKeyRef.Equal(n.Operator()):
extraFutureKeywordImports["in"] = struct{}{}
case n.IsEvery():
extraFutureKeywordImports["every"] = struct{}{}
@@ -186,6 +239,7 @@ func AstWithOpts(x interface{}, opts Opts) ([]byte, error) {
switch {
case isRegoV1Compatible(n):
+ o.regoV1Imported = true
o.contains = true
o.ifs = true
case future.IsAllFutureKeywords(n):
@@ -198,10 +252,11 @@ func AstWithOpts(x interface{}, opts Opts) ([]byte, error) {
}
case *ast.Rule:
- if len(n.Head.Ref()) > 2 {
+ headLen := len(n.Head.Ref())
+ if headLen > 2 {
o.refHeads = true
}
- if len(n.Head.Ref()) == 2 && n.Head.Key != nil && n.Head.Value == nil { // p.q contains "x"
+ if headLen == 2 && n.Head.Key != nil && n.Head.Value == nil { // p.q contains "x"
o.refHeads = true
}
}
@@ -220,43 +275,83 @@ func AstWithOpts(x interface{}, opts Opts) ([]byte, error) {
switch x := x.(type) {
case *ast.Module:
- if opts.RegoVersion == ast.RegoV1 {
+ if regoVersion == ast.RegoV1 && opts.DropV0Imports {
x.Imports = filterRegoV1Import(x.Imports)
- } else if opts.RegoVersion == ast.RegoV0CompatV1 {
+ } else if regoVersion == ast.RegoV0CompatV1 {
x.Imports = ensureRegoV1Import(x.Imports)
}
- if opts.RegoVersion == ast.RegoV0CompatV1 || opts.RegoVersion == ast.RegoV1 || moduleIsRegoV1Compatible(x) {
- x.Imports = future.FilterFutureImports(x.Imports)
+ regoV1Imported := slices.ContainsFunc(x.Imports, isRegoV1Compatible)
+ if regoVersion == ast.RegoV0CompatV1 || regoVersion == ast.RegoV1 || regoV1Imported {
+ if !opts.DropV0Imports && !regoV1Imported {
+ for _, kw := range o.futureKeywords {
+ x.Imports = ensureFutureKeywordImport(x.Imports, kw)
+ }
+ } else {
+ x.Imports = future.FilterFutureImports(x.Imports)
+ }
} else {
for kw := range extraFutureKeywordImports {
x.Imports = ensureFutureKeywordImport(x.Imports, kw)
}
}
- w.writeModule(x)
+ err := w.writeModule(x)
+ if err != nil {
+ w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error()))
+ }
case *ast.Package:
- w.writePackage(x, nil)
+ _, err := w.writePackage(x, nil)
+ if err != nil {
+ w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error()))
+ }
case *ast.Import:
- w.writeImports([]*ast.Import{x}, nil)
+ _, err := w.writeImports([]*ast.Import{x}, nil)
+ if err != nil {
+ w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error()))
+ }
case *ast.Rule:
- w.writeRule(x, false /* isElse */, nil)
+ _, err := w.writeRule(x, false /* isElse */, nil)
+ if err != nil {
+ w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error()))
+ }
case *ast.Head:
- w.writeHead(x,
+ _, err := w.writeHead(x,
false, // isDefault
false, // isExpandedConst
nil)
+ if err != nil {
+ w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error()))
+ }
case ast.Body:
- w.writeBody(x, nil)
+ _, err := w.writeBody(x, nil)
+ if err != nil {
+ return nil, err
+ }
case *ast.Expr:
- w.writeExpr(x, nil)
+ _, err := w.writeExpr(x, nil)
+ if err != nil {
+ w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error()))
+ }
case *ast.With:
- w.writeWith(x, nil, false)
+ _, err := w.writeWith(x, nil, false)
+ if err != nil {
+ w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error()))
+ }
case *ast.Term:
- w.writeTerm(x, nil)
+ _, err := w.writeTerm(x, nil)
+ if err != nil {
+ w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error()))
+ }
case ast.Value:
- w.writeTerm(&ast.Term{Value: x, Location: &ast.Location{}}, nil)
+ _, err := w.writeTerm(&ast.Term{Value: x, Location: &ast.Location{}}, nil)
+ if err != nil {
+ w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error()))
+ }
case *ast.Comment:
- w.writeComments([]*ast.Comment{x})
+ err := w.writeComments([]*ast.Comment{x})
+ if err != nil {
+ w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error()))
+ }
default:
return nil, fmt.Errorf("not an ast element: %v", x)
}
@@ -264,6 +359,7 @@ func AstWithOpts(x interface{}, opts Opts) ([]byte, error) {
if len(w.errs) > 0 {
return nil, w.errs
}
+
return squashTrailingNewlines(w.buf.Bytes()), nil
}
@@ -309,20 +405,21 @@ func defaultLocation(x ast.Node) *ast.Location {
type writer struct {
buf bytes.Buffer
- indent string
- level int
- inline bool
- beforeEnd *ast.Comment
- delay bool
- errs ast.Errors
- fmtOpts fmtOpts
+ indent string
+ level int
+ inline bool
+ beforeEnd *ast.Comment
+ delay bool
+ errs ast.Errors
+ fmtOpts fmtOpts
+ writeCommentOnFinalLine bool
}
-func (w *writer) writeModule(module *ast.Module) {
+func (w *writer) writeModule(module *ast.Module) error {
var pkg *ast.Package
- var others []interface{}
+ var others []any
var comments []*ast.Comment
- visitor := ast.NewGenericVisitor(func(x interface{}) bool {
+ visitor := ast.NewGenericVisitor(func(x any) bool {
switch x := x.(type) {
case *ast.Comment:
comments = append(comments, x)
@@ -340,23 +437,41 @@ func (w *writer) writeModule(module *ast.Module) {
visitor.Walk(module)
sort.Slice(comments, func(i, j int) bool {
- return locLess(comments[i], comments[j])
+ l, err := locLess(comments[i], comments[j])
+ if err != nil {
+ w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error()))
+ }
+ return l
})
sort.Slice(others, func(i, j int) bool {
- return locLess(others[i], others[j])
+ l, err := locLess(others[i], others[j])
+ if err != nil {
+ w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error()))
+ }
+ return l
})
comments = trimTrailingWhitespaceInComments(comments)
- comments = w.writePackage(pkg, comments)
+ var err error
+ comments, err = w.writePackage(pkg, comments)
+ if err != nil {
+ return err
+ }
var imports []*ast.Import
var rules []*ast.Rule
for len(others) > 0 {
imports, others = gatherImports(others)
- comments = w.writeImports(imports, comments)
+ comments, err = w.writeImports(imports, comments)
+ if err != nil {
+ return err
+ }
rules, others = gatherRules(others)
- comments = w.writeRules(rules, comments)
+ comments, err = w.writeRules(rules, comments)
+ if err != nil {
+ return err
+ }
}
for i, c := range comments {
@@ -365,6 +480,8 @@ func (w *writer) writeModule(module *ast.Module) {
w.write("\n")
}
}
+
+ return nil
}
func trimTrailingWhitespaceInComments(comments []*ast.Comment) []*ast.Comment {
@@ -375,45 +492,95 @@ func trimTrailingWhitespaceInComments(comments []*ast.Comment) []*ast.Comment {
return comments
}
-func (w *writer) writePackage(pkg *ast.Package, comments []*ast.Comment) []*ast.Comment {
- comments = w.insertComments(comments, pkg.Location)
+func (w *writer) writePackage(pkg *ast.Package, comments []*ast.Comment) ([]*ast.Comment, error) {
+ var err error
+ comments, err = w.insertComments(comments, pkg.Location)
+ if err != nil {
+ return nil, err
+ }
w.startLine()
// Omit head as all packages have the DefaultRootDocument prepended at parse time.
path := make(ast.Ref, len(pkg.Path)-1)
+ if len(path) == 0 {
+ w.errs = append(w.errs, ast.NewError(ast.FormatErr, pkg.Location, "invalid package path: %s", pkg.Path))
+ return comments, nil
+ }
+
path[0] = ast.VarTerm(string(pkg.Path[1].Value.(ast.String)))
copy(path[1:], pkg.Path[2:])
w.write("package ")
- w.writeRef(path)
+ _, err = w.writeRef(path, nil)
+ if err != nil {
+ return nil, err
+ }
w.blankLine()
- return comments
+ return comments, nil
}
-func (w *writer) writeComments(comments []*ast.Comment) {
- for i := 0; i < len(comments); i++ {
- if i > 0 && locCmp(comments[i], comments[i-1]) > 1 {
- w.blankLine()
+func (w *writer) writeComments(comments []*ast.Comment) error {
+ for i := range comments {
+ if i > 0 {
+ l, err := locCmp(comments[i], comments[i-1])
+ if err != nil {
+ return err
+ }
+ if l > 1 {
+ w.blankLine()
+ }
}
+
w.writeLine(comments[i].String())
}
+
+ return nil
}
-func (w *writer) writeRules(rules []*ast.Rule, comments []*ast.Comment) []*ast.Comment {
- for _, rule := range rules {
- comments = w.insertComments(comments, rule.Location)
- comments = w.writeRule(rule, false, comments)
+func (w *writer) writeRules(rules []*ast.Rule, comments []*ast.Comment) ([]*ast.Comment, error) {
+ for i, rule := range rules {
+ var err error
+ comments, err = w.insertComments(comments, rule.Location)
+ if err != nil && !errors.As(err, &unexpectedCommentError{}) {
+ w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error()))
+ }
+
+ comments, err = w.writeRule(rule, false, comments)
+ if err != nil && !errors.As(err, &unexpectedCommentError{}) {
+ w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error()))
+ }
+
+ if i < len(rules)-1 && w.groupableOneLiner(rule) {
+ next := rules[i+1]
+ if w.groupableOneLiner(next) && next.Location.Row == rule.Location.Row+1 {
+ // Current rule and the next are both groupable one-liners, and
+ // adjacent in the original policy (i.e. no extra newlines between them).
+ continue
+ }
+ }
w.blankLine()
}
- return comments
+ return comments, nil
}
-func (w *writer) writeRule(rule *ast.Rule, isElse bool, comments []*ast.Comment) []*ast.Comment {
+func (w *writer) groupableOneLiner(rule *ast.Rule) bool {
+ // Location required to determine if two rules are adjacent in the policy.
+ // If not, we respect line breaks between rules.
+ if len(rule.Body) > 1 || rule.Default || rule.Location == nil {
+ return false
+ }
+
+ partialSetException := w.fmtOpts.contains || rule.Head.Value != nil
+
+ return (w.fmtOpts.regoV1 || w.fmtOpts.ifs) && partialSetException
+}
+
+func (w *writer) writeRule(rule *ast.Rule, isElse bool, comments []*ast.Comment) ([]*ast.Comment, error) {
if rule == nil {
- return comments
+ return comments, nil
}
if !isElse {
@@ -428,37 +595,67 @@ func (w *writer) writeRule(rule *ast.Rule, isElse bool, comments []*ast.Comment)
// `foo = {"a": "b"} { true }` in the AST. We want to preserve that notation
// in the formatted code instead of expanding the bodies into rules, so we
// pretend that the rule has no body in this case.
- isExpandedConst := rule.Body.Equal(ast.NewBody(ast.NewExpr(ast.BooleanTerm(true)))) && rule.Else == nil
+ isExpandedConst := rule.Body.Equal(expandedConst) && rule.Else == nil
+ w.writeCommentOnFinalLine = isExpandedConst
- comments = w.writeHead(rule.Head, rule.Default, isExpandedConst, comments)
-
- // this excludes partial sets UNLESS `contains` is used
- partialSetException := w.fmtOpts.contains || rule.Head.Value != nil
+ var err error
+ var unexpectedComment bool
+ comments, err = w.writeHead(rule.Head, rule.Default, isExpandedConst, comments)
+ if err != nil {
+ if errors.As(err, &unexpectedCommentError{}) {
+ unexpectedComment = true
+ } else {
+ return nil, err
+ }
+ }
if len(rule.Body) == 0 || isExpandedConst {
w.endLine()
- return comments
+ return comments, nil
}
+ w.writeCommentOnFinalLine = true
+
+ // this excludes partial sets UNLESS `contains` is used
+ partialSetException := w.fmtOpts.contains || rule.Head.Value != nil
+
if (w.fmtOpts.regoV1 || w.fmtOpts.ifs) && partialSetException {
w.write(" if")
if len(rule.Body) == 1 {
if rule.Body[0].Location.Row == rule.Head.Location.Row {
w.write(" ")
- comments = w.writeExpr(rule.Body[0], comments)
+ var err error
+ comments, err = w.writeExpr(rule.Body[0], comments)
+ if err != nil {
+ return nil, err
+ }
w.endLine()
if rule.Else != nil {
- comments = w.writeElse(rule, comments)
+ comments, err = w.writeElse(rule, comments)
+ if err != nil {
+ return nil, err
+ }
}
- return comments
+ return comments, nil
}
}
}
- w.write(" {")
- w.endLine()
+ if unexpectedComment && len(comments) > 0 {
+ w.write(" { ")
+ } else {
+ w.write(" {")
+ w.endLine()
+ }
+
w.up()
- comments = w.writeBody(rule.Body, comments)
+ comments, err = w.writeBody(rule.Body, comments)
+ if err != nil {
+ // the unexpected comment error is passed up to be handled by writeHead
+ if !errors.As(err, &unexpectedCommentError{}) {
+ return nil, err
+ }
+ }
var closeLoc *ast.Location
@@ -470,18 +667,26 @@ func (w *writer) writeRule(rule *ast.Rule, isElse bool, comments []*ast.Comment)
closeLoc = closingLoc(0, 0, '{', '}', rule.Location)
}
- comments = w.insertComments(comments, closeLoc)
+ comments, err = w.insertComments(comments, closeLoc)
+ if err != nil {
+ return nil, err
+ }
- w.down()
+ if err := w.down(); err != nil {
+ return nil, err
+ }
w.startLine()
w.write("}")
if rule.Else != nil {
- comments = w.writeElse(rule, comments)
+ comments, err = w.writeElse(rule, comments)
+ if err != nil {
+ return nil, err
+ }
}
- return comments
+ return comments, nil
}
-func (w *writer) writeElse(rule *ast.Rule, comments []*ast.Comment) []*ast.Comment {
+func (w *writer) writeElse(rule *ast.Rule, comments []*ast.Comment) ([]*ast.Comment, error) {
// If there was nothing else on the line before the "else" starts
// then preserve this style of else block, otherwise it will be
// started as an "inline" else eg:
@@ -526,9 +731,17 @@ func (w *writer) writeElse(rule *ast.Rule, comments []*ast.Comment) []*ast.Comme
}
rule.Else.Head.Name = "else" // NOTE(sr): whaaat
- rule.Else.Head.Reference = ast.Ref{ast.VarTerm("else")}
+
+ elseHeadReference := ast.NewTerm(elseVar) // construct a reference for the term
+ elseHeadReference.Location = rule.Else.Head.Location // and set the location to match the rule location
+
+ rule.Else.Head.Reference = ast.Ref{elseHeadReference}
rule.Else.Head.Args = nil
- comments = w.insertComments(comments, rule.Else.Head.Location)
+ var err error
+ comments, err = w.insertComments(comments, rule.Else.Head.Location)
+ if err != nil {
+ return nil, err
+ }
if hasCommentAbove && !wasInline {
// The comments would have ended the line, be sure to start one again
@@ -546,14 +759,27 @@ func (w *writer) writeElse(rule *ast.Rule, comments []*ast.Comment) []*ast.Comme
return w.writeRule(rule.Else, true, comments)
}
-func (w *writer) writeHead(head *ast.Head, isDefault, isExpandedConst bool, comments []*ast.Comment) []*ast.Comment {
+func (w *writer) writeHead(head *ast.Head, isDefault bool, isExpandedConst bool, comments []*ast.Comment) ([]*ast.Comment, error) {
ref := head.Ref()
if head.Key != nil && head.Value == nil && !head.HasDynamicRef() {
ref = ref.GroundPrefix()
}
if w.fmtOpts.refHeads || len(ref) == 1 {
- w.writeRef(ref)
+ var err error
+ comments, err = w.writeRef(ref, comments)
+ if err != nil {
+ return nil, err
+ }
} else {
+ // if there are comments within the object in the rule head, don't format it
+ if len(comments) > 0 && ref[1].Location.Row == comments[0].Location.Row {
+ comments, err := w.writeUnformatted(head.Location, comments)
+ if err != nil {
+ return nil, err
+ }
+ return comments, nil
+ }
+
w.write(ref[0].String())
w.write("[")
w.write(ref[1].String())
@@ -562,26 +788,38 @@ func (w *writer) writeHead(head *ast.Head, isDefault, isExpandedConst bool, comm
if len(head.Args) > 0 {
w.write("(")
- var args []interface{}
+ var args []any
for _, arg := range head.Args {
args = append(args, arg)
}
- comments = w.writeIterable(args, head.Location, closingLoc(0, 0, '(', ')', head.Location), comments, w.listWriter())
+ var err error
+ comments, err = w.writeIterable(args, head.Location, closingLoc(0, 0, '(', ')', head.Location), comments, w.listWriter())
w.write(")")
+ if err != nil {
+ return comments, err
+ }
}
if head.Key != nil {
if w.fmtOpts.contains && head.Value == nil {
w.write(" contains ")
- comments = w.writeTerm(head.Key, comments)
+ var err error
+ comments, err = w.writeTerm(head.Key, comments)
+ if err != nil {
+ return comments, err
+ }
} else if head.Value == nil { // no `if` for p[x] notation
w.write("[")
- comments = w.writeTerm(head.Key, comments)
+ var err error
+ comments, err = w.writeTerm(head.Key, comments)
+ if err != nil {
+ return comments, err
+ }
w.write("]")
}
}
if head.Value != nil &&
- (head.Key != nil || ast.Compare(head.Value, ast.BooleanTerm(true)) != 0 || isExpandedConst || isDefault) {
+ (head.Key != nil || !ast.InternedTerm(true).Equal(head.Value) || isExpandedConst || isDefault) {
// in rego v1, explicitly print value for ref-head constants that aren't partial set assignments, e.g.:
// * a -> parser error, won't reach here
@@ -592,12 +830,12 @@ func (w *writer) writeHead(head *ast.Head, isDefault, isExpandedConst bool, comm
if head.Location == head.Value.Location &&
head.Name != "else" &&
- ast.Compare(head.Value, ast.BooleanTerm(true)) == 0 &&
+ ast.InternedTerm(true).Equal(head.Value) &&
!isRegoV1RefConst {
// If the value location is the same as the location of the head,
// we know that the value is generated, i.e. f(1)
// Don't print the value (` = true`) as it is implied.
- return comments
+ return comments, nil
}
if head.Assign || w.fmtOpts.regoV1 {
@@ -606,24 +844,35 @@ func (w *writer) writeHead(head *ast.Head, isDefault, isExpandedConst bool, comm
} else {
w.write(" = ")
}
- comments = w.writeTerm(head.Value, comments)
+ var err error
+ comments, err = w.writeTerm(head.Value, comments)
+ if err != nil {
+ return comments, err
+ }
}
- return comments
+ return comments, nil
}
-func (w *writer) insertComments(comments []*ast.Comment, loc *ast.Location) []*ast.Comment {
+func (w *writer) insertComments(comments []*ast.Comment, loc *ast.Location) ([]*ast.Comment, error) {
before, at, comments := partitionComments(comments, loc)
- w.writeComments(before)
+
+ err := w.writeComments(before)
+ if err != nil {
+ return nil, err
+ }
if len(before) > 0 && loc.Row-before[len(before)-1].Location.Row > 1 {
w.blankLine()
}
- w.beforeLineEnd(at)
- return comments
+ return comments, w.beforeLineEnd(at)
}
-func (w *writer) writeBody(body ast.Body, comments []*ast.Comment) []*ast.Comment {
- comments = w.insertComments(comments, body.Loc())
+func (w *writer) writeBody(body ast.Body, comments []*ast.Comment) ([]*ast.Comment, error) {
+ var err error
+ comments, err = w.insertComments(comments, body.Loc())
+ if err != nil {
+ return comments, err
+ }
for i, expr := range body {
// Insert a blank line in before the expression if it was not right
// after the previous expression.
@@ -640,14 +889,21 @@ func (w *writer) writeBody(body ast.Body, comments []*ast.Comment) []*ast.Commen
}
w.startLine()
- comments = w.writeExpr(expr, comments)
+ comments, err = w.writeExpr(expr, comments)
+ if err != nil && !errors.As(err, &unexpectedCommentError{}) {
+ w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error()))
+ }
w.endLine()
}
- return comments
+ return comments, nil
}
-func (w *writer) writeExpr(expr *ast.Expr, comments []*ast.Comment) []*ast.Comment {
- comments = w.insertComments(comments, expr.Location)
+func (w *writer) writeExpr(expr *ast.Expr, comments []*ast.Comment) ([]*ast.Comment, error) {
+ var err error
+ comments, err = w.insertComments(comments, expr.Location)
+ if err != nil {
+ return comments, err
+ }
if !w.inline {
w.startLine()
}
@@ -658,37 +914,65 @@ func (w *writer) writeExpr(expr *ast.Expr, comments []*ast.Comment) []*ast.Comme
switch t := expr.Terms.(type) {
case *ast.SomeDecl:
- comments = w.writeSomeDecl(t, comments)
+ comments, err = w.writeSomeDecl(t, comments)
+ if err != nil {
+ return nil, err
+ }
case *ast.Every:
- comments = w.writeEvery(t, comments)
+ comments, err = w.writeEvery(t, comments)
+ if err != nil {
+ return nil, err
+ }
case []*ast.Term:
- comments = w.writeFunctionCall(expr, comments)
+ comments, err = w.writeFunctionCall(expr, comments)
+ if err != nil {
+ return comments, err
+ }
case *ast.Term:
- comments = w.writeTerm(t, comments)
+ comments, err = w.writeTerm(t, comments)
+ if err != nil {
+ return comments, err
+ }
}
- var indented bool
+ var indented, down bool
for i, with := range expr.With {
if i == 0 || with.Location.Row == expr.With[i-1].Location.Row { // we're on the same line
- comments = w.writeWith(with, comments, false)
+ comments, err = w.writeWith(with, comments, false)
+ if err != nil {
+ return nil, err
+ }
} else { // we're on a new line
if !indented {
indented = true
w.up()
- defer w.down()
+ down = true
}
w.endLine()
w.startLine()
- comments = w.writeWith(with, comments, true)
+ comments, err = w.writeWith(with, comments, true)
+ if err != nil {
+ return nil, err
+ }
}
}
- return comments
+ if down {
+ if err := w.down(); err != nil {
+ return nil, err
+ }
+ }
+
+ return comments, nil
}
-func (w *writer) writeSomeDecl(decl *ast.SomeDecl, comments []*ast.Comment) []*ast.Comment {
- comments = w.insertComments(comments, decl.Location)
+func (w *writer) writeSomeDecl(decl *ast.SomeDecl, comments []*ast.Comment) ([]*ast.Comment, error) {
+ var err error
+ comments, err = w.insertComments(comments, decl.Location)
+ if err != nil {
+ return nil, err
+ }
w.write("some ")
row := decl.Location.Row
@@ -705,41 +989,66 @@ func (w *writer) writeSomeDecl(decl *ast.SomeDecl, comments []*ast.Comment) []*a
w.write(" ")
}
- comments = w.writeTerm(term, comments)
+ comments, err = w.writeTerm(term, comments)
+ if err != nil {
+ return nil, err
+ }
if i < len(decl.Symbols)-1 {
w.write(",")
}
case ast.Call:
- comments = w.writeInOperator(false, val[1:], comments, decl.Location, ast.BuiltinMap[val[0].String()].Decl)
+ comments, err = w.writeInOperator(false, val[1:], comments, decl.Location, ast.BuiltinMap[val[0].String()].Decl)
+ if err != nil {
+ return nil, err
+ }
}
}
- return comments
+ return comments, nil
}
-func (w *writer) writeEvery(every *ast.Every, comments []*ast.Comment) []*ast.Comment {
- comments = w.insertComments(comments, every.Location)
+func (w *writer) writeEvery(every *ast.Every, comments []*ast.Comment) ([]*ast.Comment, error) {
+ var err error
+ comments, err = w.insertComments(comments, every.Location)
+ if err != nil {
+ return nil, err
+ }
w.write("every ")
if every.Key != nil {
- comments = w.writeTerm(every.Key, comments)
+ comments, err = w.writeTerm(every.Key, comments)
+ if err != nil {
+ return nil, err
+ }
w.write(", ")
}
- comments = w.writeTerm(every.Value, comments)
+ comments, err = w.writeTerm(every.Value, comments)
+ if err != nil {
+ return nil, err
+ }
w.write(" in ")
- comments = w.writeTerm(every.Domain, comments)
+ comments, err = w.writeTerm(every.Domain, comments)
+ if err != nil {
+ return nil, err
+ }
w.write(" {")
- comments = w.writeComprehensionBody('{', '}', every.Body, every.Loc(), every.Loc(), comments)
+ comments, err = w.writeComprehensionBody('{', '}', every.Body, every.Loc(), every.Loc(), comments)
+ if err != nil {
+ // the unexpected comment error is passed up to be handled by writeHead
+ if !errors.As(err, &unexpectedCommentError{}) {
+ return nil, err
+ }
+ }
if len(every.Body) == 1 &&
every.Body[0].Location.Row == every.Location.Row {
w.write(" ")
}
w.write("}")
- return comments
+ return comments, nil
}
-func (w *writer) writeFunctionCall(expr *ast.Expr, comments []*ast.Comment) []*ast.Comment {
+func (w *writer) writeFunctionCall(expr *ast.Expr, comments []*ast.Comment) ([]*ast.Comment, error) {
terms := expr.Terms.([]*ast.Term)
operator := terms[0].Value.String()
@@ -754,22 +1063,34 @@ func (w *writer) writeFunctionCall(expr *ast.Expr, comments []*ast.Comment) []*a
return w.writeFunctionCallPlain(terms, comments)
}
- numDeclArgs := len(bi.Decl.Args())
+ numDeclArgs := bi.Decl.Arity()
numCallArgs := len(terms) - 1
+ var err error
switch numCallArgs {
case numDeclArgs: // Print infix where result is unassigned (e.g., x != y)
- comments = w.writeTerm(terms[1], comments)
+ comments, err = w.writeTerm(terms[1], comments)
+ if err != nil {
+ return nil, err
+ }
w.write(" " + bi.Infix + " ")
return w.writeTerm(terms[2], comments)
-
case numDeclArgs + 1: // Print infix where result is assigned (e.g., z = x + y)
- comments = w.writeTerm(terms[3], comments)
+ comments, err = w.writeTerm(terms[3], comments)
+ if err != nil {
+ return nil, err
+ }
w.write(" " + ast.Equality.Infix + " ")
- comments = w.writeTerm(terms[1], comments)
+ comments, err = w.writeTerm(terms[1], comments)
+ if err != nil {
+ return nil, err
+ }
w.write(" " + bi.Infix + " ")
- comments = w.writeTerm(terms[2], comments)
- return comments
+ comments, err = w.writeTerm(terms[2], comments)
+ if err != nil {
+ return nil, err
+ }
+ return comments, nil
}
// NOTE(Trolloldem): in this point we are operating with a built-in function with the
// wrong arity even when the assignment notation is used
@@ -777,65 +1098,199 @@ func (w *writer) writeFunctionCall(expr *ast.Expr, comments []*ast.Comment) []*a
return w.writeFunctionCallPlain(terms, comments)
}
-func (w *writer) writeFunctionCallPlain(terms []*ast.Term, comments []*ast.Comment) []*ast.Comment {
- w.write(terms[0].String() + "(")
+func (w *writer) writeFunctionCallPlain(terms []*ast.Term, comments []*ast.Comment) ([]*ast.Comment, error) {
+ if r, ok := terms[0].Value.(ast.Ref); ok {
+ if c, err := w.writeRef(r, comments); err != nil {
+ return c, err
+ }
+ } else {
+ w.write(terms[0].String())
+ }
+ w.write("(")
defer w.write(")")
- args := make([]interface{}, len(terms)-1)
+
+ args := make([]any, len(terms)-1)
for i, t := range terms[1:] {
args[i] = t
}
loc := terms[0].Location
- return w.writeIterable(args, loc, closingLoc(0, 0, '(', ')', loc), comments, w.listWriter())
+ var err error
+ comments, err = w.writeIterable(args, loc, closingLoc(0, 0, '(', ')', loc), comments, w.listWriter())
+ if err != nil {
+ return nil, err
+ }
+ return comments, nil
}
-func (w *writer) writeWith(with *ast.With, comments []*ast.Comment, indented bool) []*ast.Comment {
- comments = w.insertComments(comments, with.Location)
+func (w *writer) writeWith(with *ast.With, comments []*ast.Comment, indented bool) ([]*ast.Comment, error) {
+ var err error
+ comments, err = w.insertComments(comments, with.Location)
+ if err != nil {
+ return nil, err
+ }
if !indented {
w.write(" ")
}
w.write("with ")
- comments = w.writeTerm(with.Target, comments)
+ comments, err = w.writeTerm(with.Target, comments)
+ if err != nil {
+ return nil, err
+ }
w.write(" as ")
- return w.writeTerm(with.Value, comments)
+ comments, err = w.writeTerm(with.Value, comments)
+ if err != nil {
+ return nil, err
+ }
+ return comments, nil
}
-func (w *writer) writeTerm(term *ast.Term, comments []*ast.Comment) []*ast.Comment {
- return w.writeTermParens(false, term, comments)
+// saveComments saves a copy of the comments slice in a pooled slice to and returns it.
+// This is to avoid having to create a new slice every time we need to save comments.
+// The caller is responsible for putting the slice back in the pool when done.
+func saveComments(comments []*ast.Comment) *[]*ast.Comment {
+ cmlen := len(comments)
+ saved := commentsSlicePool.Get(cmlen)
+
+ copy(*saved, comments)
+
+ return saved
}
-func (w *writer) writeTermParens(parens bool, term *ast.Term, comments []*ast.Comment) []*ast.Comment {
- comments = w.insertComments(comments, term.Location)
+func (w *writer) writeTerm(term *ast.Term, comments []*ast.Comment) ([]*ast.Comment, error) {
+ if len(comments) == 0 {
+ return w.writeTermParens(false, term, comments)
+ }
+
+ currentLen := w.buf.Len()
+ currentComments := saveComments(comments)
+ defer commentsSlicePool.Put(currentComments)
+
+ comments, err := w.writeTermParens(false, term, comments)
+ if err != nil {
+ if errors.As(err, &unexpectedCommentError{}) {
+ w.buf.Truncate(currentLen)
+
+ comments, uErr := w.writeUnformatted(term.Location, *currentComments)
+ if uErr != nil {
+ return nil, uErr
+ }
+ return comments, err
+ }
+ return nil, err
+ }
+
+ return comments, nil
+}
+
+// writeUnformatted writes the unformatted text instead and updates the comment state
+func (w *writer) writeUnformatted(location *ast.Location, currentComments []*ast.Comment) ([]*ast.Comment, error) {
+ if len(location.Text) == 0 {
+ return nil, errors.New("original unformatted text is empty")
+ }
+
+ rowNum := bytes.Count(location.Text, []byte{'\n'}) + 1
+
+ w.writeBytes(location.Text)
+
+ comments := make([]*ast.Comment, 0, len(currentComments))
+ for _, c := range currentComments {
+ // if there is a body then wait to write the last comment
+ if w.writeCommentOnFinalLine && c.Location.Row == location.Row+rowNum-1 {
+ w.write(" ")
+ w.writeBytes(c.Location.Text)
+ continue
+ }
+
+ // drop comments that occur within the rule raw text
+ if c.Location.Row < location.Row+rowNum-1 {
+ continue
+ }
+ comments = append(comments, c)
+ }
+ return comments, nil
+}
+
+func (w *writer) writeTermParens(parens bool, term *ast.Term, comments []*ast.Comment) ([]*ast.Comment, error) {
+ var err error
+ comments, err = w.insertComments(comments, term.Location)
+ if err != nil {
+ return nil, err
+ }
if !w.inline {
w.startLine()
}
switch x := term.Value.(type) {
case ast.Ref:
- w.writeRef(x)
+ comments, err = w.writeRef(x, comments)
+ if err != nil {
+ return nil, err
+ }
case ast.Object:
- comments = w.writeObject(x, term.Location, comments)
+ comments, err = w.writeObject(x, term.Location, comments)
+ if err != nil {
+ return nil, err
+ }
case *ast.Array:
- comments = w.writeArray(x, term.Location, comments)
+ comments, err = w.writeArray(x, term.Location, comments)
+ if err != nil {
+ return nil, err
+ }
case ast.Set:
- comments = w.writeSet(x, term.Location, comments)
+ comments, err = w.writeSet(x, term.Location, comments)
+ if err != nil {
+ return nil, err
+ }
case *ast.ArrayComprehension:
- comments = w.writeArrayComprehension(x, term.Location, comments)
+ comments, err = w.writeArrayComprehension(x, term.Location, comments)
+ if err != nil {
+ return nil, err
+ }
case *ast.ObjectComprehension:
- comments = w.writeObjectComprehension(x, term.Location, comments)
+ comments, err = w.writeObjectComprehension(x, term.Location, comments)
+ if err != nil {
+ return nil, err
+ }
case *ast.SetComprehension:
- comments = w.writeSetComprehension(x, term.Location, comments)
+ comments, err = w.writeSetComprehension(x, term.Location, comments)
+ if err != nil {
+ return nil, err
+ }
case ast.String:
if term.Location.Text[0] == '`' {
// To preserve raw strings, we need to output the original text,
- // not what x.String() would give us.
- w.write(string(term.Location.Text))
+ w.writeBytes(term.Location.Text)
} else {
- w.write(x.String())
+ // x.String() cannot be used by default because it can change the input string "\u0000" to "\x00"
+ var after, quote []byte
+ var found bool
+ // term.Location.Text could contain the prefix `else :=`, remove it
+ switch term.Location.Text[len(term.Location.Text)-1] {
+ case '"':
+ quote = []byte{'"'}
+ _, after, found = bytes.Cut(term.Location.Text, quote)
+ case '`':
+ quote = []byte{'`'}
+ _, after, found = bytes.Cut(term.Location.Text, quote)
+ }
+
+ if !found {
+ // If no quoted string was found, that means it is a key being formatted to a string
+ // e.g. partial_set.y to partial_set["y"]
+ w.write(x.String())
+ } else {
+ w.writeBytes(quote)
+ w.writeBytes(after)
+ }
+
}
case ast.Var:
w.write(w.formatVar(x))
case ast.Call:
- comments = w.writeCall(parens, x, term.Location, comments)
+ comments, err = w.writeCall(parens, x, term.Location, comments)
+ if err != nil {
+ return nil, err
+ }
case fmt.Stringer:
w.write(x.String())
}
@@ -843,56 +1298,86 @@ func (w *writer) writeTermParens(parens bool, term *ast.Term, comments []*ast.Co
if !w.inline {
w.startLine()
}
- return comments
+ return comments, nil
}
-func (w *writer) writeRef(x ast.Ref) {
+func (w *writer) writeRef(x ast.Ref, comments []*ast.Comment) ([]*ast.Comment, error) {
if len(x) > 0 {
parens := false
_, ok := x[0].Value.(ast.Call)
if ok {
parens = x[0].Location.Text[0] == 40 // Starts with "("
}
- w.writeTermParens(parens, x[0], nil)
+ var err error
+ comments, err = w.writeTermParens(parens, x[0], comments)
+ if err != nil {
+ return nil, err
+ }
path := x[1:]
for _, t := range path {
switch p := t.Value.(type) {
case ast.String:
- w.writeRefStringPath(p)
+ w.writeRefStringPath(p, t.Location)
case ast.Var:
w.writeBracketed(w.formatVar(p))
default:
w.write("[")
- w.writeTerm(t, nil)
+ comments, err = w.writeTerm(t, comments)
+ if err != nil {
+ if errors.As(err, &unexpectedCommentError{}) {
+ // add a new line so that the closing bracket isn't part of the unexpected comment
+ w.write("\n")
+ } else {
+ return nil, err
+ }
+ }
w.write("]")
}
}
}
+
+ return comments, nil
}
func (w *writer) writeBracketed(str string) {
w.write("[" + str + "]")
}
-var varRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$")
-
-func (w *writer) writeRefStringPath(s ast.String) {
+func (w *writer) writeRefStringPath(s ast.String, l *ast.Location) {
str := string(s)
- if varRegexp.MatchString(str) && !ast.IsInKeywords(str, w.fmtOpts.keywords()) {
- w.write("." + str)
- } else {
+ if w.shouldBracketRefTerm(str, l) {
w.writeBracketed(s.String())
+ } else {
+ w.write("." + str)
+ }
+}
+
+func (w *writer) shouldBracketRefTerm(s string, l *ast.Location) bool {
+ if !varRegexp.MatchString(s) {
+ return true
+ }
+
+ if ast.IsInKeywords(s, w.fmtOpts.keywords()) {
+ if !w.fmtOpts.allowKeywordsInRefs {
+ return true
+ }
+
+ if l != nil && l.Text[0] == 34 { // If the original term text starts with '"', we preserve the brackets and quotes
+ return true
+ }
}
+
+ return false
}
-func (w *writer) formatVar(v ast.Var) string {
+func (*writer) formatVar(v ast.Var) string {
if v.IsWildcard() {
return ast.Wildcard.String()
}
return v.String()
}
-func (w *writer) writeCall(parens bool, x ast.Call, loc *ast.Location, comments []*ast.Comment) []*ast.Comment {
+func (w *writer) writeCall(parens bool, x ast.Call, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) {
bi, ok := ast.BuiltinMap[x[0].String()]
if !ok || bi.Infix == "" {
return w.writeFunctionCallPlain(x, comments)
@@ -912,110 +1397,148 @@ func (w *writer) writeCall(parens bool, x ast.Call, loc *ast.Location, comments
// NOTE(Trolloldem): writeCall is only invoked when the function call is a term
// of another function. The only valid arity is the one of the
// built-in function
- if len(bi.Decl.Args()) != len(x)-1 {
+ if bi.Decl.Arity() != len(x)-1 {
w.errs = append(w.errs, ArityFormatMismatchError(x[1:], x[0].String(), loc, bi.Decl))
- return comments
+ return comments, nil
}
- comments = w.writeTermParens(true, x[1], comments)
+ var err error
+ comments, err = w.writeTermParens(true, x[1], comments)
+ if err != nil {
+ return nil, err
+ }
w.write(" " + bi.Infix + " ")
- comments = w.writeTermParens(true, x[2], comments)
+ comments, err = w.writeTermParens(true, x[2], comments)
+ if err != nil {
+ return nil, err
+ }
if parens {
w.write(")")
}
- return comments
+ return comments, nil
}
-func (w *writer) writeInOperator(parens bool, operands []*ast.Term, comments []*ast.Comment, loc *ast.Location, f *types.Function) []*ast.Comment {
+func (w *writer) writeInOperator(parens bool, operands []*ast.Term, comments []*ast.Comment, loc *ast.Location, f *types.Function) ([]*ast.Comment, error) {
- if len(operands) != len(f.Args()) {
+ if len(operands) != f.Arity() {
// The number of operands does not math the arity of the `in` operator
operator := ast.Member.Name
- if len(f.Args()) == 3 {
+ if f.Arity() == 3 {
operator = ast.MemberWithKey.Name
}
w.errs = append(w.errs, ArityFormatMismatchError(operands, operator, loc, f))
- return comments
+ return comments, nil
}
kw := "in"
+ var err error
switch len(operands) {
case 2:
- comments = w.writeTermParens(true, operands[0], comments)
+ comments, err = w.writeTermParens(true, operands[0], comments)
+ if err != nil {
+ return nil, err
+ }
w.write(" ")
w.write(kw)
w.write(" ")
- comments = w.writeTermParens(true, operands[1], comments)
+ comments, err = w.writeTermParens(true, operands[1], comments)
+ if err != nil {
+ return nil, err
+ }
case 3:
if parens {
w.write("(")
defer w.write(")")
}
- comments = w.writeTermParens(true, operands[0], comments)
+ comments, err = w.writeTermParens(true, operands[0], comments)
+ if err != nil {
+ return nil, err
+ }
w.write(", ")
- comments = w.writeTermParens(true, operands[1], comments)
+ comments, err = w.writeTermParens(true, operands[1], comments)
+ if err != nil {
+ return nil, err
+ }
w.write(" ")
w.write(kw)
w.write(" ")
- comments = w.writeTermParens(true, operands[2], comments)
+ comments, err = w.writeTermParens(true, operands[2], comments)
+ if err != nil {
+ return nil, err
+ }
}
- return comments
+ return comments, nil
}
-func (w *writer) writeObject(obj ast.Object, loc *ast.Location, comments []*ast.Comment) []*ast.Comment {
+func (w *writer) writeObject(obj ast.Object, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) {
w.write("{")
defer w.write("}")
- var s []interface{}
+ var s []any
obj.Foreach(func(k, v *ast.Term) {
s = append(s, ast.Item(k, v))
})
return w.writeIterable(s, loc, closingLoc(0, 0, '{', '}', loc), comments, w.objectWriter())
}
-func (w *writer) writeArray(arr *ast.Array, loc *ast.Location, comments []*ast.Comment) []*ast.Comment {
+func (w *writer) writeArray(arr *ast.Array, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) {
w.write("[")
defer w.write("]")
- var s []interface{}
+ var s []any
arr.Foreach(func(t *ast.Term) {
s = append(s, t)
})
- return w.writeIterable(s, loc, closingLoc(0, 0, '[', ']', loc), comments, w.listWriter())
+ var err error
+ comments, err = w.writeIterable(s, loc, closingLoc(0, 0, '[', ']', loc), comments, w.listWriter())
+ if err != nil {
+ return nil, err
+ }
+ return comments, nil
}
-func (w *writer) writeSet(set ast.Set, loc *ast.Location, comments []*ast.Comment) []*ast.Comment {
+func (w *writer) writeSet(set ast.Set, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) {
if set.Len() == 0 {
w.write("set()")
- return w.insertComments(comments, closingLoc(0, 0, '(', ')', loc))
+ var err error
+ comments, err = w.insertComments(comments, closingLoc(0, 0, '(', ')', loc))
+ if err != nil {
+ return nil, err
+ }
+ return comments, nil
}
w.write("{")
defer w.write("}")
- var s []interface{}
+ var s []any
set.Foreach(func(t *ast.Term) {
s = append(s, t)
})
- return w.writeIterable(s, loc, closingLoc(0, 0, '{', '}', loc), comments, w.listWriter())
+ var err error
+ comments, err = w.writeIterable(s, loc, closingLoc(0, 0, '{', '}', loc), comments, w.listWriter())
+ if err != nil {
+ return nil, err
+ }
+ return comments, nil
}
-func (w *writer) writeArrayComprehension(arr *ast.ArrayComprehension, loc *ast.Location, comments []*ast.Comment) []*ast.Comment {
+func (w *writer) writeArrayComprehension(arr *ast.ArrayComprehension, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) {
w.write("[")
defer w.write("]")
return w.writeComprehension('[', ']', arr.Term, arr.Body, loc, comments)
}
-func (w *writer) writeSetComprehension(set *ast.SetComprehension, loc *ast.Location, comments []*ast.Comment) []*ast.Comment {
+func (w *writer) writeSetComprehension(set *ast.SetComprehension, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) {
w.write("{")
defer w.write("}")
return w.writeComprehension('{', '}', set.Term, set.Body, loc, comments)
}
-func (w *writer) writeObjectComprehension(object *ast.ObjectComprehension, loc *ast.Location, comments []*ast.Comment) []*ast.Comment {
+func (w *writer) writeObjectComprehension(object *ast.ObjectComprehension, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) {
w.write("{")
defer w.write("}")
@@ -1025,12 +1548,16 @@ func (w *writer) writeObjectComprehension(object *ast.ObjectComprehension, loc *
w.startLine()
}
- comments = w.writeTerm(object.Key, comments)
+ var err error
+ comments, err = w.writeTerm(object.Key, comments)
+ if err != nil {
+ return nil, err
+ }
w.write(": ")
return w.writeComprehension('{', '}', object.Value, object.Body, loc, comments)
}
-func (w *writer) writeComprehension(open, close byte, term *ast.Term, body ast.Body, loc *ast.Location, comments []*ast.Comment) []*ast.Comment {
+func (w *writer) writeComprehension(openChar, closeChar byte, term *ast.Term, body ast.Body, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) {
if term.Location.Row-loc.Row >= 1 {
w.endLine()
w.startLine()
@@ -1041,55 +1568,82 @@ func (w *writer) writeComprehension(open, close byte, term *ast.Term, body ast.B
if ok {
parens = term.Location.Text[0] == 40 // Starts with "("
}
- comments = w.writeTermParens(parens, term, comments)
+ var err error
+ comments, err = w.writeTermParens(parens, term, comments)
+ if err != nil {
+ return nil, err
+ }
w.write(" |")
- return w.writeComprehensionBody(open, close, body, term.Location, loc, comments)
+ return w.writeComprehensionBody(openChar, closeChar, body, term.Location, loc, comments)
}
-func (w *writer) writeComprehensionBody(open, close byte, body ast.Body, term, compr *ast.Location, comments []*ast.Comment) []*ast.Comment {
- exprs := make([]interface{}, 0, len(body))
+func (w *writer) writeComprehensionBody(openChar, closeChar byte, body ast.Body, term, compr *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) {
+ exprs := make([]any, 0, len(body))
for _, expr := range body {
exprs = append(exprs, expr)
}
- lines := groupIterable(exprs, term)
+ lines, err := w.groupIterable(exprs, term)
+ if err != nil {
+ return nil, err
+ }
if body.Loc().Row-term.Row > 0 || len(lines) > 1 {
w.endLine()
w.up()
defer w.startLine()
- defer w.down()
+ defer func() {
+ if err := w.down(); err != nil {
+ w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error()))
+ }
+ }()
- comments = w.writeBody(body, comments)
+ var err error
+ comments, err = w.writeBody(body, comments)
+ if err != nil {
+ return comments, err
+ }
} else {
w.write(" ")
i := 0
for ; i < len(body)-1; i++ {
- comments = w.writeExpr(body[i], comments)
+ comments, err = w.writeExpr(body[i], comments)
+ if err != nil {
+ return comments, err
+ }
w.write("; ")
}
- comments = w.writeExpr(body[i], comments)
+ comments, err = w.writeExpr(body[i], comments)
+ if err != nil {
+ return comments, err
+ }
}
-
- return w.insertComments(comments, closingLoc(0, 0, open, close, compr))
+ comments, err = w.insertComments(comments, closingLoc(0, 0, openChar, closeChar, compr))
+ if err != nil {
+ return nil, err
+ }
+ return comments, nil
}
-func (w *writer) writeImports(imports []*ast.Import, comments []*ast.Comment) []*ast.Comment {
+func (w *writer) writeImports(imports []*ast.Import, comments []*ast.Comment) ([]*ast.Comment, error) {
m, comments := mapImportsToComments(imports, comments)
groups := groupImports(imports)
for _, group := range groups {
- comments = w.insertComments(comments, group[0].Loc())
+ var err error
+ comments, err = w.insertComments(comments, group[0].Loc())
+ if err != nil {
+ return nil, err
+ }
// Sort imports within a newline grouping.
- sort.Slice(group, func(i, j int) bool {
- a := group[i]
- b := group[j]
- return a.Compare(b) < 0
- })
+ slices.SortFunc(group, (*ast.Import).Compare)
for _, i := range group {
w.startLine()
- w.writeImport(i)
+ err = w.writeImport(i)
+ if err != nil {
+ return nil, err
+ }
if c, ok := m[i]; ok {
w.write(" " + c.String())
}
@@ -1098,35 +1652,45 @@ func (w *writer) writeImports(imports []*ast.Import, comments []*ast.Comment) []
w.blankLine()
}
- return comments
+ return comments, nil
}
-func (w *writer) writeImport(imp *ast.Import) {
+func (w *writer) writeImport(imp *ast.Import) error {
path := imp.Path.Value.(ast.Ref)
- buf := []string{"import"}
+ w.write("import ")
if _, ok := future.WhichFutureKeyword(imp); ok {
// We don't want to wrap future.keywords imports in parens, so we create a new writer that doesn't
w2 := writer{
buf: bytes.Buffer{},
}
- w2.writeRef(path)
- buf = append(buf, w2.buf.String())
+ _, err := w2.writeRef(path, nil)
+ if err != nil {
+ return err
+ }
+ w.write(w2.buf.String())
} else {
- buf = append(buf, path.String())
+ _, err := w.writeRef(path, nil)
+ if err != nil {
+ return err
+ }
}
if len(imp.Alias) > 0 {
- buf = append(buf, "as "+imp.Alias.String())
+ w.write(" as " + imp.Alias.String())
}
- w.write(strings.Join(buf, " "))
+
+ return nil
}
-type entryWriter func(interface{}, []*ast.Comment) []*ast.Comment
+type entryWriter func(any, []*ast.Comment) ([]*ast.Comment, error)
-func (w *writer) writeIterable(elements []interface{}, last *ast.Location, close *ast.Location, comments []*ast.Comment, fn entryWriter) []*ast.Comment {
- lines := groupIterable(elements, last)
+func (w *writer) writeIterable(elements []any, last *ast.Location, close *ast.Location, comments []*ast.Comment, fn entryWriter) ([]*ast.Comment, error) {
+ lines, err := w.groupIterable(elements, last)
+ if err != nil {
+ return nil, err
+ }
if len(lines) > 1 {
w.delayBeforeEnd()
w.startMultilineSeq()
@@ -1134,34 +1698,49 @@ func (w *writer) writeIterable(elements []interface{}, last *ast.Location, close
i := 0
for ; i < len(lines)-1; i++ {
- comments = w.writeIterableLine(lines[i], comments, fn)
+ comments, err = w.writeIterableLine(lines[i], comments, fn)
+ if err != nil {
+ return nil, err
+ }
w.write(",")
w.endLine()
w.startLine()
}
- comments = w.writeIterableLine(lines[i], comments, fn)
+ comments, err = w.writeIterableLine(lines[i], comments, fn)
+ if err != nil {
+ return nil, err
+ }
if len(lines) > 1 {
w.write(",")
w.endLine()
- comments = w.insertComments(comments, close)
- w.down()
+ comments, err = w.insertComments(comments, close)
+ if err != nil {
+ return nil, err
+ }
+ if err := w.down(); err != nil {
+ return nil, err
+ }
w.startLine()
}
- return comments
+ return comments, nil
}
-func (w *writer) writeIterableLine(elements []interface{}, comments []*ast.Comment, fn entryWriter) []*ast.Comment {
+func (w *writer) writeIterableLine(elements []any, comments []*ast.Comment, fn entryWriter) ([]*ast.Comment, error) {
if len(elements) == 0 {
- return comments
+ return comments, nil
}
i := 0
for ; i < len(elements)-1; i++ {
- comments = fn(elements[i], comments)
+ var err error
+ comments, err = fn(elements[i], comments)
+ if err != nil {
+ return nil, err
+ }
w.write(", ")
}
@@ -1169,7 +1748,7 @@ func (w *writer) writeIterableLine(elements []interface{}, comments []*ast.Comme
}
func (w *writer) objectWriter() entryWriter {
- return func(x interface{}, comments []*ast.Comment) []*ast.Comment {
+ return func(x any, comments []*ast.Comment) ([]*ast.Comment, error) {
entry := x.([2]*ast.Term)
call, isCall := entry[0].Value.(ast.Call)
@@ -1180,7 +1759,11 @@ func (w *writer) objectWriter() entryWriter {
w.write("(")
}
- comments = w.writeTerm(entry[0], comments)
+ var err error
+ comments, err = w.writeTerm(entry[0], comments)
+ if err != nil {
+ return nil, err
+ }
if paren {
w.write(")")
}
@@ -1198,7 +1781,7 @@ func (w *writer) objectWriter() entryWriter {
}
func (w *writer) listWriter() entryWriter {
- return func(x interface{}, comments []*ast.Comment) []*ast.Comment {
+ return func(x any, comments []*ast.Comment) ([]*ast.Comment, error) {
t, ok := x.(*ast.Term)
if ok {
call, isCall := t.Value.(ast.Call)
@@ -1214,7 +1797,7 @@ func (w *writer) listWriter() entryWriter {
// groupIterable will group the `elements` slice into slices according to their
// location: anything on the same line will be put into a slice.
-func groupIterable(elements []interface{}, last *ast.Location) [][]interface{} {
+func (w *writer) groupIterable(elements []any, last *ast.Location) ([][]any, error) {
// Generated vars occur in the AST when we're rendering the result of
// partial evaluation in a bundle build with optimization.
// Those variables, and wildcard variables have the "default location",
@@ -1241,18 +1824,26 @@ func groupIterable(elements []interface{}, last *ast.Location) [][]interface{} {
return false
})
if def { // return as-is
- return [][]interface{}{elements}
+ return [][]any{elements}, nil
}
}
- sort.Slice(elements, func(i, j int) bool {
- return locLess(elements[i], elements[j])
+
+ slices.SortFunc(elements, func(i, j any) int {
+ l, err := locCmp(i, j)
+ if err != nil {
+ w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error()))
+ }
+ return l
})
- var lines [][]interface{}
- cur := make([]interface{}, 0, len(elements))
+ var lines [][]any
+ cur := make([]any, 0, len(elements))
for i, t := range elements {
elem := t
- loc := getLoc(elem)
+ loc, err := getLoc(elem)
+ if err != nil {
+ return nil, err
+ }
lineDiff := loc.Row - last.Row
if lineDiff > 0 && i > 0 {
lines = append(lines, cur)
@@ -1262,7 +1853,7 @@ func groupIterable(elements []interface{}, last *ast.Location) [][]interface{} {
last = loc
cur = append(cur, elem)
}
- return append(lines, cur)
+ return append(lines, cur), nil
}
func mapImportsToComments(imports []*ast.Import, comments []*ast.Comment) (map[*ast.Import]*ast.Comment, []*ast.Comment) {
@@ -1318,14 +1909,37 @@ func groupImports(imports []*ast.Import) [][]*ast.Import {
return groups
}
-func partitionComments(comments []*ast.Comment, l *ast.Location) (before []*ast.Comment, at *ast.Comment, after []*ast.Comment) {
+func partitionComments(comments []*ast.Comment, l *ast.Location) ([]*ast.Comment, *ast.Comment, []*ast.Comment) {
+ if len(comments) == 0 {
+ return nil, nil, nil
+ }
+
+ numBefore, numAfter := 0, 0
+ for _, c := range comments {
+ switch cmp := c.Location.Row - l.Row; {
+ case cmp < 0:
+ numBefore++
+ case cmp > 0:
+ numAfter++
+ }
+ }
+
+ if numAfter == len(comments) {
+ return nil, nil, comments
+ }
+
+ var at *ast.Comment
+
+ before := make([]*ast.Comment, 0, numBefore)
+ after := comments[0 : 0 : len(comments)-numBefore]
+
for _, c := range comments {
switch cmp := c.Location.Row - l.Row; {
case cmp < 0:
before = append(before, c)
case cmp > 0:
after = append(after, c)
- case cmp == 0:
+ default:
at = c
}
}
@@ -1333,7 +1947,7 @@ func partitionComments(comments []*ast.Comment, l *ast.Location) (before []*ast.
return before, at, after
}
-func gatherImports(others []interface{}) (imports []*ast.Import, rest []interface{}) {
+func gatherImports(others []any) (imports []*ast.Import, rest []any) {
i := 0
loop:
for ; i < len(others); i++ {
@@ -1347,7 +1961,7 @@ loop:
return imports, others[i:]
}
-func gatherRules(others []interface{}) (rules []*ast.Rule, rest []interface{}) {
+func gatherRules(others []any) (rules []*ast.Rule, rest []any) {
i := 0
loop:
for ; i < len(others); i++ {
@@ -1361,43 +1975,52 @@ loop:
return rules, others[i:]
}
-func locLess(a, b interface{}) bool {
- return locCmp(a, b) < 0
+func locLess(a, b any) (bool, error) {
+ c, err := locCmp(a, b)
+ return c < 0, err
}
-func locCmp(a, b interface{}) int {
- al := getLoc(a)
- bl := getLoc(b)
+func locCmp(a, b any) (int, error) {
+ al, err := getLoc(a)
+ if err != nil {
+ return 0, err
+ }
+ bl, err := getLoc(b)
+ if err != nil {
+ return 0, err
+ }
switch {
case al == nil && bl == nil:
- return 0
+ return 0, nil
case al == nil:
- return -1
+ return -1, nil
case bl == nil:
- return 1
+ return 1, nil
}
if cmp := al.Row - bl.Row; cmp != 0 {
- return cmp
+ return cmp, nil
}
- return al.Col - bl.Col
+ return al.Col - bl.Col, nil
}
-func getLoc(x interface{}) *ast.Location {
+func getLoc(x any) (*ast.Location, error) {
switch x := x.(type) {
case ast.Node: // *ast.Head, *ast.Expr, *ast.With, *ast.Term
- return x.Loc()
+ return x.Loc(), nil
case *ast.Location:
- return x
+ return x, nil
case [2]*ast.Term: // Special case to allow for easy printing of objects.
- return x[0].Location
+ return x[0].Location, nil
default:
- panic("Not reached")
+ return nil, fmt.Errorf("unable to get location for type %v", x)
}
}
-func closingLoc(skipOpen, skipClose, open, close byte, loc *ast.Location) *ast.Location {
+var negativeRow = &ast.Location{Row: -1}
+
+func closingLoc(skipOpen, skipClose, openChar, closeChar byte, loc *ast.Location) *ast.Location {
i, offset := 0, 0
// Skip past parens/brackets/braces in rule heads.
@@ -1406,26 +2029,26 @@ func closingLoc(skipOpen, skipClose, open, close byte, loc *ast.Location) *ast.L
}
for ; i < len(loc.Text); i++ {
- if loc.Text[i] == open {
+ if loc.Text[i] == openChar {
break
}
}
if i >= len(loc.Text) {
- return &ast.Location{Row: -1}
+ return negativeRow
}
state := 1
for state > 0 {
i++
if i >= len(loc.Text) {
- return &ast.Location{Row: -1}
+ return negativeRow
}
switch loc.Text[i] {
- case open:
+ case openChar:
state++
- case close:
+ case closeChar:
state--
case '\n':
offset++
@@ -1435,10 +2058,10 @@ func closingLoc(skipOpen, skipClose, open, close byte, loc *ast.Location) *ast.L
return &ast.Location{Row: loc.Row + offset}
}
-func skipPast(open, close byte, loc *ast.Location) (int, int) {
+func skipPast(openChar, closeChar byte, loc *ast.Location) (int, int) {
i := 0
for ; i < len(loc.Text); i++ {
- if loc.Text[i] == open {
+ if loc.Text[i] == openChar {
break
}
}
@@ -1452,9 +2075,9 @@ func skipPast(open, close byte, loc *ast.Location) (int, int) {
}
switch loc.Text[i] {
- case open:
+ case openChar:
state++
- case close:
+ case closeChar:
state--
case '\n':
offset++
@@ -1467,7 +2090,7 @@ func skipPast(open, close byte, loc *ast.Location) (int, int) {
// startLine begins a line with the current indentation level.
func (w *writer) startLine() {
w.inline = true
- for i := 0; i < w.level; i++ {
+ for range w.level {
w.write(w.indent)
}
}
@@ -1483,15 +2106,46 @@ func (w *writer) endLine() {
w.write("\n")
}
+type unexpectedCommentError struct {
+ newComment string
+ newCommentRow int
+ existingComment string
+ existingCommentRow int
+}
+
+func (u unexpectedCommentError) Error() string {
+ return fmt.Sprintf("unexpected new comment (%s) on line %d because there is already a comment (%s) registered for line %d",
+ u.newComment, u.newCommentRow, u.existingComment, u.existingCommentRow)
+}
+
// beforeLineEnd registers a comment to be printed at the end of the current line.
-func (w *writer) beforeLineEnd(c *ast.Comment) {
+func (w *writer) beforeLineEnd(c *ast.Comment) error {
if w.beforeEnd != nil {
if c == nil {
- return
+ return nil
+ }
+
+ existingComment := truncatedString(w.beforeEnd.String(), 100)
+ existingCommentRow := w.beforeEnd.Location.Row
+ newComment := truncatedString(c.String(), 100)
+ w.beforeEnd = nil
+
+ return unexpectedCommentError{
+ newComment: newComment,
+ newCommentRow: c.Location.Row,
+ existingComment: existingComment,
+ existingCommentRow: existingCommentRow,
}
- panic("overwriting non-nil beforeEnd")
}
w.beforeEnd = c
+ return nil
+}
+
+func truncatedString(s string, max int) string {
+ if len(s) > max {
+ return s[:max-2] + "..."
+ }
+ return s
}
func (w *writer) delayBeforeEnd() {
@@ -1507,11 +2161,16 @@ func (w *writer) blankLine() {
w.write("\n")
}
-// write the input string and writes it to the buffer.
+// write writes string s to the buffer.
func (w *writer) write(s string) {
w.buf.WriteString(s)
}
+// writeBytes writes []byte b to the buffer.
+func (w *writer) writeBytes(b []byte) {
+ w.buf.Write(b)
+}
+
// writeLine writes the string on a newly started line, then terminate the line.
func (w *writer) writeLine(s string) {
if !w.inline {
@@ -1533,11 +2192,12 @@ func (w *writer) up() {
}
// down decreases the indentation level
-func (w *writer) down() {
+func (w *writer) down() error {
if w.level == 0 {
- panic("negative indentation level")
+ return errors.New("negative indentation level")
}
w.level--
+ return nil
}
func ensureFutureKeywordImport(imps []*ast.Import, kw string) []*ast.Import {
@@ -1589,22 +2249,22 @@ func ensureImport(imps []*ast.Import, path ast.Ref) []*ast.Import {
return append(imps, imp)
}
-// ArgErrDetail but for `fmt` checks since compiler has not run yet.
+// ArityFormatErrDetail but for `fmt` checks since compiler has not run yet.
type ArityFormatErrDetail struct {
Have []string `json:"have"`
Want []string `json:"want"`
}
-// arityMismatchError but for `fmt` checks since the compiler has not run yet.
+// ArityFormatMismatchError but for `fmt` checks since the compiler has not run yet.
func ArityFormatMismatchError(operands []*ast.Term, operator string, loc *ast.Location, f *types.Function) *ast.Error {
- want := make([]string, len(f.Args()))
- for i := range f.Args() {
- want[i] = types.Sprint(f.Args()[i])
+ want := make([]string, f.Arity())
+ for i, arg := range f.FuncArgs().Args {
+ want[i] = types.Sprint(arg)
}
have := make([]string, len(operands))
- for i := 0; i < len(operands); i++ {
- have[i] = ast.TypeName(operands[i].Value)
+ for i := range operands {
+ have[i] = ast.ValueName(operands[i].Value)
}
err := ast.NewError(ast.TypeErr, loc, "%s: %s", operator, "arity mismatch")
err.Details = &ArityFormatErrDetail{
@@ -1617,18 +2277,9 @@ func ArityFormatMismatchError(operands []*ast.Term, operator string, loc *ast.Lo
// Lines returns the string representation of the detail.
func (d *ArityFormatErrDetail) Lines() []string {
return []string{
- "have: " + "(" + strings.Join(d.Have, ",") + ")",
- "want: " + "(" + strings.Join(d.Want, ",") + ")",
- }
-}
-
-func moduleIsRegoV1Compatible(m *ast.Module) bool {
- for _, imp := range m.Imports {
- if isRegoV1Compatible(imp) {
- return true
- }
+ "have: (" + strings.Join(d.Have, ",") + ")",
+ "want: (" + strings.Join(d.Want, ",") + ")",
}
- return false
}
// isRegoV1Compatible returns true if the passed *ast.Import is `rego.v1`
@@ -1636,5 +2287,5 @@ func isRegoV1Compatible(imp *ast.Import) bool {
path := imp.Path.Value.(ast.Ref)
return len(path) == 2 &&
ast.RegoRootDocument.Equal(path[0]) &&
- path[1].Equal(ast.StringTerm("v1"))
+ path[1].Equal(ast.InternedTerm("v1"))
}
diff --git a/vendor/github.com/open-policy-agent/opa/hooks/hooks.go b/vendor/github.com/open-policy-agent/opa/v1/hooks/hooks.go
similarity index 74%
rename from vendor/github.com/open-policy-agent/opa/hooks/hooks.go
rename to vendor/github.com/open-policy-agent/opa/v1/hooks/hooks.go
index 9659d7b499..cb756e5020 100644
--- a/vendor/github.com/open-policy-agent/opa/hooks/hooks.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/hooks/hooks.go
@@ -8,7 +8,8 @@ import (
"context"
"fmt"
- "github.com/open-policy-agent/opa/config"
+ "github.com/open-policy-agent/opa/v1/config"
+ topdown_cache "github.com/open-policy-agent/opa/v1/topdown/cache"
)
// Hook is a hook to be called in some select places in OPA's operation.
@@ -49,6 +50,10 @@ func (hs Hooks) Each(fn func(Hook)) {
}
}
+func (hs Hooks) Len() int {
+ return len(hs.m)
+}
+
// ConfigHook allows inspecting or rewriting the configuration when the plugin
// manager is processing it.
// Note that this hook is not run when the plugin manager is reconfigured. This
@@ -64,10 +69,25 @@ type ConfigDiscoveryHook interface {
OnConfigDiscovery(context.Context, *config.Config) (*config.Config, error)
}
+// InterQueryCacheHook allows access to the server's inter-query cache instance.
+// It's useful for out-of-tree handlers that also need to evaluate something.
+// Using this hook, they can share the caches with the rest of OPA.
+type InterQueryCacheHook interface {
+ OnInterQueryCache(context.Context, topdown_cache.InterQueryCache) error
+}
+
+// InterQueryValueCacheHook allows access to the server's inter-query value cache
+// instance.
+type InterQueryValueCacheHook interface {
+ OnInterQueryValueCache(context.Context, topdown_cache.InterQueryValueCache) error
+}
+
func (hs Hooks) Validate() error {
for h := range hs.m {
switch h.(type) {
- case ConfigHook,
+ case InterQueryCacheHook,
+ InterQueryValueCacheHook,
+ ConfigHook,
ConfigDiscoveryHook: // OK
default:
return fmt.Errorf("unknown hook type %T", h)
diff --git a/vendor/github.com/open-policy-agent/opa/ir/ir.go b/vendor/github.com/open-policy-agent/opa/v1/ir/ir.go
similarity index 99%
rename from vendor/github.com/open-policy-agent/opa/ir/ir.go
rename to vendor/github.com/open-policy-agent/opa/v1/ir/ir.go
index c07670704e..3657a9b673 100644
--- a/vendor/github.com/open-policy-agent/opa/ir/ir.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/ir/ir.go
@@ -11,7 +11,7 @@ package ir
import (
"fmt"
- "github.com/open-policy-agent/opa/types"
+ "github.com/open-policy-agent/opa/v1/types"
)
type (
@@ -106,7 +106,7 @@ const (
Unused
)
-func (a *Policy) String() string {
+func (*Policy) String() string {
return "Policy"
}
diff --git a/vendor/github.com/open-policy-agent/opa/ir/marshal.go b/vendor/github.com/open-policy-agent/opa/v1/ir/marshal.go
similarity index 93%
rename from vendor/github.com/open-policy-agent/opa/ir/marshal.go
rename to vendor/github.com/open-policy-agent/opa/v1/ir/marshal.go
index 69f4b5caf6..f792e2c1b6 100644
--- a/vendor/github.com/open-policy-agent/opa/ir/marshal.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/ir/marshal.go
@@ -6,6 +6,7 @@ package ir
import (
"encoding/json"
+ "fmt"
"reflect"
)
@@ -50,7 +51,11 @@ func (a *Operand) UnmarshalJSON(bs []byte) error {
if err := json.Unmarshal(bs, &typed); err != nil {
return err
}
- x := valFactories[typed.Type]()
+ f, ok := valFactories[typed.Type]
+ if !ok {
+ return fmt.Errorf("unrecognized value type %q", typed.Type)
+ }
+ x := f()
if err := json.Unmarshal(typed.Value, &x); err != nil {
return err
}
@@ -77,7 +82,11 @@ type rawTypedStmt struct {
}
func (raw rawTypedStmt) Unmarshal() (Stmt, error) {
- x := stmtFactories[raw.Type]()
+ f, ok := stmtFactories[raw.Type]
+ if !ok {
+ return nil, fmt.Errorf("unrecognized statement type %q", raw.Type)
+ }
+ x := f()
if err := json.Unmarshal(raw.Stmt, &x); err != nil {
return nil, err
}
@@ -119,6 +128,7 @@ var stmtFactories = map[string]func() Stmt{
"IsArrayStmt": func() Stmt { return &IsArrayStmt{} },
"IsObjectStmt": func() Stmt { return &IsObjectStmt{} },
"IsDefinedStmt": func() Stmt { return &IsDefinedStmt{} },
+ "IsSetStmt": func() Stmt { return &IsSetStmt{} },
"IsUndefinedStmt": func() Stmt { return &IsUndefinedStmt{} },
"ArrayAppendStmt": func() Stmt { return &ArrayAppendStmt{} },
"ObjectInsertStmt": func() Stmt { return &ObjectInsertStmt{} },
diff --git a/vendor/github.com/open-policy-agent/opa/ir/pretty.go b/vendor/github.com/open-policy-agent/opa/v1/ir/pretty.go
similarity index 67%
rename from vendor/github.com/open-policy-agent/opa/ir/pretty.go
rename to vendor/github.com/open-policy-agent/opa/v1/ir/pretty.go
index 6102c5a911..53d7cbae88 100644
--- a/vendor/github.com/open-policy-agent/opa/ir/pretty.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/ir/pretty.go
@@ -11,7 +11,7 @@ import (
)
// Pretty writes a human-readable representation of an IR object to w.
-func Pretty(w io.Writer, x interface{}) error {
+func Pretty(w io.Writer, x any) error {
pp := &prettyPrinter{
depth: -1,
@@ -25,20 +25,20 @@ type prettyPrinter struct {
w io.Writer
}
-func (pp *prettyPrinter) Before(_ interface{}) {
+func (pp *prettyPrinter) Before(_ any) {
pp.depth++
}
-func (pp *prettyPrinter) After(_ interface{}) {
+func (pp *prettyPrinter) After(_ any) {
pp.depth--
}
-func (pp *prettyPrinter) Visit(x interface{}) (Visitor, error) {
+func (pp *prettyPrinter) Visit(x any) (Visitor, error) {
pp.writeIndent("%T %+v", x, x)
return pp, nil
}
-func (pp *prettyPrinter) writeIndent(f string, a ...interface{}) {
+func (pp *prettyPrinter) writeIndent(f string, a ...any) {
pad := strings.Repeat("| ", pp.depth)
fmt.Fprintf(pp.w, pad+f+"\n", a...)
}
diff --git a/vendor/github.com/open-policy-agent/opa/ir/walk.go b/vendor/github.com/open-policy-agent/opa/v1/ir/walk.go
similarity index 89%
rename from vendor/github.com/open-policy-agent/opa/ir/walk.go
rename to vendor/github.com/open-policy-agent/opa/v1/ir/walk.go
index 08a8f42440..788f36cd8e 100644
--- a/vendor/github.com/open-policy-agent/opa/ir/walk.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/ir/walk.go
@@ -6,13 +6,13 @@ package ir
// Visitor defines the interface for visiting IR nodes.
type Visitor interface {
- Before(x interface{})
- Visit(x interface{}) (Visitor, error)
- After(x interface{})
+ Before(x any)
+ Visit(x any) (Visitor, error)
+ After(x any)
}
// Walk invokes the visitor for nodes under x.
-func Walk(vis Visitor, x interface{}) error {
+func Walk(vis Visitor, x any) error {
impl := walkerImpl{
vis: vis,
}
@@ -25,7 +25,7 @@ type walkerImpl struct {
err error
}
-func (w *walkerImpl) walk(x interface{}) {
+func (w *walkerImpl) walk(x any) {
if w.err != nil { // abort on error
return
}
diff --git a/vendor/github.com/open-policy-agent/opa/keys/keys.go b/vendor/github.com/open-policy-agent/opa/v1/keys/keys.go
similarity index 98%
rename from vendor/github.com/open-policy-agent/opa/keys/keys.go
rename to vendor/github.com/open-policy-agent/opa/v1/keys/keys.go
index de03496943..fba7a9c939 100644
--- a/vendor/github.com/open-policy-agent/opa/keys/keys.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/keys/keys.go
@@ -5,7 +5,7 @@ import (
"fmt"
"os"
- "github.com/open-policy-agent/opa/util"
+ "github.com/open-policy-agent/opa/v1/util"
)
const defaultSigningAlgorithm = "RS256"
diff --git a/vendor/github.com/open-policy-agent/opa/v1/loader/errors.go b/vendor/github.com/open-policy-agent/opa/v1/loader/errors.go
new file mode 100644
index 0000000000..55b8e7dc44
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/loader/errors.go
@@ -0,0 +1,62 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package loader
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/open-policy-agent/opa/v1/ast"
+)
+
+// Errors is a wrapper for multiple loader errors.
+type Errors []error
+
+func (e Errors) Error() string {
+ if len(e) == 0 {
+ return "no error(s)"
+ }
+ if len(e) == 1 {
+ return "1 error occurred during loading: " + e[0].Error()
+ }
+ buf := make([]string, len(e))
+ for i := range buf {
+ buf[i] = e[i].Error()
+ }
+ return fmt.Sprintf("%v errors occurred during loading:\n", len(e)) + strings.Join(buf, "\n")
+}
+
+func (e *Errors) add(err error) {
+ if errs, ok := err.(ast.Errors); ok {
+ for i := range errs {
+ *e = append(*e, errs[i])
+ }
+ } else {
+ *e = append(*e, err)
+ }
+}
+
+type unsupportedDocumentType string
+
+func (path unsupportedDocumentType) Error() string {
+ return string(path) + ": document must be of type object"
+}
+
+type unrecognizedFile string
+
+func (path unrecognizedFile) Error() string {
+ return string(path) + ": can't recognize file type"
+}
+
+func isUnrecognizedFile(err error) bool {
+ _, ok := err.(unrecognizedFile)
+ return ok
+}
+
+type mergeError string
+
+func (e mergeError) Error() string {
+ return string(e) + ": merge error"
+}
diff --git a/vendor/github.com/open-policy-agent/opa/loader/extension/extension.go b/vendor/github.com/open-policy-agent/opa/v1/loader/extension/extension.go
similarity index 100%
rename from vendor/github.com/open-policy-agent/opa/loader/extension/extension.go
rename to vendor/github.com/open-policy-agent/opa/v1/loader/extension/extension.go
diff --git a/vendor/github.com/open-policy-agent/opa/loader/filter/filter.go b/vendor/github.com/open-policy-agent/opa/v1/loader/filter/filter.go
similarity index 100%
rename from vendor/github.com/open-policy-agent/opa/loader/filter/filter.go
rename to vendor/github.com/open-policy-agent/opa/v1/loader/filter/filter.go
diff --git a/vendor/github.com/open-policy-agent/opa/v1/loader/loader.go b/vendor/github.com/open-policy-agent/opa/v1/loader/loader.go
new file mode 100644
index 0000000000..42a59d031f
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/loader/loader.go
@@ -0,0 +1,861 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package loader contains utilities for loading files into OPA.
+package loader
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "sigs.k8s.io/yaml"
+
+ fileurl "github.com/open-policy-agent/opa/internal/file/url"
+ "github.com/open-policy-agent/opa/internal/merge"
+ "github.com/open-policy-agent/opa/v1/ast"
+ astJSON "github.com/open-policy-agent/opa/v1/ast/json"
+ "github.com/open-policy-agent/opa/v1/bundle"
+ "github.com/open-policy-agent/opa/v1/loader/extension"
+ "github.com/open-policy-agent/opa/v1/loader/filter"
+ "github.com/open-policy-agent/opa/v1/metrics"
+ "github.com/open-policy-agent/opa/v1/storage"
+ "github.com/open-policy-agent/opa/v1/storage/inmem"
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+// Result represents the result of successfully loading zero or more files.
+type Result struct {
+ Documents map[string]any
+ Modules map[string]*RegoFile
+ path []string
+}
+
+// ParsedModules returns the parsed modules stored on the result.
+func (l *Result) ParsedModules() map[string]*ast.Module {
+ modules := make(map[string]*ast.Module)
+ for _, module := range l.Modules {
+ modules[module.Name] = module.Parsed
+ }
+ return modules
+}
+
+// Compiler returns a Compiler object with the compiled modules from this loader
+// result.
+func (l *Result) Compiler() (*ast.Compiler, error) {
+ compiler := ast.NewCompiler()
+ compiler.Compile(l.ParsedModules())
+ if compiler.Failed() {
+ return nil, compiler.Errors
+ }
+ return compiler, nil
+}
+
+// Store returns a Store object with the documents from this loader result.
+func (l *Result) Store() (storage.Store, error) {
+ return l.StoreWithOpts()
+}
+
+// StoreWithOpts returns a Store object with the documents from this loader result,
+// instantiated with the passed options.
+func (l *Result) StoreWithOpts(opts ...inmem.Opt) (storage.Store, error) {
+ return inmem.NewFromObjectWithOpts(l.Documents, opts...), nil
+}
+
+// RegoFile represents the result of loading a single Rego source file.
+type RegoFile struct {
+ Name string
+ Parsed *ast.Module
+ Raw []byte
+}
+
+// Filter defines the interface for filtering files during loading. If the
+// filter returns true, the file should be excluded from the result.
+type Filter = filter.LoaderFilter
+
+// GlobExcludeName excludes files and directories whose names do not match the
+// shell style pattern at minDepth or greater.
+func GlobExcludeName(pattern string, minDepth int) Filter {
+ return func(_ string, info fs.FileInfo, depth int) bool {
+ match, _ := filepath.Match(pattern, info.Name())
+ return match && depth >= minDepth
+ }
+}
+
+// FileLoader defines an interface for loading OPA data files
+// and Rego policies.
+type FileLoader interface {
+ All(paths []string) (*Result, error)
+ Filtered(paths []string, filter Filter) (*Result, error)
+ AsBundle(path string) (*bundle.Bundle, error)
+ WithReader(io.Reader) FileLoader
+ WithFS(fs.FS) FileLoader
+ WithMetrics(metrics.Metrics) FileLoader
+ WithFilter(Filter) FileLoader
+ WithBundleVerificationConfig(*bundle.VerificationConfig) FileLoader
+ WithSkipBundleVerification(bool) FileLoader
+ WithBundleLazyLoadingMode(bool) FileLoader
+ WithProcessAnnotation(bool) FileLoader
+ WithCapabilities(*ast.Capabilities) FileLoader
+ // Deprecated: Use SetOptions in the json package instead, where a longer description
+ // of why this is deprecated also can be found.
+ WithJSONOptions(*astJSON.Options) FileLoader
+ WithRegoVersion(ast.RegoVersion) FileLoader
+ WithFollowSymlinks(bool) FileLoader
+}
+
+// NewFileLoader returns a new FileLoader instance.
+func NewFileLoader() FileLoader {
+ return &fileLoader{
+ metrics: metrics.New(),
+ files: make(map[string]bundle.FileInfo),
+ }
+}
+
+type fileLoader struct {
+ metrics metrics.Metrics
+ filter Filter
+ bvc *bundle.VerificationConfig
+ skipVerify bool
+ bundleLazyLoading bool
+ files map[string]bundle.FileInfo
+ opts ast.ParserOptions
+ fsys fs.FS
+ reader io.Reader
+ followSymlinks bool
+}
+
+// WithFS provides an fs.FS to use for loading files. You can pass nil to
+// use plain IO calls (e.g. os.Open, os.Stat, etc.), this is the default
+// behaviour.
+func (fl *fileLoader) WithFS(fsys fs.FS) FileLoader {
+ fl.fsys = fsys
+ return fl
+}
+
+// WithReader provides an io.Reader to use for loading the bundle tarball.
+// An io.Reader passed via WithReader takes precedence over an fs.FS passed
+// via WithFS.
+func (fl *fileLoader) WithReader(rdr io.Reader) FileLoader {
+ fl.reader = rdr
+ return fl
+}
+
+// WithMetrics provides the metrics instance to use while loading
+func (fl *fileLoader) WithMetrics(m metrics.Metrics) FileLoader {
+ fl.metrics = m
+ return fl
+}
+
+// WithFilter specifies the filter object to use to filter files while loading
+func (fl *fileLoader) WithFilter(filter Filter) FileLoader {
+ fl.filter = filter
+ return fl
+}
+
+// WithBundleVerificationConfig sets the key configuration used to verify a signed bundle
+func (fl *fileLoader) WithBundleVerificationConfig(config *bundle.VerificationConfig) FileLoader {
+ fl.bvc = config
+ return fl
+}
+
+// WithSkipBundleVerification skips verification of a signed bundle
+func (fl *fileLoader) WithSkipBundleVerification(skipVerify bool) FileLoader {
+ fl.skipVerify = skipVerify
+ return fl
+}
+
+// WithBundleLazyLoadingMode enables or disables bundle lazy loading mode
+func (fl *fileLoader) WithBundleLazyLoadingMode(bundleLazyLoading bool) FileLoader {
+ fl.bundleLazyLoading = bundleLazyLoading
+ return fl
+}
+
+// WithProcessAnnotation enables or disables processing of schema annotations on rules
+func (fl *fileLoader) WithProcessAnnotation(processAnnotation bool) FileLoader {
+ fl.opts.ProcessAnnotation = processAnnotation
+ return fl
+}
+
+// WithCapabilities sets the supported capabilities when loading the files
+func (fl *fileLoader) WithCapabilities(caps *ast.Capabilities) FileLoader {
+ fl.opts.Capabilities = caps
+ return fl
+}
+
+// WithJSONOptions sets the JSON options on the parser (now a no-op).
+//
+// Deprecated: Use SetOptions in the json package instead, where a longer description
+// of why this is deprecated also can be found.
+func (fl *fileLoader) WithJSONOptions(*astJSON.Options) FileLoader {
+ return fl
+}
+
+// WithRegoVersion sets the ast.RegoVersion to use when parsing and compiling modules.
+func (fl *fileLoader) WithRegoVersion(version ast.RegoVersion) FileLoader {
+ fl.opts.RegoVersion = version
+ return fl
+}
+
+// WithFollowSymlinks enables or disables following symlinks when loading files
+func (fl *fileLoader) WithFollowSymlinks(followSymlinks bool) FileLoader {
+ fl.followSymlinks = followSymlinks
+ return fl
+}
+
+// All returns a Result object loaded (recursively) from the specified paths.
+func (fl fileLoader) All(paths []string) (*Result, error) {
+ return fl.Filtered(paths, nil)
+}
+
+// Filtered returns a Result object loaded (recursively) from the specified
+// paths while applying the given filters. If any filter returns true, the
+// file/directory is excluded.
+func (fl fileLoader) Filtered(paths []string, filter Filter) (*Result, error) {
+ return all(fl.fsys, paths, filter, func(curr *Result, path string, depth int) error {
+
+ var (
+ bs []byte
+ err error
+ )
+ if fl.fsys != nil {
+ bs, err = fs.ReadFile(fl.fsys, path)
+ } else {
+ bs, err = os.ReadFile(path)
+ }
+ if err != nil {
+ return err
+ }
+
+ result, err := loadKnownTypes(path, bs, fl.metrics, fl.opts, fl.bundleLazyLoading)
+ if err != nil {
+ if !isUnrecognizedFile(err) {
+ return err
+ }
+ if depth > 0 {
+ return nil
+ }
+ result, err = loadFileForAnyType(path, bs, fl.metrics, fl.opts)
+ if err != nil {
+ return err
+ }
+ }
+
+ return curr.merge(path, result)
+ })
+}
+
+// AsBundle loads a path as a bundle. If it is a single file
+// it will be treated as a normal tarball bundle. If a directory
+// is supplied it will be loaded as an unzipped bundle tree.
+func (fl fileLoader) AsBundle(path string) (*bundle.Bundle, error) {
+ path, err := fileurl.Clean(path)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := checkForUNCPath(path); err != nil {
+ return nil, err
+ }
+
+ var bundleLoader bundle.DirectoryLoader
+ var isDir bool
+ if fl.reader != nil {
+ bundleLoader = bundle.NewTarballLoaderWithBaseURL(fl.reader, path).WithFilter(fl.filter)
+ } else {
+ bundleLoader, isDir, err = GetBundleDirectoryLoaderFS(fl.fsys, path, fl.filter)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ bundleLoader = bundleLoader.WithFollowSymlinks(fl.followSymlinks)
+
+ br := bundle.NewCustomReader(bundleLoader).
+ WithMetrics(fl.metrics).
+ WithBundleVerificationConfig(fl.bvc).
+ WithSkipBundleVerification(fl.skipVerify).
+ WithLazyLoadingMode(fl.bundleLazyLoading).
+ WithProcessAnnotations(fl.opts.ProcessAnnotation).
+ WithCapabilities(fl.opts.Capabilities).
+ WithFollowSymlinks(fl.followSymlinks).
+ WithRegoVersion(fl.opts.RegoVersion).
+ WithLazyLoadingMode(fl.bundleLazyLoading).
+ WithBundleName(path)
+
+ // For bundle directories add the full path in front of module file names
+ // to simplify debugging.
+ if isDir {
+ br.WithBaseDir(path)
+ }
+
+ b, err := br.Read()
+ if err != nil {
+ err = fmt.Errorf("bundle %s: %w", path, err)
+ }
+
+ return &b, err
+}
+
+// GetBundleDirectoryLoader returns a bundle directory loader which can be used to load
+// files in the directory
+func GetBundleDirectoryLoader(path string) (bundle.DirectoryLoader, bool, error) {
+ return GetBundleDirectoryLoaderFS(nil, path, nil)
+}
+
+// GetBundleDirectoryLoaderWithFilter returns a bundle directory loader which can be used to load
+// files in the directory after applying the given filter.
+func GetBundleDirectoryLoaderWithFilter(path string, filter Filter) (bundle.DirectoryLoader, bool, error) {
+ return GetBundleDirectoryLoaderFS(nil, path, filter)
+}
+
+// GetBundleDirectoryLoaderFS returns a bundle directory loader which can be used to load
+// files in the directory.
+func GetBundleDirectoryLoaderFS(fsys fs.FS, path string, filter Filter) (bundle.DirectoryLoader, bool, error) {
+ path, err := fileurl.Clean(path)
+ if err != nil {
+ return nil, false, err
+ }
+
+ if err := checkForUNCPath(path); err != nil {
+ return nil, false, err
+ }
+
+ var fi fs.FileInfo
+ if fsys != nil {
+ fi, err = fs.Stat(fsys, path)
+ } else {
+ fi, err = os.Stat(path)
+ }
+ if err != nil {
+ return nil, false, fmt.Errorf("error reading %q: %s", path, err)
+ }
+
+ var bundleLoader bundle.DirectoryLoader
+ if fi.IsDir() {
+ if fsys != nil {
+ bundleLoader = bundle.NewFSLoaderWithRoot(fsys, path)
+ } else {
+ bundleLoader = bundle.NewDirectoryLoader(path)
+ }
+ } else {
+ var fh fs.File
+ if fsys != nil {
+ fh, err = fsys.Open(path)
+ } else {
+ fh, err = os.Open(path)
+ }
+ if err != nil {
+ return nil, false, err
+ }
+ bundleLoader = bundle.NewTarballLoaderWithBaseURL(fh, path)
+ }
+
+ if filter != nil {
+ bundleLoader = bundleLoader.WithFilter(filter)
+ }
+ return bundleLoader, fi.IsDir(), nil
+}
+
+// FilteredPaths is the same as FilterPathsFS using the current diretory file
+// system
+func FilteredPaths(paths []string, filter Filter) ([]string, error) {
+ return FilteredPathsFS(nil, paths, filter)
+}
+
+// FilteredPathsFS return a list of files from the specified
+// paths while applying the given filters. If any filter returns true, the
+// file/directory is excluded.
+func FilteredPathsFS(fsys fs.FS, paths []string, filter Filter) ([]string, error) {
+ result := []string{}
+
+ _, err := all(fsys, paths, filter, func(_ *Result, path string, _ int) error {
+ result = append(result, path)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+// Schemas loads a schema set from the specified file path.
+func Schemas(schemaPath string) (*ast.SchemaSet, error) {
+
+ var errs Errors
+ ss, err := loadSchemas(schemaPath)
+ if err != nil {
+ errs.add(err)
+ return nil, errs
+ }
+
+ return ss, nil
+}
+
+func loadSchemas(schemaPath string) (*ast.SchemaSet, error) {
+
+ if schemaPath == "" {
+ return nil, nil
+ }
+
+ ss := ast.NewSchemaSet()
+ path, err := fileurl.Clean(schemaPath)
+ if err != nil {
+ return nil, err
+ }
+
+ info, err := os.Stat(path)
+ if err != nil {
+ return nil, err
+ }
+
+ // Handle single file case.
+ if !info.IsDir() {
+ schema, err := loadOneSchema(path)
+ if err != nil {
+ return nil, err
+ }
+ ss.Put(ast.SchemaRootRef, schema)
+ return ss, nil
+
+ }
+
+ // Handle directory case.
+ rootDir := path
+
+ err = filepath.Walk(path,
+ func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ } else if info.IsDir() {
+ return nil
+ }
+
+ schema, err := loadOneSchema(path)
+ if err != nil {
+ return err
+ }
+
+ relPath, err := filepath.Rel(rootDir, path)
+ if err != nil {
+ return err
+ }
+
+ key := getSchemaSetByPathKey(relPath)
+ ss.Put(key, schema)
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ return ss, nil
+}
+
+func getSchemaSetByPathKey(path string) ast.Ref {
+
+ front := filepath.Dir(path)
+ last := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path))
+
+ var parts []string
+
+ if front != "." {
+ parts = append(strings.Split(filepath.ToSlash(front), "/"), last)
+ } else {
+ parts = []string{last}
+ }
+
+ key := make(ast.Ref, 1+len(parts))
+ key[0] = ast.SchemaRootDocument
+ for i := range parts {
+ key[i+1] = ast.InternedTerm(parts[i])
+ }
+
+ return key
+}
+
+func loadOneSchema(path string) (any, error) {
+ bs, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ var schema any
+ if err := util.Unmarshal(bs, &schema); err != nil {
+ return nil, fmt.Errorf("%s: %w", path, err)
+ }
+
+ return schema, nil
+}
+
+// All returns a Result object loaded (recursively) from the specified paths.
+// Deprecated: Use FileLoader.Filtered() instead.
+func All(paths []string) (*Result, error) {
+ return NewFileLoader().Filtered(paths, nil)
+}
+
+// Filtered returns a Result object loaded (recursively) from the specified
+// paths while applying the given filters. If any filter returns true, the
+// file/directory is excluded.
+// Deprecated: Use FileLoader.Filtered() instead.
+func Filtered(paths []string, filter Filter) (*Result, error) {
+ return NewFileLoader().Filtered(paths, filter)
+}
+
+// AsBundle loads a path as a bundle. If it is a single file
+// it will be treated as a normal tarball bundle. If a directory
+// is supplied it will be loaded as an unzipped bundle tree.
+// Deprecated: Use FileLoader.AsBundle() instead.
+func AsBundle(path string) (*bundle.Bundle, error) {
+ return NewFileLoader().AsBundle(path)
+}
+
+// AllRegos returns a Result object loaded (recursively) with all Rego source
+// files from the specified paths.
+func AllRegos(paths []string) (*Result, error) {
+ return NewFileLoader().Filtered(paths, func(_ string, info os.FileInfo, _ int) bool {
+ return !info.IsDir() && !strings.HasSuffix(info.Name(), bundle.RegoExt)
+ })
+}
+
+// Rego is deprecated. Use RegoWithOpts instead.
+func Rego(path string) (*RegoFile, error) {
+ return RegoWithOpts(path, ast.ParserOptions{})
+}
+
+// RegoWithOpts returns a RegoFile object loaded from the given path.
+func RegoWithOpts(path string, opts ast.ParserOptions) (*RegoFile, error) {
+ path, err := fileurl.Clean(path)
+ if err != nil {
+ return nil, err
+ }
+ bs, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+ return loadRego(path, bs, metrics.New(), opts)
+}
+
+// CleanPath returns the normalized version of a path that can be used as an identifier.
+func CleanPath(path string) string {
+ return strings.Trim(path, "/")
+}
+
+// Paths returns a sorted list of files contained at path. If recurse is true
+// and path is a directory, then Paths will walk the directory structure
+// recursively and list files at each level.
+func Paths(path string, recurse bool) (paths []string, err error) {
+ path, err = fileurl.Clean(path)
+ if err != nil {
+ return nil, err
+ }
+ err = filepath.Walk(path, func(f string, _ os.FileInfo, _ error) error {
+ if !recurse {
+ if path != f && path != filepath.Dir(f) {
+ return filepath.SkipDir
+ }
+ }
+ paths = append(paths, f)
+ return nil
+ })
+ return paths, err
+}
+
+// Dirs resolves filepaths to directories. It will return a list of unique
+// directories.
+func Dirs(paths []string) []string {
+ unique := map[string]struct{}{}
+
+ for _, path := range paths {
+ // TODO: /dir/dir will register top level directory /dir
+ dir := filepath.Dir(path)
+ unique[dir] = struct{}{}
+ }
+
+ return util.KeysSorted(unique)
+}
+
+// SplitPrefix returns a tuple specifying the document prefix and the file
+// path.
+func SplitPrefix(path string) ([]string, string) {
+ // Non-prefixed URLs can be returned without modification and their contents
+ // can be rooted directly under data.
+ if strings.Index(path, "://") == strings.Index(path, ":") {
+ return nil, path
+ }
+ parts := strings.SplitN(path, ":", 2)
+ if len(parts) == 2 && len(parts[0]) > 0 {
+ return strings.Split(parts[0], "."), parts[1]
+ }
+ return nil, path
+}
+
+func (l *Result) merge(path string, result any) error {
+ switch result := result.(type) {
+ case bundle.Bundle:
+ for _, module := range result.Modules {
+ l.Modules[module.Path] = &RegoFile{
+ Name: module.Path,
+ Parsed: module.Parsed,
+ Raw: module.Raw,
+ }
+ }
+ return l.mergeDocument(path, result.Data)
+ case *RegoFile:
+ l.Modules[CleanPath(path)] = result
+ return nil
+ default:
+ return l.mergeDocument(path, result)
+ }
+}
+
+func (l *Result) mergeDocument(path string, doc any) error {
+ obj, ok := makeDir(l.path, doc)
+ if !ok {
+ return unsupportedDocumentType(path)
+ }
+ merged, ok := merge.InterfaceMaps(l.Documents, obj)
+ if !ok {
+ return mergeError(path)
+ }
+ for k := range merged {
+ l.Documents[k] = merged[k]
+ }
+ return nil
+}
+
+func (l *Result) withParent(p string) *Result {
+ path := append(l.path, p)
+ return &Result{
+ Documents: l.Documents,
+ Modules: l.Modules,
+ path: path,
+ }
+}
+
+func newResult() *Result {
+ return &Result{
+ Documents: map[string]any{},
+ Modules: map[string]*RegoFile{},
+ }
+}
+
+func all(fsys fs.FS, paths []string, filter Filter, f func(*Result, string, int) error) (*Result, error) {
+ errs := Errors{}
+ root := newResult()
+
+ for _, path := range paths {
+
+ // Paths can be prefixed with a string that specifies where content should be
+ // loaded under data. E.g., foo.bar:/path/to/some.json will load the content
+ // of some.json under {"foo": {"bar": ...}}.
+ loaded := root
+ prefix, path := SplitPrefix(path)
+ if len(prefix) > 0 {
+ for _, part := range prefix {
+ loaded = loaded.withParent(part)
+ }
+ }
+
+ allRec(fsys, path, filter, &errs, loaded, 0, f)
+ }
+
+ if len(errs) > 0 {
+ return nil, errs
+ }
+
+ return root, nil
+}
+
+func allRec(fsys fs.FS, path string, filter Filter, errors *Errors, loaded *Result, depth int, f func(*Result, string, int) error) {
+
+ path, err := fileurl.Clean(path)
+ if err != nil {
+ errors.add(err)
+ return
+ }
+
+ if err := checkForUNCPath(path); err != nil {
+ errors.add(err)
+ return
+ }
+
+ var info fs.FileInfo
+ if fsys != nil {
+ info, err = fs.Stat(fsys, path)
+ } else {
+ info, err = os.Stat(path)
+ }
+
+ if err != nil {
+ errors.add(err)
+ return
+ }
+
+ if filter != nil && filter(path, info, depth) {
+ return
+ }
+
+ if !info.IsDir() {
+ if err := f(loaded, path, depth); err != nil {
+ errors.add(err)
+ }
+ return
+ }
+
+ // If we are recursing on directories then content must be loaded under path
+ // specified by directory hierarchy.
+ if depth > 0 {
+ loaded = loaded.withParent(info.Name())
+ }
+
+ var files []fs.DirEntry
+ if fsys != nil {
+ files, err = fs.ReadDir(fsys, path)
+ } else {
+ files, err = os.ReadDir(path)
+ }
+ if err != nil {
+ errors.add(err)
+ return
+ }
+
+ for _, file := range files {
+ allRec(fsys, filepath.Join(path, file.Name()), filter, errors, loaded, depth+1, f)
+ }
+}
+
+func loadKnownTypes(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions, bundleLazyLoadingMode bool) (any, error) {
+ ext := filepath.Ext(path)
+ if handler := extension.FindExtension(ext); handler != nil {
+ m.Timer(metrics.RegoDataParse).Start()
+
+ var value any
+ err := handler(bs, &value)
+
+ m.Timer(metrics.RegoDataParse).Stop()
+ if err != nil {
+ return nil, fmt.Errorf("bundle %s: %w", path, err)
+ }
+
+ return value, nil
+ }
+ switch ext {
+ case ".json":
+ return loadJSON(path, bs, m)
+ case ".rego":
+ return loadRego(path, bs, m, opts)
+ case ".yaml", ".yml":
+ return loadYAML(path, bs, m)
+ default:
+ if strings.HasSuffix(path, ".tar.gz") {
+ r, err := loadBundleFile(path, bs, m, opts, bundleLazyLoadingMode)
+ if err != nil {
+ err = fmt.Errorf("bundle %s: %w", path, err)
+ }
+ return r, err
+ }
+ }
+ return nil, unrecognizedFile(path)
+}
+
+func loadFileForAnyType(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (any, error) {
+ module, err := loadRego(path, bs, m, opts)
+ if err == nil {
+ return module, nil
+ }
+ doc, err := loadJSON(path, bs, m)
+ if err == nil {
+ return doc, nil
+ }
+ doc, err = loadYAML(path, bs, m)
+ if err == nil {
+ return doc, nil
+ }
+ return nil, unrecognizedFile(path)
+}
+
+func loadBundleFile(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions, bundleLazyLoadingMode bool) (bundle.Bundle, error) {
+ tl := bundle.NewTarballLoaderWithBaseURL(bytes.NewBuffer(bs), path)
+ br := bundle.NewCustomReader(tl).
+ WithRegoVersion(opts.RegoVersion).
+ WithCapabilities(opts.Capabilities).
+ WithProcessAnnotations(opts.ProcessAnnotation).
+ WithMetrics(m).
+ WithSkipBundleVerification(true).
+ WithLazyLoadingMode(bundleLazyLoadingMode).
+ IncludeManifestInData(true)
+ return br.Read()
+}
+
+func loadRego(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (*RegoFile, error) {
+ m.Timer(metrics.RegoModuleParse).Start()
+ var module *ast.Module
+ var err error
+ module, err = ast.ParseModuleWithOpts(path, string(bs), opts)
+ m.Timer(metrics.RegoModuleParse).Stop()
+ if err != nil {
+ return nil, err
+ }
+ result := &RegoFile{
+ Name: path,
+ Parsed: module,
+ Raw: bs,
+ }
+ return result, nil
+}
+
+func loadJSON(path string, bs []byte, m metrics.Metrics) (any, error) {
+ m.Timer(metrics.RegoDataParse).Start()
+ var x any
+ err := util.UnmarshalJSON(bs, &x)
+ m.Timer(metrics.RegoDataParse).Stop()
+
+ if err != nil {
+ return nil, fmt.Errorf("%s: %w", path, err)
+ }
+ return x, nil
+}
+
+func loadYAML(path string, bs []byte, m metrics.Metrics) (any, error) {
+ m.Timer(metrics.RegoDataParse).Start()
+ bs, err := yaml.YAMLToJSON(bs)
+ m.Timer(metrics.RegoDataParse).Stop()
+ if err != nil {
+ return nil, fmt.Errorf("%v: error converting YAML to JSON: %v", path, err)
+ }
+ return loadJSON(path, bs, m)
+}
+
+func makeDir(path []string, x any) (map[string]any, bool) {
+ if len(path) == 0 {
+ obj, ok := x.(map[string]any)
+ if !ok {
+ return nil, false
+ }
+ return obj, true
+ }
+ return makeDir(path[:len(path)-1], map[string]any{path[len(path)-1]: x})
+}
+
+// isUNC reports whether path is a UNC path.
+func isUNC(path string) bool {
+ return len(path) > 1 && isSlash(path[0]) && isSlash(path[1])
+}
+
+func isSlash(c uint8) bool {
+ return c == '\\' || c == '/'
+}
+
+func checkForUNCPath(path string) error {
+ if isUNC(path) {
+ return fmt.Errorf("UNC path read is not allowed: %s", path)
+ }
+ return nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/logging/logging.go b/vendor/github.com/open-policy-agent/opa/v1/logging/logging.go
similarity index 81%
rename from vendor/github.com/open-policy-agent/opa/logging/logging.go
rename to vendor/github.com/open-policy-agent/opa/v1/logging/logging.go
index 7a1edfb563..5ff27a2116 100644
--- a/vendor/github.com/open-policy-agent/opa/logging/logging.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/logging/logging.go
@@ -3,6 +3,7 @@ package logging
import (
"context"
"io"
+ "maps"
"net/http"
"github.com/sirupsen/logrus"
@@ -24,12 +25,12 @@ const (
// Logger provides interface for OPA logger implementations
type Logger interface {
- Debug(fmt string, a ...interface{})
- Info(fmt string, a ...interface{})
- Error(fmt string, a ...interface{})
- Warn(fmt string, a ...interface{})
+ Debug(fmt string, a ...any)
+ Info(fmt string, a ...any)
+ Error(fmt string, a ...any)
+ Warn(fmt string, a ...any)
- WithFields(map[string]interface{}) Logger
+ WithFields(map[string]any) Logger
GetLevel() Level
SetLevel(Level)
@@ -38,7 +39,7 @@ type Logger interface {
// StandardLogger is the default OPA logger implementation.
type StandardLogger struct {
logger *logrus.Logger
- fields map[string]interface{}
+ fields map[string]any
}
// New returns a new standard logger.
@@ -68,20 +69,16 @@ func (l *StandardLogger) SetFormatter(formatter logrus.Formatter) {
}
// WithFields provides additional fields to include in log output
-func (l *StandardLogger) WithFields(fields map[string]interface{}) Logger {
+func (l *StandardLogger) WithFields(fields map[string]any) Logger {
cp := *l
- cp.fields = make(map[string]interface{})
- for k, v := range l.fields {
- cp.fields[k] = v
- }
- for k, v := range fields {
- cp.fields[k] = v
- }
+ cp.fields = make(map[string]any)
+ maps.Copy(cp.fields, l.fields)
+ maps.Copy(cp.fields, fields)
return &cp
}
// getFields returns additional fields of this logger
-func (l *StandardLogger) getFields() map[string]interface{} {
+func (l *StandardLogger) getFields() map[string]any {
return l.fields
}
@@ -126,7 +123,7 @@ func (l *StandardLogger) GetLevel() Level {
}
// Debug logs at debug level
-func (l *StandardLogger) Debug(fmt string, a ...interface{}) {
+func (l *StandardLogger) Debug(fmt string, a ...any) {
if len(a) == 0 {
l.logger.WithFields(l.getFields()).Debug(fmt)
return
@@ -135,7 +132,7 @@ func (l *StandardLogger) Debug(fmt string, a ...interface{}) {
}
// Info logs at info level
-func (l *StandardLogger) Info(fmt string, a ...interface{}) {
+func (l *StandardLogger) Info(fmt string, a ...any) {
if len(a) == 0 {
l.logger.WithFields(l.getFields()).Info(fmt)
return
@@ -144,7 +141,7 @@ func (l *StandardLogger) Info(fmt string, a ...interface{}) {
}
// Error logs at error level
-func (l *StandardLogger) Error(fmt string, a ...interface{}) {
+func (l *StandardLogger) Error(fmt string, a ...any) {
if len(a) == 0 {
l.logger.WithFields(l.getFields()).Error(fmt)
return
@@ -153,7 +150,7 @@ func (l *StandardLogger) Error(fmt string, a ...interface{}) {
}
// Warn logs at warn level
-func (l *StandardLogger) Warn(fmt string, a ...interface{}) {
+func (l *StandardLogger) Warn(fmt string, a ...any) {
if len(a) == 0 {
l.logger.WithFields(l.getFields()).Warn(fmt)
return
@@ -164,7 +161,7 @@ func (l *StandardLogger) Warn(fmt string, a ...interface{}) {
// NoOpLogger logging implementation that does nothing
type NoOpLogger struct {
level Level
- fields map[string]interface{}
+ fields map[string]any
}
// NewNoOpLogger instantiates new NoOpLogger
@@ -176,23 +173,23 @@ func NewNoOpLogger() *NoOpLogger {
// WithFields provides additional fields to include in log output.
// Implemented here primarily to be able to switch between implementations without loss of data.
-func (l *NoOpLogger) WithFields(fields map[string]interface{}) Logger {
+func (l *NoOpLogger) WithFields(fields map[string]any) Logger {
cp := *l
cp.fields = fields
return &cp
}
// Debug noop
-func (*NoOpLogger) Debug(string, ...interface{}) {}
+func (*NoOpLogger) Debug(string, ...any) {}
// Info noop
-func (*NoOpLogger) Info(string, ...interface{}) {}
+func (*NoOpLogger) Info(string, ...any) {}
// Error noop
-func (*NoOpLogger) Error(string, ...interface{}) {}
+func (*NoOpLogger) Error(string, ...any) {}
// Warn noop
-func (*NoOpLogger) Warn(string, ...interface{}) {}
+func (*NoOpLogger) Warn(string, ...any) {}
// SetLevel set log level
func (l *NoOpLogger) SetLevel(level Level) {
@@ -264,3 +261,14 @@ func DecisionIDFromContext(ctx context.Context) (string, bool) {
s, ok := ctx.Value(decisionCtxKey).(string)
return s, ok
}
+
+const batchDecisionCtxKey = requestContextKey("batch_decision_id")
+
+func WithBatchDecisionID(parent context.Context, id string) context.Context {
+ return context.WithValue(parent, batchDecisionCtxKey, id)
+}
+
+func BatchDecisionIDFromContext(ctx context.Context) (string, bool) {
+ s, ok := ctx.Value(batchDecisionCtxKey).(string)
+ return s, ok
+}
diff --git a/vendor/github.com/open-policy-agent/opa/metrics/metrics.go b/vendor/github.com/open-policy-agent/opa/v1/metrics/metrics.go
similarity index 56%
rename from vendor/github.com/open-policy-agent/opa/metrics/metrics.go
rename to vendor/github.com/open-policy-agent/opa/v1/metrics/metrics.go
index 53cd606a36..481f27337e 100644
--- a/vendor/github.com/open-policy-agent/opa/metrics/metrics.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/metrics/metrics.go
@@ -8,7 +8,7 @@ package metrics
import (
"encoding/json"
"fmt"
- "sort"
+ "slices"
"strings"
"sync"
"sync/atomic"
@@ -19,21 +19,27 @@ import (
// Well-known metric names.
const (
- BundleRequest = "bundle_request"
- ServerHandler = "server_handler"
- ServerQueryCacheHit = "server_query_cache_hit"
- SDKDecisionEval = "sdk_decision_eval"
- RegoQueryCompile = "rego_query_compile"
- RegoQueryEval = "rego_query_eval"
- RegoQueryParse = "rego_query_parse"
- RegoModuleParse = "rego_module_parse"
- RegoDataParse = "rego_data_parse"
- RegoModuleCompile = "rego_module_compile"
- RegoPartialEval = "rego_partial_eval"
- RegoInputParse = "rego_input_parse"
- RegoLoadFiles = "rego_load_files"
- RegoLoadBundles = "rego_load_bundles"
- RegoExternalResolve = "rego_external_resolve"
+ BundleRequest = "bundle_request"
+ ServerHandler = "server_handler"
+ ServerQueryCacheHit = "server_query_cache_hit"
+ SDKDecisionEval = "sdk_decision_eval"
+ RegoQueryCompile = "rego_query_compile"
+ RegoQueryEval = "rego_query_eval"
+ RegoQueryParse = "rego_query_parse"
+ RegoModuleParse = "rego_module_parse"
+ RegoDataParse = "rego_data_parse"
+ RegoModuleCompile = "rego_module_compile"
+ RegoPartialEval = "rego_partial_eval"
+ RegoInputParse = "rego_input_parse"
+ RegoLoadFiles = "rego_load_files"
+ RegoLoadBundles = "rego_load_bundles"
+ RegoExternalResolve = "rego_external_resolve"
+ CompilePrepPartial = "compile_prep_partial"
+ CompileEvalConstraints = "compile_eval_constraints"
+ CompileTranslateQueries = "compile_translate_queries"
+ CompileExtractAnnotationsUnknowns = "compile_extract_annotations_unknowns"
+ CompileExtractAnnotationsMask = "compile_extract_annotations_mask"
+ CompileEvalMaskRule = "compile_eval_mask_rule"
)
// Info contains attributes describing the underlying metrics provider.
@@ -48,13 +54,13 @@ type Metrics interface {
Timer(name string) Timer
Histogram(name string) Histogram
Counter(name string) Counter
- All() map[string]interface{}
+ All() map[string]any
Clear()
json.Marshaler
}
type TimerMetrics interface {
- Timers() map[string]interface{}
+ Timers() map[string]any
}
type metrics struct {
@@ -66,14 +72,22 @@ type metrics struct {
// New returns a new Metrics object.
func New() Metrics {
- m := &metrics{}
- m.Clear()
- return m
+ return &metrics{
+ timers: map[string]Timer{},
+ histograms: map[string]Histogram{},
+ counters: map[string]Counter{},
+ }
+}
+
+// NoOp returns a Metrics implementation that does nothing and costs nothing.
+// Used when metrics are expected, but not of interest.
+func NoOp() Metrics {
+ return noOpMetricsInstance
}
type metric struct {
Key string
- Value interface{}
+ Value any
}
func (*metrics) Info() Info {
@@ -83,7 +97,6 @@ func (*metrics) Info() Info {
}
func (m *metrics) String() string {
-
all := m.All()
sorted := make([]metric, 0, len(all))
@@ -94,8 +107,8 @@ func (m *metrics) String() string {
})
}
- sort.Slice(sorted, func(i, j int) bool {
- return sorted[i].Key < sorted[j].Key
+ slices.SortFunc(sorted, func(a, b metric) int {
+ return strings.Compare(a.Key, b.Key)
})
buf := make([]string, len(sorted))
@@ -144,10 +157,10 @@ func (m *metrics) Counter(name string) Counter {
return c
}
-func (m *metrics) All() map[string]interface{} {
+func (m *metrics) All() map[string]any {
m.mtx.Lock()
defer m.mtx.Unlock()
- result := map[string]interface{}{}
+ result := make(map[string]any, len(m.timers)+len(m.histograms)+len(m.counters))
for name, timer := range m.timers {
result[m.formatKey(name, timer)] = timer.Value()
}
@@ -160,10 +173,10 @@ func (m *metrics) All() map[string]interface{} {
return result
}
-func (m *metrics) Timers() map[string]interface{} {
+func (m *metrics) Timers() map[string]any {
m.mtx.Lock()
defer m.mtx.Unlock()
- ts := map[string]interface{}{}
+ ts := make(map[string]any, len(m.timers))
for n, t := range m.timers {
ts[m.formatKey(n, t)] = t.Value()
}
@@ -178,7 +191,7 @@ func (m *metrics) Clear() {
m.counters = map[string]Counter{}
}
-func (m *metrics) formatKey(name string, metrics interface{}) string {
+func (*metrics) formatKey(name string, metrics any) string {
switch metrics.(type) {
case Timer:
return "timer_" + name + "_ns"
@@ -194,9 +207,12 @@ func (m *metrics) formatKey(name string, metrics interface{}) string {
// Timer defines the interface for a restartable timer that accumulates elapsed
// time.
type Timer interface {
- Value() interface{}
+ Value() any
Int64() int64
+ // Start or resume a timer's time tracking.
Start()
+ // Stop a timer, and accumulate the delta (in nanoseconds) since it was last
+ // started.
Stop() int64
}
@@ -208,19 +224,26 @@ type timer struct {
func (t *timer) Start() {
t.mtx.Lock()
- defer t.mtx.Unlock()
t.start = time.Now()
+ t.mtx.Unlock()
}
func (t *timer) Stop() int64 {
t.mtx.Lock()
defer t.mtx.Unlock()
- delta := time.Since(t.start).Nanoseconds()
- t.value += delta
+
+ var delta int64
+ if !t.start.IsZero() {
+ // Add the delta to the accumulated time value so far.
+ delta = time.Since(t.start).Nanoseconds()
+ t.value += delta
+ t.start = time.Time{} // Reset the start time to zero.
+ }
+
return delta
}
-func (t *timer) Value() interface{} {
+func (t *timer) Value() any {
return t.Int64()
}
@@ -232,7 +255,7 @@ func (t *timer) Int64() int64 {
// Histogram defines the interface for a histogram with hardcoded percentiles.
type Histogram interface {
- Value() interface{}
+ Value() any
Update(int64)
}
@@ -253,8 +276,8 @@ func (h *histogram) Update(v int64) {
h.hist.Update(v)
}
-func (h *histogram) Value() interface{} {
- values := map[string]interface{}{}
+func (h *histogram) Value() any {
+ values := make(map[string]any, 12)
snap := h.hist.Snapshot()
percentiles := snap.Percentiles([]float64{
0.5,
@@ -282,7 +305,7 @@ func (h *histogram) Value() interface{} {
// Counter defines the interface for a monotonic increasing counter.
type Counter interface {
- Value() interface{}
+ Value() any
Incr()
Add(n uint64)
}
@@ -299,14 +322,49 @@ func (c *counter) Add(n uint64) {
atomic.AddUint64(&c.c, n)
}
-func (c *counter) Value() interface{} {
+func (c *counter) Value() any {
return atomic.LoadUint64(&c.c)
}
-func Statistics(num ...int64) interface{} {
+func Statistics(num ...int64) any {
t := newHistogram()
for _, n := range num {
t.Update(n)
}
return t.Value()
}
+
+type noOpMetrics struct{}
+type noOpTimer struct{}
+type noOpHistogram struct{}
+type noOpCounter struct{}
+
+var (
+ noOpMetricsInstance = &noOpMetrics{}
+ noOpTimerInstance = &noOpTimer{}
+ noOpHistogramInstance = &noOpHistogram{}
+ noOpCounterInstance = &noOpCounter{}
+)
+
+func (*noOpMetrics) Info() Info { return Info{Name: ""} }
+func (*noOpMetrics) Timer(name string) Timer { return noOpTimerInstance }
+func (*noOpMetrics) Histogram(name string) Histogram { return noOpHistogramInstance }
+func (*noOpMetrics) Counter(name string) Counter { return noOpCounterInstance }
+func (*noOpMetrics) All() map[string]any { return nil }
+func (*noOpMetrics) Clear() {}
+func (*noOpMetrics) MarshalJSON() ([]byte, error) {
+ return []byte(`{"name": ""}`), nil
+}
+
+func (*noOpTimer) Start() {}
+func (*noOpTimer) Stop() int64 { return 0 }
+func (*noOpTimer) Value() any { return 0 }
+func (*noOpTimer) Int64() int64 { return 0 }
+
+func (*noOpHistogram) Update(v int64) {}
+func (*noOpHistogram) Value() any { return nil }
+
+func (*noOpCounter) Incr() {}
+func (*noOpCounter) Add(_ uint64) {}
+func (*noOpCounter) Value() any { return 0 }
+func (*noOpCounter) Int64() int64 { return 0 }
diff --git a/vendor/github.com/open-policy-agent/opa/plugins/plugins.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/plugins.go
similarity index 83%
rename from vendor/github.com/open-policy-agent/opa/plugins/plugins.go
rename to vendor/github.com/open-policy-agent/opa/v1/plugins/plugins.go
index 567acfb817..ca8df1ee48 100644
--- a/vendor/github.com/open-policy-agent/opa/plugins/plugins.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/plugins/plugins.go
@@ -9,7 +9,9 @@ import (
"context"
"errors"
"fmt"
+ "maps"
mr "math/rand"
+ "net/http"
"sync"
"time"
@@ -17,24 +19,22 @@ import (
"github.com/prometheus/client_golang/prometheus"
"go.opentelemetry.io/otel/sdk/trace"
- "github.com/gorilla/mux"
-
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/bundle"
- "github.com/open-policy-agent/opa/config"
- "github.com/open-policy-agent/opa/hooks"
bundleUtils "github.com/open-policy-agent/opa/internal/bundle"
cfg "github.com/open-policy-agent/opa/internal/config"
initload "github.com/open-policy-agent/opa/internal/runtime/init"
- "github.com/open-policy-agent/opa/keys"
- "github.com/open-policy-agent/opa/loader"
- "github.com/open-policy-agent/opa/logging"
- "github.com/open-policy-agent/opa/plugins/rest"
- "github.com/open-policy-agent/opa/resolver/wasm"
- "github.com/open-policy-agent/opa/storage"
- "github.com/open-policy-agent/opa/topdown/cache"
- "github.com/open-policy-agent/opa/topdown/print"
- "github.com/open-policy-agent/opa/tracing"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/bundle"
+ "github.com/open-policy-agent/opa/v1/config"
+ "github.com/open-policy-agent/opa/v1/hooks"
+ "github.com/open-policy-agent/opa/v1/keys"
+ "github.com/open-policy-agent/opa/v1/loader"
+ "github.com/open-policy-agent/opa/v1/logging"
+ "github.com/open-policy-agent/opa/v1/plugins/rest"
+ "github.com/open-policy-agent/opa/v1/resolver/wasm"
+ "github.com/open-policy-agent/opa/v1/storage"
+ "github.com/open-policy-agent/opa/v1/topdown/cache"
+ "github.com/open-policy-agent/opa/v1/topdown/print"
+ "github.com/open-policy-agent/opa/v1/tracing"
)
// Factory defines the interface OPA uses to instantiate your plugin.
@@ -85,8 +85,8 @@ import (
// After a plugin has been created subsequent status updates can be
// send anytime the plugin enters a ready or error state.
type Factory interface {
- Validate(manager *Manager, config []byte) (interface{}, error)
- New(manager *Manager, config interface{}) Plugin
+ Validate(manager *Manager, config []byte) (any, error)
+ New(manager *Manager, config any) Plugin
}
// Plugin defines the interface OPA uses to manage your plugin.
@@ -104,7 +104,7 @@ type Factory interface {
type Plugin interface {
Start(ctx context.Context) error
Stop(ctx context.Context)
- Reconfigure(ctx context.Context, config interface{})
+ Reconfigure(ctx context.Context, config any)
}
// Triggerable defines the interface plugins use for manual plugin triggers.
@@ -163,13 +163,22 @@ func (s *Status) String() string {
return fmt.Sprintf("{%v %q}", s.State, s.Message)
}
+func (s *Status) Equal(other *Status) bool {
+ if s == nil || other == nil {
+ return s == nil && other == nil
+ }
+
+ return s.State == other.State && s.Message == other.Message
+}
+
// StatusListener defines a handler to register for status updates.
type StatusListener func(status map[string]*Status)
// Manager implements lifecycle management of plugins and gives plugins access
// to engine-wide components like storage.
type Manager struct {
- Store storage.Store
+ Store storage.Store
+ // Config values should be accessed from the thread-safe GetConfig method.
Config *config.Config
Info *ast.Term
ID string
@@ -198,7 +207,7 @@ type Manager struct {
serverInitializedOnce sync.Once
printHook print.Hook
enablePrintStatements bool
- router *mux.Router
+ router *http.ServeMux
prometheusRegister prometheus.Registerer
tracerProvider *trace.TracerProvider
distributedTacingOpts tracing.Options
@@ -207,17 +216,25 @@ type Manager struct {
bootstrapConfigLabels map[string]string
hooks hooks.Hooks
enableTelemetry bool
- reporter *report.Reporter
+ reporter report.Reporter
opaReportNotifyCh chan struct{}
stop chan chan struct{}
parserOptions ast.ParserOptions
+ extraRoutes map[string]ExtraRoute
+ extraMiddlewares []func(http.Handler) http.Handler
+ extraAuthorizerRoutes []func(string, []any) bool
+ bundleActivatorPlugin string
}
-type managerContextKey string
-type managerWasmResolverKey string
+type (
+ managerContextKey string
+ managerWasmResolverKey string
+)
-const managerCompilerContextKey = managerContextKey("compiler")
-const managerWasmResolverContextKey = managerWasmResolverKey("wasmResolvers")
+const (
+ managerCompilerContextKey = managerContextKey("compiler")
+ managerWasmResolverContextKey = managerWasmResolverKey("wasmResolvers")
+)
// SetCompilerOnContext puts the compiler into the storage context. Calling this
// function before committing updated policies to storage allows the manager to
@@ -264,7 +281,6 @@ func validateTriggerMode(mode TriggerMode) error {
// ValidateAndInjectDefaultsForTriggerMode validates the trigger mode and injects default values
func ValidateAndInjectDefaultsForTriggerMode(a, b *TriggerMode) (*TriggerMode, error) {
-
if a == nil && b != nil {
err := validateTriggerMode(*b)
if err != nil {
@@ -361,7 +377,7 @@ func PrintHook(h print.Hook) func(*Manager) {
}
}
-func WithRouter(r *mux.Router) func(*Manager) {
+func WithRouter(r *http.ServeMux) func(*Manager) {
return func(m *Manager) {
m.router = r
}
@@ -417,9 +433,15 @@ func WithTelemetryGatherers(gs map[string]report.Gatherer) func(*Manager) {
}
}
+// WithBundleActivatorPlugin sets the name of the activator plugin to load bundles into the store
+func WithBundleActivatorPlugin(bundleActivatorPlugin string) func(*Manager) {
+ return func(m *Manager) {
+ m.bundleActivatorPlugin = bundleActivatorPlugin
+ }
+}
+
// New creates a new Manager using config.
func New(raw []byte, id string, store storage.Store, opts ...func(*Manager)) (*Manager, error) {
-
parsedConfig, err := config.ParseConfig(raw, id)
if err != nil {
return nil, err
@@ -434,12 +456,18 @@ func New(raw []byte, id string, store storage.Store, opts ...func(*Manager)) (*M
maxErrors: -1,
serverInitialized: make(chan struct{}),
bootstrapConfigLabels: parsedConfig.Labels,
+ extraRoutes: map[string]ExtraRoute{},
}
for _, f := range opts {
f(m)
}
+ if m.parserOptions.RegoVersion == ast.RegoUndefined {
+ // Default to v1 if rego-version is not set through options
+ m.parserOptions.RegoVersion = ast.DefaultRegoVersion
+ }
+
if m.logger == nil {
m.logger = logging.Get()
}
@@ -472,13 +500,7 @@ func New(raw []byte, id string, store storage.Store, opts ...func(*Manager)) (*M
return nil, err
}
- serviceOpts := cfg.ServiceOptions{
- Raw: parsedConfig.Services,
- AuthPlugin: m.AuthPlugin,
- Keys: m.keys,
- Logger: m.logger,
- DistributedTacingOpts: m.distributedTacingOpts,
- }
+ serviceOpts := m.DefaultServiceOpts(parsedConfig)
m.services, err = cfg.ParseServicesConfig(serviceOpts)
if err != nil {
@@ -486,7 +508,7 @@ func New(raw []byte, id string, store storage.Store, opts ...func(*Manager)) (*M
}
if m.enableTelemetry {
- reporter, err := report.New(id, report.Options{Logger: m.logger})
+ reporter, err := report.New(report.Options{Logger: m.logger})
if err != nil {
return nil, err
}
@@ -494,8 +516,8 @@ func New(raw []byte, id string, store storage.Store, opts ...func(*Manager)) (*M
m.reporter.RegisterGatherer("min_compatible_version", func(_ context.Context) (any, error) {
var minimumCompatibleVersion string
- if m.compiler != nil && m.compiler.Required != nil {
- minimumCompatibleVersion, _ = m.compiler.Required.MinimumCompatibleVersion()
+ if c := m.GetCompiler(); c != nil && c.Required != nil {
+ minimumCompatibleVersion, _ = c.Required.MinimumCompatibleVersion()
}
return minimumCompatibleVersion, nil
})
@@ -512,7 +534,6 @@ func New(raw []byte, id string, store storage.Store, opts ...func(*Manager)) (*M
// Init returns an error if the manager could not initialize itself. Init() should
// be called before Start(). Init() is idempotent.
func (m *Manager) Init(ctx context.Context) error {
-
if m.initialized {
return nil
}
@@ -529,7 +550,6 @@ func (m *Manager) Init(ctx context.Context) error {
}
err := storage.Txn(ctx, m.Store, params, func(txn storage.Transaction) error {
-
result, err := initload.InsertAndCompile(ctx, initload.InsertAndCompileOptions{
Store: m.Store,
Txn: txn,
@@ -537,8 +557,9 @@ func (m *Manager) Init(ctx context.Context) error {
Bundles: m.initBundles,
MaxErrors: m.maxErrors,
EnablePrintStatements: m.enablePrintStatements,
+ ParserOptions: m.parserOptions,
+ BundleActivatorPlugin: m.bundleActivatorPlugin,
})
-
if err != nil {
return err
}
@@ -554,7 +575,6 @@ func (m *Manager) Init(ctx context.Context) error {
_, err = m.Store.Register(ctx, txn, storage.TriggerConfig{OnCommit: m.onCommit})
return err
})
-
if err != nil {
if m.stop != nil {
done := make(chan struct{})
@@ -573,14 +593,24 @@ func (m *Manager) Init(ctx context.Context) error {
func (m *Manager) Labels() map[string]string {
m.mtx.Lock()
defer m.mtx.Unlock()
- return m.Config.Labels
+
+ return maps.Clone(m.Config.Labels)
}
// InterQueryBuiltinCacheConfig returns the configuration for the inter-query caches.
func (m *Manager) InterQueryBuiltinCacheConfig() *cache.Config {
m.mtx.Lock()
defer m.mtx.Unlock()
- return m.interQueryBuiltinCacheConfig
+
+ return m.interQueryBuiltinCacheConfig.Clone()
+}
+
+// GetConfig returns a deep copy of the manager's configuration.
+func (m *Manager) GetConfig() *config.Config {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ return m.Config.Clone()
}
// Register adds a plugin to the manager. When the manager is started, all of
@@ -645,8 +675,61 @@ func (m *Manager) setCompiler(compiler *ast.Compiler) {
m.compiler = compiler
}
+type ExtraRoute struct {
+ PromName string // name is for prometheus metrics
+ HandlerFunc http.HandlerFunc
+}
+
+func (m *Manager) ExtraRoutes() map[string]ExtraRoute {
+ return m.extraRoutes
+}
+
+func (m *Manager) ExtraMiddlewares() []func(http.Handler) http.Handler {
+ return m.extraMiddlewares
+}
+
+func (m *Manager) ExtraAuthorizerRoutes() []func(string, []any) bool {
+ return m.extraAuthorizerRoutes
+}
+
+// ExtraRoute registers an extra route to be served by the HTTP
+// server later. Using this instead of directly registering routes
+// with GetRouter() lets the server apply its handler wrapping for
+// Prometheus and OpenTelemetry.
+// Caution: This cannot be used to dynamically register and un-
+// register HTTP handlers. It's meant as a late-stage set up helper,
+// to be called from a plugin's init methods.
+func (m *Manager) ExtraRoute(path, name string, hf http.HandlerFunc) {
+ if _, ok := m.extraRoutes[path]; ok {
+ panic("extra route already registered: " + path)
+ }
+ m.extraRoutes[path] = ExtraRoute{
+ PromName: name,
+ HandlerFunc: hf,
+ }
+}
+
+// ExtraMiddleware registers extra middlewares (`func(http.Handler) http.Handler`)
+// to be injected into the HTTP handler chain in the server later.
+// Caution: This cannot be used to dynamically register and un-
+// register middlewares. It's meant as a late-stage set up helper,
+// to be called from a plugin's init methods.
+func (m *Manager) ExtraMiddleware(mw ...func(http.Handler) http.Handler) {
+ m.extraMiddlewares = append(m.extraMiddlewares, mw...)
+}
+
+// ExtraAuthorizerRoute registers an extra URL path validator function for use
+// in the server authorizer. These functions designate specific methods and URL
+// prefixes or paths where the authorizer should allow request body parsing.
+// Caution: This cannot be used to dynamically register and un-
+// register path validator functions. It's meant as a late-stage
+// set up helper, to be called from a plugin's init methods.
+func (m *Manager) ExtraAuthorizerRoute(validatorFunc func(string, []any) bool) {
+ m.extraAuthorizerRoutes = append(m.extraAuthorizerRoutes, validatorFunc)
+}
+
// GetRouter returns the managers router if set
-func (m *Manager) GetRouter() *mux.Router {
+func (m *Manager) GetRouter() *http.ServeMux {
m.mtx.Lock()
defer m.mtx.Unlock()
return m.router
@@ -675,7 +758,6 @@ func (m *Manager) setWasmResolvers(rs []*wasm.Resolver) {
// Start starts the manager. Init() should be called once before Start().
func (m *Manager) Start(ctx context.Context) error {
-
if m == nil {
return nil
}
@@ -746,14 +828,21 @@ func (m *Manager) Stop(ctx context.Context) {
}
}
-// Reconfigure updates the configuration on the manager.
-func (m *Manager) Reconfigure(config *config.Config) error {
- opts := cfg.ServiceOptions{
+func (m *Manager) DefaultServiceOpts(config *config.Config) cfg.ServiceOptions {
+ return cfg.ServiceOptions{
Raw: config.Services,
AuthPlugin: m.AuthPlugin,
Logger: m.logger,
+ Keys: m.keys,
DistributedTacingOpts: m.distributedTacingOpts,
}
+}
+
+// Reconfigure updates the configuration on the manager.
+func (m *Manager) Reconfigure(newCfg *config.Config) error {
+ config := newCfg.Clone()
+
+ opts := m.DefaultServiceOpts(config)
keys, err := keys.ParseKeysConfig(config.Keys)
if err != nil {
@@ -778,25 +867,20 @@ func (m *Manager) Reconfigure(config *config.Config) error {
if config.Labels == nil {
config.Labels = m.bootstrapConfigLabels
} else {
- for label, value := range m.bootstrapConfigLabels {
- config.Labels[label] = value
- }
+ maps.Copy(config.Labels, m.bootstrapConfigLabels)
}
// don't erase persistence directory
if config.PersistenceDirectory == nil {
+ // update is ok since we have the lock
config.PersistenceDirectory = m.Config.PersistenceDirectory
}
m.Config = config
m.interQueryBuiltinCacheConfig = interQueryBuiltinCacheConfig
- for name, client := range services {
- m.services[name] = client
- }
- for name, key := range keys {
- m.keys[name] = key
- }
+ maps.Copy(m.services, services)
+ maps.Copy(m.keys, keys)
for _, trigger := range m.registeredCacheTriggers {
trigger(interQueryBuiltinCacheConfig)
@@ -839,7 +923,6 @@ func (m *Manager) UnregisterPluginStatusListener(name string) {
// listeners will be called with a copy of the new state of all
// plugins.
func (m *Manager) UpdatePluginStatus(pluginName string, status *Status) {
-
var toNotify map[string]StatusListener
var statuses map[string]*Status
@@ -848,9 +931,7 @@ func (m *Manager) UpdatePluginStatus(pluginName string, status *Status) {
defer m.mtx.Unlock()
m.pluginStatus[pluginName] = status
toNotify = make(map[string]StatusListener, len(m.pluginStatusListeners))
- for k, v := range m.pluginStatusListeners {
- toNotify[k] = v
- }
+ maps.Copy(toNotify, m.pluginStatusListeners)
statuses = m.copyPluginStatus()
}()
@@ -875,7 +956,6 @@ func (m *Manager) copyPluginStatus() map[string]*Status {
}
func (m *Manager) onCommit(ctx context.Context, txn storage.Transaction, event storage.TriggerEvent) {
-
compiler := GetCompilerOnContext(event.Context)
// If the context does not contain the compiler fallback to loading the
@@ -903,7 +983,6 @@ func (m *Manager) onCommit(ctx context.Context, txn storage.Transaction, event s
resolvers := getWasmResolversOnContext(event.Context)
if resolvers != nil {
m.setWasmResolvers(resolvers)
-
} else if event.DataChanged() {
if requiresWasmResolverReload(event) {
resolvers, err := bundleUtils.LoadWasmResolversFromStore(ctx, m.Store, txn, nil)
@@ -939,7 +1018,13 @@ func loadCompilerFromStore(ctx context.Context, store storage.Store, txn storage
modules[policy] = module
}
- compiler := ast.NewCompiler().WithEnablePrintStatements(enablePrintStatements)
+ compiler := ast.NewCompiler().
+ WithEnablePrintStatements(enablePrintStatements)
+
+ if popts.RegoVersion != ast.RegoUndefined {
+ compiler = compiler.WithDefaultRegoVersion(popts.RegoVersion)
+ }
+
compiler.Compile(modules)
return compiler, nil
}
@@ -980,7 +1065,19 @@ func (m *Manager) updateWasmResolversData(ctx context.Context, event storage.Tri
func (m *Manager) PublicKeys() map[string]*keys.Config {
m.mtx.Lock()
defer m.mtx.Unlock()
- return m.keys
+
+ if m.keys == nil {
+ return make(map[string]*keys.Config)
+ }
+
+ result := make(map[string]*keys.Config, len(m.keys))
+ for k, v := range m.keys {
+ if v != nil {
+ copied := *v
+ result[k] = &copied
+ }
+ }
+ return result
}
// Client returns a client for communicating with a remote service.
@@ -1078,7 +1175,7 @@ func (m *Manager) sendOPAUpdateLoop(ctx context.Context) {
opaReportNotify = false
_, err := m.reporter.SendReport(ctx)
if err != nil {
- m.logger.WithFields(map[string]interface{}{"err": err}).Debug("Unable to send OPA telemetry report.")
+ m.logger.WithFields(map[string]any{"err": err}).Debug("Unable to send OPA telemetry report.")
}
}
diff --git a/vendor/github.com/open-policy-agent/opa/plugins/rest/auth.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/auth.go
similarity index 69%
rename from vendor/github.com/open-policy-agent/opa/plugins/rest/auth.go
rename to vendor/github.com/open-policy-agent/opa/v1/plugins/rest/auth.go
index 11e72001a2..8ec337bd1e 100644
--- a/vendor/github.com/open-policy-agent/opa/plugins/rest/auth.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/auth.go
@@ -21,6 +21,7 @@ import (
"fmt"
"hash"
"io"
+ "maps"
"math/big"
"net/http"
"net/url"
@@ -28,13 +29,12 @@ import (
"strings"
"time"
- "github.com/open-policy-agent/opa/internal/jwx/jwa"
- "github.com/open-policy-agent/opa/internal/jwx/jws"
- "github.com/open-policy-agent/opa/internal/jwx/jws/sign"
+ "github.com/lestrrat-go/jwx/v3/jwa"
+ "github.com/lestrrat-go/jwx/v3/jws"
"github.com/open-policy-agent/opa/internal/providers/aws"
"github.com/open-policy-agent/opa/internal/uuid"
- "github.com/open-policy-agent/opa/keys"
- "github.com/open-policy-agent/opa/logging"
+ "github.com/open-policy-agent/opa/v1/keys"
+ "github.com/open-policy-agent/opa/v1/logging"
)
const (
@@ -126,10 +126,14 @@ type bearerAuthPlugin struct {
// encode is set to true for the OCIDownloader because
// it expects tokens in plain text but needs them in base64.
encode bool
+ logger logging.Logger
}
func (ap *bearerAuthPlugin) NewClient(c Config) (*http.Client, error) {
t, err := DefaultTLSConfig(c)
+
+ ap.logger = c.logger
+
if err != nil {
return nil, err
}
@@ -153,6 +157,9 @@ func (ap *bearerAuthPlugin) NewClient(c Config) (*http.Client, error) {
func (ap *bearerAuthPlugin) Prepare(req *http.Request) error {
token := ap.Token
+ if ap.logger == nil {
+ ap.logger = logging.Get()
+ }
if ap.TokenPath != "" {
bytes, err := os.ReadFile(ap.TokenPath)
@@ -166,7 +173,12 @@ func (ap *bearerAuthPlugin) Prepare(req *http.Request) error {
token = base64.StdEncoding.EncodeToString([]byte(token))
}
- req.Header.Add("Authorization", fmt.Sprintf("%v %v", ap.Scheme, token))
+ if req.Response != nil && (req.Response.StatusCode == http.StatusPermanentRedirect || req.Response.StatusCode == http.StatusTemporaryRedirect) {
+ ap.logger.Debug("not attaching authorization header as the response contains a redirect")
+ } else {
+ ap.logger.Debug("attaching authorization header")
+ req.Header.Add("Authorization", fmt.Sprintf("%v %v", ap.Scheme, token))
+ }
return nil
}
@@ -181,6 +193,15 @@ type awsKmsKeyConfig struct {
Algorithm string `json:"algorithm"`
}
+type azureKeyVaultConfig struct {
+ Key string `json:"key"`
+ KeyVersion string `json:"key_version"`
+ Alg string `json:"key_algorithm"`
+ Vault string `json:"vault"`
+ URL *url.URL
+ APIVersion string `json:"api_version"`
+}
+
func convertSignatureToBase64(alg string, der []byte) (string, error) {
r, s, derErr := pointsFromDER(der)
if derErr != nil {
@@ -194,7 +215,7 @@ func convertSignatureToBase64(alg string, der []byte) (string, error) {
return signatureData, nil
}
-func pointsFromDER(der []byte) (R, S *big.Int, err error) {
+func pointsFromDER(der []byte) (R, S *big.Int, err error) { //nolint:gocritic
R, S = &big.Int{}, &big.Int{}
data := asn1.RawValue{}
if _, err := asn1.Unmarshal(der, &data); err != nil {
@@ -253,42 +274,47 @@ func messageDigest(message []byte, alg string) ([]byte, error) {
var digest hash.Hash
switch alg {
- case "ECDSA_SHA_256":
+ case "ECDSA_SHA_256", "ES256", "ES256K", "PS256", "RS256":
digest = sha256.New()
- case "ECDSA_SHA_384":
+ case "ECDSA_SHA_384", "ES384", "PS384", "RS384":
digest = sha512.New384()
- case "ECDSA_SHA_512":
+ case "ECDSA_SHA_512", "ES512", "PS512", "RS512":
digest = sha512.New()
default:
return []byte{}, fmt.Errorf("unsupported sign algorithm %s", alg)
}
- digest.Write(message)
+ _, err := digest.Write(message)
+ if err != nil {
+ return nil, err
+ }
return digest.Sum(nil), nil
}
// oauth2ClientCredentialsAuthPlugin represents authentication via a bearer token in the HTTP Authorization header
// obtained through the OAuth2 client credentials flow
type oauth2ClientCredentialsAuthPlugin struct {
- GrantType string `json:"grant_type"`
- TokenURL string `json:"token_url"`
- ClientID string `json:"client_id"`
- ClientSecret string `json:"client_secret"`
- SigningKeyID string `json:"signing_key"`
- Thumbprint string `json:"thumbprint"`
- Claims map[string]interface{} `json:"additional_claims"`
- IncludeJti bool `json:"include_jti_claim"`
- Scopes []string `json:"scopes,omitempty"`
- AdditionalHeaders map[string]string `json:"additional_headers,omitempty"`
- AdditionalParameters map[string]string `json:"additional_parameters,omitempty"`
- AWSKmsKey *awsKmsKeyConfig `json:"aws_kms,omitempty"`
- AWSSigningPlugin *awsSigningAuthPlugin `json:"aws_signing,omitempty"`
- ClientAssertionType string `json:"client_assertion_type"`
- ClientAssertion string `json:"client_assertion"`
- ClientAssertionPath string `json:"client_assertion_path"`
+ GrantType string `json:"grant_type"`
+ TokenURL string `json:"token_url"`
+ ClientID string `json:"client_id"`
+ ClientSecret string `json:"client_secret"`
+ SigningKeyID string `json:"signing_key"`
+ Thumbprint string `json:"thumbprint"`
+ Claims map[string]any `json:"additional_claims"`
+ IncludeJti bool `json:"include_jti_claim"`
+ Scopes []string `json:"scopes,omitempty"`
+ AdditionalHeaders map[string]string `json:"additional_headers,omitempty"`
+ AdditionalParameters map[string]string `json:"additional_parameters,omitempty"`
+ AWSKmsKey *awsKmsKeyConfig `json:"aws_kms,omitempty"`
+ AWSSigningPlugin *awsSigningAuthPlugin `json:"aws_signing,omitempty"`
+ AzureKeyVault *azureKeyVaultConfig `json:"azure_keyvault,omitempty"`
+ AzureSigningPlugin *azureSigningAuthPlugin `json:"azure_signing,omitempty"`
+ ClientAssertionType string `json:"client_assertion_type"`
+ ClientAssertion string `json:"client_assertion"`
+ ClientAssertionPath string `json:"client_assertion_path"`
signingKey *keys.Config
- signingKeyParsed interface{}
+ signingKeyParsed any
tokenCache *oauth2Token
tlsSkipVerify bool
logger logging.Logger
@@ -299,15 +325,13 @@ type oauth2Token struct {
ExpiresAt time.Time
}
-func (ap *oauth2ClientCredentialsAuthPlugin) createAuthJWT(ctx context.Context, extClaims map[string]interface{}, signingKey interface{}) (*string, error) {
+func (ap *oauth2ClientCredentialsAuthPlugin) createJWSParts(extClaims map[string]any) ([]byte, []byte, string, error) {
now := time.Now()
- claims := map[string]interface{}{
+ claims := map[string]any{
"iat": now.Unix(),
"exp": now.Add(10 * time.Minute).Unix(),
}
- for k, v := range extClaims {
- claims[k] = v
- }
+ maps.Copy(claims, extClaims)
if len(ap.Scopes) > 0 {
claims["scope"] = strings.Join(ap.Scopes, " ")
@@ -316,55 +340,88 @@ func (ap *oauth2ClientCredentialsAuthPlugin) createAuthJWT(ctx context.Context,
if ap.IncludeJti {
jti, err := uuid.New(rand.Reader)
if err != nil {
- return nil, err
+ return nil, nil, "", err
}
claims["jti"] = jti
}
payload, err := json.Marshal(claims)
if err != nil {
- return nil, err
+ return nil, nil, "", err
}
var jwsHeaders []byte
var signatureAlg string
- if ap.AWSKmsKey == nil {
+ switch {
+ case ap.AWSKmsKey == nil && ap.AzureKeyVault == nil:
signatureAlg = ap.signingKey.Algorithm
- } else {
+ case ap.AWSKmsKey != nil && ap.AWSKmsKey.Algorithm != "":
signatureAlg, err = ap.mapKMSAlgToSign(ap.AWSKmsKey.Algorithm)
if err != nil {
- return nil, err
+ return nil, nil, "", err
}
+ case ap.AzureKeyVault != nil && ap.AzureKeyVault.Alg != "":
+ signatureAlg = ap.AzureKeyVault.Alg
}
if ap.Thumbprint != "" {
bytes, err := hex.DecodeString(ap.Thumbprint)
if err != nil {
- return nil, err
+ return nil, nil, "", err
}
x5t := base64.URLEncoding.EncodeToString(bytes)
- jwsHeaders = []byte(fmt.Sprintf(`{"typ":"JWT","alg":"%s","x5t":"%s"}`, signatureAlg, x5t))
- } else {
- jwsHeaders = []byte(fmt.Sprintf(`{"typ":"JWT","alg":"%s"}`, signatureAlg))
- }
- var jwsCompact []byte
- if ap.AWSKmsKey == nil {
- jwsCompact, err = jws.SignLiteral(payload,
- jwa.SignatureAlgorithm(signatureAlg),
- signingKey,
- jwsHeaders,
- rand.Reader)
+ jwsHeaders = fmt.Appendf(nil, `{"typ":"JWT","alg":"%s","x5t":"%s"}`, signatureAlg, x5t)
} else {
- jwsCompact, err = ap.SignWithKMS(ctx, payload, jwsHeaders)
+ jwsHeaders = fmt.Appendf(nil, `{"typ":"JWT","alg":"%s"}`, signatureAlg)
+ }
+
+ return jwsHeaders, payload, signatureAlg, nil
+}
+
+func (ap *oauth2ClientCredentialsAuthPlugin) createAuthJWT(ctx context.Context, extClaims map[string]any, signingKey any) (*string, error) {
+ header, payload, alg, err := ap.createJWSParts(extClaims)
+ if err != nil {
+ return nil, err
+ }
+
+ var clientAssertion []byte
+ switch {
+ case ap.AWSKmsKey != nil:
+ clientAssertion, err = ap.SignWithKMS(ctx, payload, header)
+ case ap.AzureKeyVault != nil:
+ clientAssertion, err = ap.SignWithKeyVault(ctx, payload, header)
+ default:
+ // Parse the algorithm string to jwa.SignatureAlgorithm
+ algObj, ok := jwa.LookupSignatureAlgorithm(alg)
+ if !ok {
+ return nil, fmt.Errorf("unknown signature algorithm: %s", alg)
+ }
+
+ // Parse headers
+ var headers map[string]any
+ if err := json.Unmarshal(header, &headers); err != nil {
+ return nil, err
+ }
+
+ // Create protected headers
+ protectedHeaders := jws.NewHeaders()
+ for k, v := range headers {
+ if err := protectedHeaders.Set(k, v); err != nil {
+ return nil, err
+ }
+ }
+
+ clientAssertion, err = jws.Sign(payload,
+ jws.WithKey(algObj, signingKey, jws.WithProtectedHeaders(protectedHeaders)))
}
if err != nil {
return nil, err
}
- jwt := string(jwsCompact)
+ jwt := string(clientAssertion)
return &jwt, nil
}
-func (ap *oauth2ClientCredentialsAuthPlugin) mapKMSAlgToSign(alg string) (string, error) {
+func (*oauth2ClientCredentialsAuthPlugin) mapKMSAlgToSign(alg string) (string, error) {
switch alg {
case "ECDSA_SHA_256":
return "ES256", nil
@@ -382,12 +439,7 @@ func (ap *oauth2ClientCredentialsAuthPlugin) SignWithKMS(ctx context.Context, pa
encodedHdr := base64.RawURLEncoding.EncodeToString(hdrBuf)
encodedPayload := base64.RawURLEncoding.EncodeToString(payload)
- input := strings.Join(
- []string{
- encodedHdr,
- encodedPayload,
- }, ".",
- )
+ input := encodedHdr + "." + encodedPayload
digest, err := messageDigest([]byte(input), ap.AWSKmsKey.Algorithm)
if err != nil {
return nil, err
@@ -413,6 +465,28 @@ func (ap *oauth2ClientCredentialsAuthPlugin) SignWithKMS(ctx context.Context, pa
return nil, errors.New("missing AWS credentials, failed to sign the assertion with kms")
}
+func (ap *oauth2ClientCredentialsAuthPlugin) SignWithKeyVault(ctx context.Context, payload []byte, hdrBuf []byte) ([]byte, error) {
+ if ap.AzureSigningPlugin == nil {
+ return nil, errors.New("missing Azure credentials, failed to sign the assertion with KeyVault")
+ }
+
+ encodedHdr := base64.RawURLEncoding.EncodeToString(hdrBuf)
+ encodedPayload := base64.RawURLEncoding.EncodeToString(payload)
+ input := encodedHdr + "." + encodedPayload
+ digest, err := messageDigest([]byte(input), ap.AzureSigningPlugin.keyVaultSignPlugin.config.Alg)
+ if err != nil {
+ fmt.Println("unsupported algorithm", ap.AzureSigningPlugin.keyVaultSignPlugin.config.Alg)
+ return nil, err
+ }
+
+ signature, err := ap.AzureSigningPlugin.SignDigest(ctx, digest)
+ if err != nil {
+ return nil, err
+ }
+
+ return []byte(input + "." + signature), nil
+}
+
func (ap *oauth2ClientCredentialsAuthPlugin) parseSigningKey(c Config) (err error) {
if ap.SigningKeyID == "" {
return errors.New("signing_key required for jwt_bearer grant type")
@@ -427,8 +501,37 @@ func (ap *oauth2ClientCredentialsAuthPlugin) parseSigningKey(c Config) (err erro
return errors.New("signing_key refers to non-existent key")
}
- alg := jwa.SignatureAlgorithm(ap.signingKey.Algorithm)
- ap.signingKeyParsed, err = sign.GetSigningKey(ap.signingKey.PrivateKey, alg)
+ alg, ok := jwa.LookupSignatureAlgorithm(ap.signingKey.Algorithm)
+ if !ok {
+ return fmt.Errorf("unknown signature algorithm: %s", ap.signingKey.Algorithm)
+ }
+
+ // Parse the private key directly
+ keyData := ap.signingKey.PrivateKey
+
+ // For HMAC algorithms, return the key as bytes
+ if alg == jwa.HS256() || alg == jwa.HS384() || alg == jwa.HS512() {
+ ap.signingKeyParsed = []byte(keyData)
+ return nil
+ }
+
+ // For RSA/ECDSA algorithms, parse the PEM-encoded key
+ block, _ := pem.Decode([]byte(keyData))
+ if block == nil {
+ return errors.New("failed to decode PEM key")
+ }
+
+ switch block.Type {
+ case "RSA PRIVATE KEY":
+ ap.signingKeyParsed, err = x509.ParsePKCS1PrivateKey(block.Bytes)
+ case "PRIVATE KEY":
+ ap.signingKeyParsed, err = x509.ParsePKCS8PrivateKey(block.Bytes)
+ case "EC PRIVATE KEY":
+ ap.signingKeyParsed, err = x509.ParseECPrivateKey(block.Bytes)
+ default:
+ return fmt.Errorf("unsupported key type: %s", block.Type)
+ }
+
if err != nil {
return err
}
@@ -468,6 +571,7 @@ func (ap *oauth2ClientCredentialsAuthPlugin) NewClient(c Config) (*http.Client,
clientCredentialExists["client_secret"] = ap.ClientSecret != ""
clientCredentialExists["signing_key"] = ap.SigningKeyID != ""
clientCredentialExists["aws_kms"] = ap.AWSKmsKey != nil
+ clientCredentialExists["azure_keyvault"] = ap.AzureKeyVault != nil
clientCredentialExists["client_assertion"] = ap.ClientAssertion != ""
clientCredentialExists["client_assertion_path"] = ap.ClientAssertionPath != ""
@@ -480,14 +584,15 @@ func (ap *oauth2ClientCredentialsAuthPlugin) NewClient(c Config) (*http.Client,
}
if notEmptyVarCount == 0 {
- return nil, errors.New("please provide one of client_secret, signing_key, aws_kms, client_assertion, or client_assertion_path required")
+ return nil, errors.New("please provide one of client_secret, signing_key, aws_kms, azure_keyvault, client_assertion, or client_assertion_path required")
}
if notEmptyVarCount > 1 {
- return nil, errors.New("can only use one of client_secret, signing_key, aws_kms, client_assertion, or client_assertion_path")
+ return nil, errors.New("can only use one of client_secret, signing_key, aws_kms, azure_keyvault, client_assertion, or client_assertion_path")
}
- if clientCredentialExists["aws_kms"] {
+ switch {
+ case clientCredentialExists["aws_kms"]:
if ap.AWSSigningPlugin == nil {
return nil, errors.New("aws_kms and aws_signing required")
}
@@ -496,91 +601,107 @@ func (ap *oauth2ClientCredentialsAuthPlugin) NewClient(c Config) (*http.Client,
if err != nil {
return nil, err
}
- } else if clientCredentialExists["client_assertion"] {
+ case clientCredentialExists["azure_keyvault"]:
+ _, err := ap.AzureSigningPlugin.NewClient(c)
+ if err != nil {
+ return nil, err
+ }
+ case clientCredentialExists["client_assertion"]:
if ap.ClientAssertionType == "" {
ap.ClientAssertionType = defaultClientAssertionType
}
if ap.ClientID == "" {
return nil, errors.New("client_id and client_assertion required")
}
- } else if clientCredentialExists["client_assertion_path"] {
+ case clientCredentialExists["client_assertion_path"]:
if ap.ClientAssertionType == "" {
ap.ClientAssertionType = defaultClientAssertionType
}
if ap.ClientID == "" {
return nil, errors.New("client_id and client_assertion_path required")
}
- } else if clientCredentialExists["client_secret"] {
- if ap.ClientID == "" {
- return nil, errors.New("client_id and client_secret required")
- }
+ case clientCredentialExists["client_secret"] && ap.ClientID == "":
+ return nil, errors.New("client_id and client_secret required")
}
}
return DefaultRoundTripperClient(t, *c.ResponseHeaderTimeoutSeconds), nil
}
-// requestToken tries to obtain an access token using either the client credentials flow
-// https://tools.ietf.org/html/rfc6749#section-4.4
-// or the JWT authorization grant
-// https://tools.ietf.org/html/rfc7523
-func (ap *oauth2ClientCredentialsAuthPlugin) requestToken(ctx context.Context) (*oauth2Token, error) {
+func (ap *oauth2ClientCredentialsAuthPlugin) createTokenReqBody(ctx context.Context) (url.Values, error) {
body := url.Values{}
+
+ if len(ap.Scopes) > 0 {
+ body.Add("scope", strings.Join(ap.Scopes, " "))
+ }
+
+ for k, v := range ap.AdditionalParameters {
+ body.Set(k, v)
+ }
+
if ap.GrantType == grantTypeJwtBearer {
- authJwt, err := ap.createAuthJWT(ctx, ap.Claims, ap.signingKeyParsed)
+ authJWT, err := ap.createAuthJWT(ctx, ap.Claims, ap.signingKeyParsed)
if err != nil {
return nil, err
}
body.Add("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer")
- body.Add("assertion", *authJwt)
- } else {
- body.Add("grant_type", grantTypeClientCredentials)
+ body.Add("assertion", *authJWT)
+ return body, nil
+ }
- if ap.SigningKeyID != "" || ap.AWSKmsKey != nil {
- authJwt, err := ap.createAuthJWT(ctx, ap.Claims, ap.signingKeyParsed)
- if err != nil {
- return nil, err
- }
- body.Add("client_assertion_type", defaultClientAssertionType)
- body.Add("client_assertion", *authJwt)
+ body.Add("grant_type", grantTypeClientCredentials)
- if ap.ClientID != "" {
- body.Add("client_id", ap.ClientID)
- }
- } else if ap.ClientAssertion != "" {
- if ap.ClientAssertionType == "" {
- ap.ClientAssertionType = defaultClientAssertionType
- }
- if ap.ClientID != "" {
- body.Add("client_id", ap.ClientID)
- }
- body.Add("client_assertion_type", ap.ClientAssertionType)
- body.Add("client_assertion", ap.ClientAssertion)
- } else if ap.ClientAssertionPath != "" {
- if ap.ClientAssertionType == "" {
- ap.ClientAssertionType = defaultClientAssertionType
- }
- bytes, err := os.ReadFile(ap.ClientAssertionPath)
- if err != nil {
- return nil, err
- }
- if ap.ClientID != "" {
- body.Add("client_id", ap.ClientID)
- }
- body.Add("client_assertion_type", ap.ClientAssertionType)
- body.Add("client_assertion", strings.TrimSpace(string(bytes)))
+ switch {
+ case ap.SigningKeyID != "" || ap.AWSKmsKey != nil || ap.AzureKeyVault != nil:
+ authJwt, err := ap.createAuthJWT(ctx, ap.Claims, ap.signingKeyParsed)
+ if err != nil {
+ return nil, err
}
- }
+ body.Add("client_assertion_type", defaultClientAssertionType)
+ body.Add("client_assertion", *authJwt)
- if len(ap.Scopes) > 0 {
- body.Add("scope", strings.Join(ap.Scopes, " "))
+ if ap.ClientID != "" {
+ body.Add("client_id", ap.ClientID)
+ }
+ case ap.ClientAssertion != "":
+ if ap.ClientAssertionType == "" {
+ ap.ClientAssertionType = defaultClientAssertionType
+ }
+ if ap.ClientID != "" {
+ body.Add("client_id", ap.ClientID)
+ }
+ body.Add("client_assertion_type", ap.ClientAssertionType)
+ body.Add("client_assertion", ap.ClientAssertion)
+
+ case ap.ClientAssertionPath != "":
+ if ap.ClientAssertionType == "" {
+ ap.ClientAssertionType = defaultClientAssertionType
+ }
+ bytes, err := os.ReadFile(ap.ClientAssertionPath)
+ if err != nil {
+ return nil, err
+ }
+ if ap.ClientID != "" {
+ body.Add("client_id", ap.ClientID)
+ }
+ body.Add("client_assertion_type", ap.ClientAssertionType)
+ body.Add("client_assertion", strings.TrimSpace(string(bytes)))
}
- for k, v := range ap.AdditionalParameters {
- body.Set(k, v)
+ return body, nil
+}
+
+// requestToken tries to obtain an access token using either the client credentials flow
+// https://tools.ietf.org/html/rfc6749#section-4.4
+// or the JWT authorization grant
+// https://tools.ietf.org/html/rfc7523
+func (ap *oauth2ClientCredentialsAuthPlugin) requestToken(ctx context.Context) (*oauth2Token, error) {
+ body, err := ap.createTokenReqBody(ctx)
+ if err != nil {
+ return nil, err
}
- r, err := http.NewRequestWithContext(ctx, "POST", ap.TokenURL, strings.NewReader(body.Encode()))
+ r, err := http.NewRequestWithContext(ctx, http.MethodPost, ap.TokenURL, strings.NewReader(body.Encode()))
if err != nil {
return nil, err
}
@@ -616,7 +737,7 @@ func (ap *oauth2ClientCredentialsAuthPlugin) requestToken(ctx context.Context) (
return nil, err
}
- if strings.ToLower(tokenResponse.TokenType) != "bearer" {
+ if !strings.EqualFold(tokenResponse.TokenType, "bearer") {
return nil, errors.New("unknown token type returned from token endpoint")
}
@@ -751,7 +872,7 @@ func (ap *clientTLSAuthPlugin) NewClient(c Config) (*http.Client, error) {
return client, nil
}
-func (ap *clientTLSAuthPlugin) Prepare(_ *http.Request) error {
+func (*clientTLSAuthPlugin) Prepare(_ *http.Request) error {
return nil
}
@@ -762,6 +883,7 @@ type awsSigningAuthPlugin struct {
AWSAssumeRoleCredentials *awsAssumeRoleCredentialService `json:"assume_role_credentials,omitempty"`
AWSWebIdentityCredentials *awsWebIdentityCredentialService `json:"web_identity_credentials,omitempty"`
AWSProfileCredentials *awsProfileCredentialService `json:"profile_credentials,omitempty"`
+ AWSSSOCredentials *awsSSOCredentialsService `json:"sso_credentials,omitempty"`
AWSService string `json:"service,omitempty"`
AWSSignatureVersion string `json:"signature_version,omitempty"`
@@ -877,6 +999,11 @@ func (ap *awsSigningAuthPlugin) awsCredentialService() awsCredentialService {
chain.addService(ap.AWSMetadataCredentials)
}
+ if ap.AWSSSOCredentials != nil {
+ ap.AWSSSOCredentials.logger = ap.logger
+ chain.addService(ap.AWSSSOCredentials)
+ }
+
return &chain
}
@@ -934,6 +1061,7 @@ func (ap *awsSigningAuthPlugin) validateAndSetDefaults(serviceType string) error
cfgs[ap.AWSAssumeRoleCredentials != nil]++
cfgs[ap.AWSWebIdentityCredentials != nil]++
cfgs[ap.AWSProfileCredentials != nil]++
+ cfgs[ap.AWSSSOCredentials != nil]++
if cfgs[true] == 0 {
return errors.New("a AWS credential service must be specified when S3 signing is enabled")
@@ -1000,3 +1128,84 @@ func (ap *awsSigningAuthPlugin) SignDigest(ctx context.Context, digest []byte, k
return "", fmt.Errorf(`cannot use SignDigest with aws service %q`, ap.AWSService)
}
}
+
+type azureSigningAuthPlugin struct {
+ MIAuthPlugin *azureManagedIdentitiesAuthPlugin `json:"azure_managed_identity,omitempty"`
+ keyVaultSignPlugin *azureKeyVaultSignPlugin
+ keyVaultConfig *azureKeyVaultConfig
+ host string
+ Service string `json:"service"`
+ logger logging.Logger
+}
+
+func (ap *azureSigningAuthPlugin) NewClient(c Config) (*http.Client, error) {
+ t, err := DefaultTLSConfig(c)
+ if err != nil {
+ return nil, err
+ }
+
+ tknURL, err := url.Parse(c.URL)
+ if err != nil {
+ return nil, err
+ }
+
+ ap.host = tknURL.Host
+
+ if ap.logger == nil {
+ ap.logger = c.logger
+ }
+
+ if c.Credentials.OAuth2.AzureKeyVault == nil {
+ return nil, errors.New("missing keyvault config")
+ }
+ ap.keyVaultConfig = c.Credentials.OAuth2.AzureKeyVault
+
+ if err := ap.validateAndSetDefaults(); err != nil {
+ return nil, err
+ }
+
+ return DefaultRoundTripperClient(t, *c.ResponseHeaderTimeoutSeconds), nil
+}
+
+func (ap *azureSigningAuthPlugin) validateAndSetDefaults() error {
+ if ap.MIAuthPlugin == nil {
+ return errors.New("missing azure managed identity config")
+ }
+ ap.MIAuthPlugin.setDefaults()
+
+ if ap.keyVaultSignPlugin != nil {
+ return nil
+ }
+ ap.keyVaultConfig.URL = &url.URL{
+ Scheme: "https",
+ Host: ap.keyVaultConfig.Vault + ".vault.azure.net",
+ }
+ ap.keyVaultSignPlugin = newKeyVaultSignPlugin(ap.MIAuthPlugin, ap.keyVaultConfig)
+ ap.keyVaultSignPlugin.setDefaults()
+ ap.keyVaultConfig = &ap.keyVaultSignPlugin.config
+
+ return nil
+}
+
+func (ap *azureSigningAuthPlugin) Prepare(req *http.Request) error {
+ switch ap.Service {
+ case "keyvault":
+ tkn, err := ap.keyVaultSignPlugin.tokener()
+ if err != nil {
+ return err
+ }
+ req.Header.Add("Authorization", "Bearer "+tkn)
+ return nil
+ default:
+ return fmt.Errorf("azureSigningAuthPlugin.Prepare() with %s not supported", ap.Service)
+ }
+}
+
+func (ap *azureSigningAuthPlugin) SignDigest(ctx context.Context, digest []byte) (string, error) {
+ switch ap.Service {
+ case "keyvault":
+ return ap.keyVaultSignPlugin.SignDigest(ctx, digest)
+ default:
+ return "", fmt.Errorf(`cannot use SignDigest with azure service %q`, ap.Service)
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/plugins/rest/aws.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/aws.go
similarity index 73%
rename from vendor/github.com/open-policy-agent/opa/plugins/rest/aws.go
rename to vendor/github.com/open-policy-agent/opa/v1/plugins/rest/aws.go
index cc45dfa9c7..45c708ab80 100644
--- a/vendor/github.com/open-policy-agent/opa/plugins/rest/aws.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/aws.go
@@ -5,7 +5,10 @@
package rest
import (
+ "bytes"
"context"
+ "crypto/sha1"
+ "encoding/hex"
"encoding/json"
"encoding/xml"
"errors"
@@ -13,13 +16,14 @@ import (
"net/http"
"net/url"
"os"
+ "path"
"path/filepath"
"strings"
"time"
"github.com/go-ini/ini"
"github.com/open-policy-agent/opa/internal/providers/aws"
- "github.com/open-policy-agent/opa/logging"
+ "github.com/open-policy-agent/opa/v1/logging"
)
const (
@@ -51,6 +55,7 @@ const (
awsRoleArnEnvVar = "AWS_ROLE_ARN"
awsWebIdentityTokenFileEnvVar = "AWS_WEB_IDENTITY_TOKEN_FILE"
awsCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE"
+ awsConfigFileEnvVar = "AWS_CONFIG_FILE"
awsProfileEnvVar = "AWS_PROFILE"
// ref. https://docs.aws.amazon.com/sdkref/latest/guide/settings-global.html
@@ -69,7 +74,7 @@ type awsEnvironmentCredentialService struct {
logger logging.Logger
}
-func (cs *awsEnvironmentCredentialService) credentials(context.Context) (aws.Credentials, error) {
+func (*awsEnvironmentCredentialService) credentials(context.Context) (aws.Credentials, error) {
var creds aws.Credentials
creds.AccessKey = os.Getenv(accessKeyEnvVar)
if creds.AccessKey == "" {
@@ -95,6 +100,333 @@ func (cs *awsEnvironmentCredentialService) credentials(context.Context) (aws.Cre
return creds, nil
}
+type ssoSessionDetails struct {
+ StartUrl string `json:"startUrl"`
+ Region string `json:"region"`
+ Name string
+ AccountID string
+ RoleName string
+ AccessToken string `json:"accessToken"`
+ ExpiresAt time.Time `json:"expiresAt"`
+ RegistrationExpiresAt time.Time `json:"registrationExpiresAt"`
+ RefreshToken string `json:"refreshToken"`
+ ClientId string `json:"clientId"`
+ ClientSecret string `json:"clientSecret"`
+}
+
+type awsSSOCredentialsService struct {
+ Path string `json:"path,omitempty"`
+ SSOCachePath string `json:"cache_path,omitempty"`
+
+ Profile string `json:"profile,omitempty"`
+
+ logger logging.Logger
+
+ creds aws.Credentials
+
+ credentialsExpiresAt time.Time
+
+ session *ssoSessionDetails
+}
+
+func (cs *awsSSOCredentialsService) configPath() (string, error) {
+ if len(cs.Path) != 0 {
+ return cs.Path, nil
+ }
+
+ if cs.Path = os.Getenv(awsConfigFileEnvVar); len(cs.Path) != 0 {
+ return cs.Path, nil
+ }
+
+ homeDir, err := os.UserHomeDir()
+ if err != nil {
+ return "", fmt.Errorf("user home directory not found: %w", err)
+ }
+
+ cs.Path = filepath.Join(homeDir, ".aws", "config")
+
+ return cs.Path, nil
+}
+func (cs *awsSSOCredentialsService) ssoCachePath() (string, error) {
+ if len(cs.SSOCachePath) != 0 {
+ return cs.SSOCachePath, nil
+ }
+
+ homeDir, err := os.UserHomeDir()
+ if err != nil {
+ return "", fmt.Errorf("user home directory not found: %w", err)
+ }
+
+ cs.Path = filepath.Join(homeDir, ".aws", "sso", "cache")
+
+ return cs.Path, nil
+}
+
+func (cs *awsSSOCredentialsService) cacheKeyFileName() (string, error) {
+
+ val := cs.session.StartUrl
+ if cs.session.Name != "" {
+ val = cs.session.Name
+ }
+
+ hash := sha1.New()
+ hash.Write([]byte(val))
+ cacheKey := hex.EncodeToString(hash.Sum(nil))
+
+ return cacheKey + ".json", nil
+}
+
+func (cs *awsSSOCredentialsService) loadSSOCredentials() error {
+ ssoCachePath, err := cs.ssoCachePath()
+ if err != nil {
+ return fmt.Errorf("failed to get sso cache path: %w", err)
+ }
+
+ cacheKeyFile, err := cs.cacheKeyFileName()
+ if err != nil {
+ return err
+ }
+
+ cacheFile := path.Join(ssoCachePath, cacheKeyFile)
+ cache, err := os.ReadFile(cacheFile)
+ if err != nil {
+ return fmt.Errorf("failed to load cache file: %v", err)
+ }
+
+ if err := json.Unmarshal(cache, &cs.session); err != nil {
+ return fmt.Errorf("failed to unmarshal cache file: %v", err)
+ }
+
+ return nil
+
+}
+
+func (cs *awsSSOCredentialsService) loadSession() error {
+ configPath, err := cs.configPath()
+ if err != nil {
+ return fmt.Errorf("failed to get config path: %w", err)
+ }
+ config, err := ini.Load(configPath)
+ if err != nil {
+ return fmt.Errorf("failed to load config file: %w", err)
+ }
+
+ section, err := config.GetSection("profile " + cs.Profile)
+
+ if err != nil {
+ return fmt.Errorf("failed to find profile %s", cs.Profile)
+ }
+
+ accountID, err := section.GetKey("sso_account_id")
+ if err != nil {
+ return fmt.Errorf("failed to find sso_account_id key in profile %s", cs.Profile)
+ }
+
+ region, err := section.GetKey("region")
+ if err != nil {
+ return fmt.Errorf("failed to find region key in profile %s", cs.Profile)
+ }
+
+ roleName, err := section.GetKey("sso_role_name")
+ if err != nil {
+ return fmt.Errorf("failed to find sso_role_name key in profile %s", cs.Profile)
+ }
+
+ ssoSession, err := section.GetKey("sso_session")
+ if err != nil {
+ return fmt.Errorf("failed to find sso_session key in profile %s", cs.Profile)
+ }
+
+ sessionName := ssoSession.Value()
+
+ session, err := config.GetSection("sso-session " + sessionName)
+ if err != nil {
+ return fmt.Errorf("failed to find sso-session %s", sessionName)
+ }
+
+ startUrl, err := session.GetKey("sso_start_url")
+ if err != nil {
+ return fmt.Errorf("failed to find sso_start_url key in sso-session %s", sessionName)
+ }
+
+ cs.session = &ssoSessionDetails{
+ StartUrl: startUrl.Value(),
+ Name: sessionName,
+ AccountID: accountID.Value(),
+ Region: region.Value(),
+ RoleName: roleName.Value(),
+ }
+
+ return nil
+}
+
+func (cs *awsSSOCredentialsService) tryRefreshToken() error {
+ // Check if refresh token is empty
+ if cs.session.RefreshToken == "" {
+ return errors.New("refresh token is empty")
+ }
+
+ // Use the refresh token to get a new access token
+ // using the clientId, clientSecret and refreshToken from the loaded token
+ // return the new token
+ // if error, return error
+
+ type refreshTokenRequest struct {
+ ClientId string `json:"clientId"`
+ ClientSecret string `json:"clientSecret"`
+ RefreshToken string `json:"refreshToken"`
+ GrantType string `json:"grantType"`
+ }
+
+ data := refreshTokenRequest{
+ ClientId: cs.session.ClientId,
+ ClientSecret: cs.session.ClientSecret,
+ RefreshToken: cs.session.RefreshToken,
+ GrantType: "refresh_token",
+ }
+
+ body, err := json.Marshal(data)
+ if err != nil {
+ return fmt.Errorf("failed to marshal refresh token request: %v", err)
+ }
+
+ endpoint := fmt.Sprintf("https://oidc.%s.amazonaws.com/token", cs.session.Region)
+ r, err := http.NewRequest("POST", endpoint, bytes.NewReader(body))
+ if err != nil {
+ return fmt.Errorf("failed to create new request: %v", err)
+ }
+
+ r.Header.Add("Content-Type", "application/json")
+ c := &http.Client{}
+ resp, err := c.Do(r)
+ if err != nil {
+ return fmt.Errorf("failed to do request: %v", err)
+ }
+ defer resp.Body.Close()
+
+ type refreshTokenResponse struct {
+ AccessToken string `json:"accessToken"`
+ ExpiresIn int `json:"expiresIn"`
+ RefreshToken string `json:"refreshToken"`
+ }
+
+ refreshedToken := refreshTokenResponse{}
+
+ if err := json.NewDecoder(resp.Body).Decode(&refreshedToken); err != nil {
+ return fmt.Errorf("failed to decode response: %v", err)
+ }
+
+ cs.session.AccessToken = refreshedToken.AccessToken
+ cs.session.ExpiresAt = time.Now().Add(time.Duration(refreshedToken.ExpiresIn) * time.Second)
+ cs.session.RefreshToken = refreshedToken.RefreshToken
+
+ return nil
+}
+
+func (cs *awsSSOCredentialsService) refreshCredentials() error {
+ url := fmt.Sprintf("https://portal.sso.%s.amazonaws.com/federation/credentials?account_id=%s&role_name=%s", cs.session.Region, cs.session.AccountID, cs.session.RoleName)
+
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return err
+ }
+
+ req.Header.Set("Authorization", "Bearer "+cs.session.AccessToken)
+ req.Header.Set("Content-Type", "application/json")
+
+ client := &http.Client{}
+ resp, err := client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ type roleCredentials struct {
+ AccessKeyId string `json:"accessKeyId"`
+ SecretAccessKey string `json:"secretAccessKey"`
+ SessionToken string `json:"sessionToken"`
+ Expiration int64 `json:"expiration"`
+ }
+ type getRoleCredentialsResponse struct {
+ RoleCredentials roleCredentials `json:"roleCredentials"`
+ }
+
+ var result getRoleCredentialsResponse
+
+ if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+ return fmt.Errorf("failed to decode response: %v", err)
+ }
+
+ cs.creds = aws.Credentials{
+ AccessKey: result.RoleCredentials.AccessKeyId,
+ SecretKey: result.RoleCredentials.SecretAccessKey,
+ SessionToken: result.RoleCredentials.SessionToken,
+ RegionName: cs.session.Region,
+ }
+
+ cs.credentialsExpiresAt = time.Unix(result.RoleCredentials.Expiration, 0)
+
+ return nil
+}
+
+func (cs *awsSSOCredentialsService) loadProfile() {
+ if cs.Profile != "" {
+ return
+ }
+
+ cs.Profile = os.Getenv(awsProfileEnvVar)
+
+ if cs.Profile == "" {
+ cs.Profile = "default"
+ }
+
+}
+
+func (cs *awsSSOCredentialsService) init() error {
+ cs.loadProfile()
+
+ if err := cs.loadSession(); err != nil {
+ return fmt.Errorf("failed to load session: %w", err)
+ }
+
+ if err := cs.loadSSOCredentials(); err != nil {
+ return fmt.Errorf("failed to load SSO credentials: %w", err)
+ }
+
+ // this enforces fetching credentials
+ cs.credentialsExpiresAt = time.Unix(0, 0)
+ return nil
+}
+
+func (cs *awsSSOCredentialsService) credentials(context.Context) (aws.Credentials, error) {
+ if cs.session == nil {
+ if err := cs.init(); err != nil {
+ return aws.Credentials{}, err
+ }
+ }
+
+ if cs.credentialsExpiresAt.Before(time.Now().Add(5 * time.Minute)) {
+ // Check if the sso token we have is still valid,
+ // if not, try to refresh it
+ if cs.session.ExpiresAt.Before(time.Now()) {
+ // we try and get a new token if we can
+ if cs.session.RegistrationExpiresAt.Before(time.Now()) {
+ return aws.Credentials{}, errors.New("cannot refresh token, registration expired")
+ }
+
+ if err := cs.tryRefreshToken(); err != nil {
+ return aws.Credentials{}, fmt.Errorf("failed to refresh token: %w", err)
+ }
+ }
+
+ if err := cs.refreshCredentials(); err != nil {
+ return aws.Credentials{}, fmt.Errorf("failed to refresh credentials: %w", err)
+ }
+ }
+
+ return cs.creds, nil
+}
+
// awsProfileCredentialService represents a credential provider for AWS that extracts credentials from the AWS
// credentials file
type awsProfileCredentialService struct {
@@ -678,7 +1010,7 @@ func (ap *ecrAuthPlugin) Prepare(r *http.Request) error {
ap.logger.Debug("Signing request with ECR authorization token")
- r.Header.Set("Authorization", fmt.Sprintf("Basic %s", ap.token.AuthorizationToken))
+ r.Header.Set("Authorization", "Basic "+ap.token.AuthorizationToken)
return nil
}
diff --git a/vendor/github.com/open-policy-agent/opa/plugins/rest/azure.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/azure.go
similarity index 63%
rename from vendor/github.com/open-policy-agent/opa/plugins/rest/azure.go
rename to vendor/github.com/open-policy-agent/opa/v1/plugins/rest/azure.go
index ae00d48a7c..9f7a164327 100644
--- a/vendor/github.com/open-policy-agent/opa/plugins/rest/azure.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/azure.go
@@ -1,6 +1,9 @@
package rest
import (
+ "bytes"
+ "context"
+ "encoding/base64"
"encoding/json"
"errors"
"fmt"
@@ -17,6 +20,7 @@ var (
defaultResource = "https://storage.azure.com/"
timeout = 5 * time.Second
defaultAPIVersionForAppServiceMsi = "2019-08-01"
+ defaultKeyVaultAPIVersion = "7.4"
)
// azureManagedIdentitiesToken holds a token for managed identities for Azure resources
@@ -52,11 +56,7 @@ type azureManagedIdentitiesAuthPlugin struct {
UseAppServiceMsi bool `json:"use_app_service_msi,omitempty"`
}
-func (ap *azureManagedIdentitiesAuthPlugin) NewClient(c Config) (*http.Client, error) {
- if c.Type == "oci" {
- return nil, errors.New("azure managed identities auth: OCI service not supported")
- }
-
+func (ap *azureManagedIdentitiesAuthPlugin) setDefaults() {
if ap.Endpoint == "" {
identityEndpoint := os.Getenv("IDENTITY_ENDPOINT")
if identityEndpoint != "" {
@@ -79,6 +79,13 @@ func (ap *azureManagedIdentitiesAuthPlugin) NewClient(c Config) (*http.Client, e
}
}
+}
+
+func (ap *azureManagedIdentitiesAuthPlugin) NewClient(c Config) (*http.Client, error) {
+ if c.Type == "oci" {
+ return nil, errors.New("azure managed identities auth: OCI service not supported")
+ }
+ ap.setDefaults()
t, err := DefaultTLSConfig(c)
if err != nil {
return nil, err
@@ -151,7 +158,6 @@ func azureManagedIdentitiesTokenRequest(
if err != nil {
return token, err
}
-
return token, nil
}
@@ -178,3 +184,104 @@ func buildAzureManagedIdentitiesRequestPath(
return endpoint + "?" + params.Encode()
}
+
+type azureKeyVaultSignPlugin struct {
+ config azureKeyVaultConfig
+ tokener func() (string, error)
+}
+
+func newKeyVaultSignPlugin(ap *azureManagedIdentitiesAuthPlugin, cfg *azureKeyVaultConfig) *azureKeyVaultSignPlugin {
+ resp := &azureKeyVaultSignPlugin{
+ tokener: func() (string, error) {
+ resp, err := azureManagedIdentitiesTokenRequest(
+ ap.Endpoint,
+ ap.APIVersion,
+ cfg.URL.String(),
+ ap.ObjectID,
+ ap.ClientID,
+ ap.MiResID,
+ ap.UseAppServiceMsi)
+ if err != nil {
+ return "", err
+ }
+ return resp.AccessToken, nil
+ },
+ config: *cfg,
+ }
+ return resp
+}
+
+func (akv *azureKeyVaultSignPlugin) setDefaults() {
+ if akv.config.APIVersion == "" {
+ akv.config.APIVersion = defaultKeyVaultAPIVersion
+ }
+}
+
+type kvRequest struct {
+ Alg string `json:"alg"`
+ Value string `json:"value"`
+}
+
+type kvResponse struct {
+ KID string `json:"kid"`
+ Value string `json:"value"`
+}
+
+// SignDigest() uses the Microsoft keyvault rest api to sign a byte digest
+// https://learn.microsoft.com/en-us/rest/api/keyvault/keys/sign/sign
+func (ap *azureKeyVaultSignPlugin) SignDigest(ctx context.Context, digest []byte) (string, error) {
+ tkn, err := ap.tokener()
+ if err != nil {
+ return "", err
+ }
+ if ap.config.URL.Host == "" {
+ return "", errors.New("keyvault host not set")
+ }
+
+ signingURL := ap.config.URL.JoinPath("keys", ap.config.Key, ap.config.KeyVersion, "sign")
+ q := signingURL.Query()
+ q.Set("api-version", ap.config.APIVersion)
+ signingURL.RawQuery = q.Encode()
+ reqBody, err := json.Marshal(kvRequest{
+ Alg: ap.config.Alg,
+ Value: base64.StdEncoding.EncodeToString(digest)})
+ if err != nil {
+ return "", err
+ }
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, signingURL.String(), bytes.NewBuffer(reqBody))
+ if err != nil {
+ return "", err
+ }
+
+ req.Header.Add("Authorization", "Bearer "+tkn)
+ req.Header.Add("Content-Type", "application/json")
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return "", err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ if resp.Body != nil {
+ defer resp.Body.Close()
+ b, _ := io.ReadAll(resp.Body)
+ return "", fmt.Errorf("non 200 status code, got: %d. Body: %v", resp.StatusCode, string(b))
+ }
+ return "", fmt.Errorf("non 200 status code from keyvault sign, got: %d", resp.StatusCode)
+ }
+ defer resp.Body.Close()
+
+ respBytes, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return "", errors.New("failed to read keyvault response body")
+ }
+
+ var res kvResponse
+ err = json.Unmarshal(respBytes, &res)
+ if err != nil {
+ return "", fmt.Errorf("no valid keyvault response, got: %v", string(respBytes))
+ }
+
+ return res.Value, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/plugins/rest/gcp.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/gcp.go
similarity index 100%
rename from vendor/github.com/open-policy-agent/opa/plugins/rest/gcp.go
rename to vendor/github.com/open-policy-agent/opa/v1/plugins/rest/gcp.go
diff --git a/vendor/github.com/open-policy-agent/opa/plugins/rest/rest.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/rest.go
similarity index 92%
rename from vendor/github.com/open-policy-agent/opa/plugins/rest/rest.go
rename to vendor/github.com/open-policy-agent/opa/v1/plugins/rest/rest.go
index fd59058ca1..f8be30af5e 100644
--- a/vendor/github.com/open-policy-agent/opa/plugins/rest/rest.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/rest.go
@@ -12,16 +12,17 @@ import (
"errors"
"fmt"
"io"
+ "maps"
"net/http"
"net/http/httputil"
"reflect"
"strings"
"github.com/open-policy-agent/opa/internal/version"
- "github.com/open-policy-agent/opa/keys"
- "github.com/open-policy-agent/opa/logging"
- "github.com/open-policy-agent/opa/tracing"
- "github.com/open-policy-agent/opa/util"
+ "github.com/open-policy-agent/opa/v1/keys"
+ "github.com/open-policy-agent/opa/v1/logging"
+ "github.com/open-policy-agent/opa/v1/tracing"
+ "github.com/open-policy-agent/opa/v1/util"
)
const (
@@ -94,7 +95,7 @@ func (c *Config) AuthPlugin(lookup AuthPluginLookupFunc) (HTTPAuthPlugin, error)
}
// reflection avoids need for this code to change as auth plugins are added
s := reflect.ValueOf(c.Credentials)
- for i := 0; i < s.NumField(); i++ {
+ for i := range s.NumField() {
if s.Field(i).IsNil() {
continue
}
@@ -132,12 +133,12 @@ func (c *Config) authPrepare(req *http.Request, lookup AuthPluginLookupFunc) err
// services.
type Client struct {
bytes *[]byte
- json *interface{}
+ json *any
config Config
headers map[string]string
authPluginLookup AuthPluginLookupFunc
logger logging.Logger
- loggerFields map[string]interface{}
+ loggerFields map[string]any
distributedTacingOpts tracing.Options
}
@@ -233,7 +234,7 @@ func (c Client) Logger() logging.Logger {
}
// LoggerFields returns the fields used for log statements used by Client
-func (c Client) LoggerFields() map[string]interface{} {
+func (c Client) LoggerFields() map[string]any {
return c.loggerFields
}
@@ -253,7 +254,7 @@ func (c Client) WithHeader(k, v string) Client {
// WithJSON returns a shallow copy of the client with the JSON value set as the
// message body to include the requests. This function sets the Content-Type
// header.
-func (c Client) WithJSON(body interface{}) Client {
+func (c Client) WithJSON(body any) Client {
c = c.WithHeader("Content-Type", "application/json")
c.json = &body
return c
@@ -293,7 +294,7 @@ func (c Client) Do(ctx context.Context, method, path string) (*http.Response, er
}
url := c.config.URL + "/" + path
- req, err := http.NewRequest(method, url, body)
+ req, err := http.NewRequestWithContext(ctx, method, url, body)
if err != nil {
return nil, err
}
@@ -303,28 +304,21 @@ func (c Client) Do(ctx context.Context, method, path string) (*http.Response, er
}
// Copy custom headers from config.
- for key, value := range c.config.Headers {
- headers[key] = value
- }
+ maps.Copy(headers, c.config.Headers)
// Overwrite with headers set directly on client.
- for key, value := range c.headers {
- headers[key] = value
- }
+ maps.Copy(headers, c.headers)
for key, value := range headers {
req.Header.Add(key, value)
}
- req = req.WithContext(ctx)
-
- err = c.config.authPrepare(req, c.authPluginLookup)
- if err != nil {
+ if err = c.config.authPrepare(req, c.authPluginLookup); err != nil {
return nil, err
}
if c.logger.GetLevel() >= logging.Debug {
- c.loggerFields = map[string]interface{}{
+ c.loggerFields = map[string]any{
"method": method,
"url": url,
"headers": withMaskedHeaders(req.Header),
@@ -347,7 +341,7 @@ func (c Client) Do(ctx context.Context, method, path string) (*http.Response, er
return nil, err
}
- if len(string(dump)) < defaultResponseSizeLimitBytes {
+ if len(dump) < defaultResponseSizeLimitBytes {
c.loggerFields["response"] = string(dump)
} else {
c.loggerFields["response"] = fmt.Sprintf("%v...", string(dump[:defaultResponseSizeLimitBytes]))
diff --git a/vendor/github.com/open-policy-agent/opa/v1/rego/errors.go b/vendor/github.com/open-policy-agent/opa/v1/rego/errors.go
new file mode 100644
index 0000000000..dcc5e2679d
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/rego/errors.go
@@ -0,0 +1,24 @@
+package rego
+
+// HaltError is an error type to return from a custom function implementation
+// that will abort the evaluation process (analogous to topdown.Halt).
+type HaltError struct {
+ err error
+}
+
+// Error delegates to the wrapped error
+func (h *HaltError) Error() string {
+ return h.err.Error()
+}
+
+// NewHaltError wraps an error such that the evaluation process will stop
+// when it occurs.
+func NewHaltError(err error) error {
+ return &HaltError{err: err}
+}
+
+// ErrorDetails interface is satisfied by an error that provides further
+// details.
+type ErrorDetails interface {
+ Lines() []string
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/rego/plugins.go b/vendor/github.com/open-policy-agent/opa/v1/rego/plugins.go
new file mode 100644
index 0000000000..55b5ed7803
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/rego/plugins.go
@@ -0,0 +1,43 @@
+// Copyright 2023 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package rego
+
+import (
+ "context"
+ "sync"
+
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/ir"
+)
+
+var targetPlugins = map[string]TargetPlugin{}
+var pluginMtx sync.Mutex
+
+type TargetPlugin interface {
+ IsTarget(string) bool
+ PrepareForEval(context.Context, *ir.Policy, ...PrepareOption) (TargetPluginEval, error)
+}
+
+type TargetPluginEval interface {
+ Eval(context.Context, *EvalContext, ast.Value) (ast.Value, error)
+}
+
+func (*Rego) targetPlugin(tgt string) TargetPlugin {
+ for _, p := range targetPlugins {
+ if p.IsTarget(tgt) {
+ return p
+ }
+ }
+ return nil
+}
+
+func RegisterPlugin(name string, p TargetPlugin) {
+ pluginMtx.Lock()
+ defer pluginMtx.Unlock()
+ if _, ok := targetPlugins[name]; ok {
+ panic("plugin already registered " + name)
+ }
+ targetPlugins[name] = p
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/rego/rego.go b/vendor/github.com/open-policy-agent/opa/v1/rego/rego.go
new file mode 100644
index 0000000000..2c4d8a8d91
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/rego/rego.go
@@ -0,0 +1,3028 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package rego exposes high level APIs for evaluating Rego policies.
+package rego
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "maps"
+ "strings"
+ "time"
+
+ bundleUtils "github.com/open-policy-agent/opa/internal/bundle"
+ "github.com/open-policy-agent/opa/internal/compiler/wasm"
+ "github.com/open-policy-agent/opa/internal/future"
+ "github.com/open-policy-agent/opa/internal/planner"
+ "github.com/open-policy-agent/opa/internal/rego/opa"
+ "github.com/open-policy-agent/opa/internal/wasm/encoding"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/bundle"
+ "github.com/open-policy-agent/opa/v1/ir"
+ "github.com/open-policy-agent/opa/v1/loader"
+ "github.com/open-policy-agent/opa/v1/metrics"
+ "github.com/open-policy-agent/opa/v1/plugins"
+ "github.com/open-policy-agent/opa/v1/resolver"
+ "github.com/open-policy-agent/opa/v1/storage"
+ "github.com/open-policy-agent/opa/v1/storage/inmem"
+ "github.com/open-policy-agent/opa/v1/topdown"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/topdown/cache"
+ "github.com/open-policy-agent/opa/v1/topdown/print"
+ "github.com/open-policy-agent/opa/v1/tracing"
+ "github.com/open-policy-agent/opa/v1/types"
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+const (
+ defaultPartialNamespace = "partial"
+ wasmVarPrefix = "^"
+)
+
+// nolint: deadcode,varcheck
+const (
+ targetWasm = "wasm"
+ targetRego = "rego"
+)
+
+// CompileResult represents the result of compiling a Rego query, zero or more
+// Rego modules, and arbitrary contextual data into an executable.
+type CompileResult struct {
+ Bytes []byte `json:"bytes"`
+}
+
+// PartialQueries contains the queries and support modules produced by partial
+// evaluation.
+type PartialQueries struct {
+ Queries []ast.Body `json:"queries,omitempty"`
+ Support []*ast.Module `json:"modules,omitempty"`
+}
+
+// PartialResult represents the result of partial evaluation. The result can be
+// used to generate a new query that can be run when inputs are known.
+type PartialResult struct {
+ compiler *ast.Compiler
+ store storage.Store
+ body ast.Body
+ builtinDecls map[string]*ast.Builtin
+ builtinFuncs map[string]*topdown.Builtin
+}
+
+// Rego returns an object that can be evaluated to produce a query result.
+func (pr PartialResult) Rego(options ...func(*Rego)) *Rego {
+ options = append(options, Compiler(pr.compiler), Store(pr.store), ParsedQuery(pr.body))
+ r := New(options...)
+
+ // Propagate any custom builtins.
+ maps.Copy(r.builtinDecls, pr.builtinDecls)
+ maps.Copy(r.builtinFuncs, pr.builtinFuncs)
+ return r
+}
+
+// preparedQuery is a wrapper around a Rego object which has pre-processed
+// state stored on it. Once prepared there are a more limited number of actions
+// that can be taken with it. It will, however, be able to evaluate faster since
+// it will not have to re-parse or compile as much.
+type preparedQuery struct {
+ r *Rego
+ cfg *PrepareConfig
+}
+
+// EvalContext defines the set of options allowed to be set at evaluation
+// time. Any other options will need to be set on a new Rego object.
+type EvalContext struct {
+ hasInput bool
+ time time.Time
+ seed io.Reader
+ rawInput *any
+ parsedInput ast.Value
+ metrics metrics.Metrics
+ txn storage.Transaction
+ instrument bool
+ instrumentation *topdown.Instrumentation
+ partialNamespace string
+ queryTracers []topdown.QueryTracer
+ compiledQuery compiledQuery
+ unknowns []string
+ disableInlining []ast.Ref
+ nondeterministicBuiltins bool
+ parsedUnknowns []*ast.Term
+ indexing bool
+ earlyExit bool
+ interQueryBuiltinCache cache.InterQueryCache
+ interQueryBuiltinValueCache cache.InterQueryValueCache
+ ndBuiltinCache builtins.NDBCache
+ resolvers []refResolver
+ httpRoundTripper topdown.CustomizeRoundTripper
+ sortSets bool
+ copyMaps bool
+ printHook print.Hook
+ capabilities *ast.Capabilities
+ strictBuiltinErrors bool
+ virtualCache topdown.VirtualCache
+ baseCache topdown.BaseCache
+ tracing tracing.Options
+ externalCancel topdown.Cancel // Note(philip): If non-nil, the cancellation is handled outside of this package.
+}
+
+func (e *EvalContext) RawInput() *any {
+ return e.rawInput
+}
+
+func (e *EvalContext) ParsedInput() ast.Value {
+ return e.parsedInput
+}
+
+func (e *EvalContext) Time() time.Time {
+ return e.time
+}
+
+func (e *EvalContext) Seed() io.Reader {
+ return e.seed
+}
+
+func (e *EvalContext) InterQueryBuiltinCache() cache.InterQueryCache {
+ return e.interQueryBuiltinCache
+}
+
+func (e *EvalContext) InterQueryBuiltinValueCache() cache.InterQueryValueCache {
+ return e.interQueryBuiltinValueCache
+}
+
+func (e *EvalContext) PrintHook() print.Hook {
+ return e.printHook
+}
+
+func (e *EvalContext) Metrics() metrics.Metrics {
+ return e.metrics
+}
+
+func (e *EvalContext) StrictBuiltinErrors() bool {
+ return e.strictBuiltinErrors
+}
+
+func (e *EvalContext) NDBCache() builtins.NDBCache {
+ return e.ndBuiltinCache
+}
+
+func (e *EvalContext) CompiledQuery() ast.Body {
+ return e.compiledQuery.query
+}
+
+func (e *EvalContext) Capabilities() *ast.Capabilities {
+ return e.capabilities
+}
+
+func (e *EvalContext) Transaction() storage.Transaction {
+ return e.txn
+}
+
+func (e *EvalContext) TracingOpts() tracing.Options {
+ return e.tracing
+}
+
+func (e *EvalContext) ExternalCancel() topdown.Cancel {
+ return e.externalCancel
+}
+
+func (e *EvalContext) QueryTracers() []topdown.QueryTracer {
+ return e.queryTracers
+}
+
+// EvalOption defines a function to set an option on an EvalConfig
+type EvalOption func(*EvalContext)
+
+// EvalInput configures the input for a Prepared Query's evaluation
+func EvalInput(input any) EvalOption {
+ return func(e *EvalContext) {
+ e.rawInput = &input
+ e.hasInput = true
+ }
+}
+
+// EvalParsedInput configures the input for a Prepared Query's evaluation
+func EvalParsedInput(input ast.Value) EvalOption {
+ return func(e *EvalContext) {
+ e.parsedInput = input
+ e.hasInput = true
+ }
+}
+
+// EvalMetrics configures the metrics for a Prepared Query's evaluation
+func EvalMetrics(metric metrics.Metrics) EvalOption {
+ return func(e *EvalContext) {
+ e.metrics = metric
+ }
+}
+
+// EvalTransaction configures the Transaction for a Prepared Query's evaluation
+func EvalTransaction(txn storage.Transaction) EvalOption {
+ return func(e *EvalContext) {
+ e.txn = txn
+ }
+}
+
+// EvalInstrument enables or disables instrumenting for a Prepared Query's evaluation
+func EvalInstrument(instrument bool) EvalOption {
+ return func(e *EvalContext) {
+ e.instrument = instrument
+ }
+}
+
+// EvalTracer configures a tracer for a Prepared Query's evaluation
+// Deprecated: Use EvalQueryTracer instead.
+func EvalTracer(tracer topdown.Tracer) EvalOption {
+ return func(e *EvalContext) {
+ if tracer != nil {
+ e.queryTracers = append(e.queryTracers, topdown.WrapLegacyTracer(tracer))
+ }
+ }
+}
+
+// EvalQueryTracer configures a tracer for a Prepared Query's evaluation
+func EvalQueryTracer(tracer topdown.QueryTracer) EvalOption {
+ return func(e *EvalContext) {
+ if tracer != nil {
+ e.queryTracers = append(e.queryTracers, tracer)
+ }
+ }
+}
+
+// EvalPartialNamespace returns an argument that sets the namespace to use for
+// partial evaluation results. The namespace must be a valid package path
+// component.
+func EvalPartialNamespace(ns string) EvalOption {
+ return func(e *EvalContext) {
+ e.partialNamespace = ns
+ }
+}
+
+// EvalUnknowns returns an argument that sets the values to treat as
+// unknown during partial evaluation.
+func EvalUnknowns(unknowns []string) EvalOption {
+ return func(e *EvalContext) {
+ e.unknowns = unknowns
+ }
+}
+
+// EvalDisableInlining returns an argument that adds a set of paths to exclude from
+// partial evaluation inlining.
+func EvalDisableInlining(paths []ast.Ref) EvalOption {
+ return func(e *EvalContext) {
+ e.disableInlining = paths
+ }
+}
+
+// EvalParsedUnknowns returns an argument that sets the values to treat
+// as unknown during partial evaluation.
+func EvalParsedUnknowns(unknowns []*ast.Term) EvalOption {
+ return func(e *EvalContext) {
+ e.parsedUnknowns = unknowns
+ }
+}
+
+// EvalRuleIndexing will disable indexing optimizations for the
+// evaluation. This should only be used when tracing in debug mode.
+func EvalRuleIndexing(enabled bool) EvalOption {
+ return func(e *EvalContext) {
+ e.indexing = enabled
+ }
+}
+
+// EvalEarlyExit will disable 'early exit' optimizations for the
+// evaluation. This should only be used when tracing in debug mode.
+func EvalEarlyExit(enabled bool) EvalOption {
+ return func(e *EvalContext) {
+ e.earlyExit = enabled
+ }
+}
+
+// EvalTime sets the wall clock time to use during policy evaluation.
+// time.now_ns() calls will return this value.
+func EvalTime(x time.Time) EvalOption {
+ return func(e *EvalContext) {
+ e.time = x
+ }
+}
+
+// EvalSeed sets a reader that will seed randomization required by built-in functions.
+// If a seed is not provided crypto/rand.Reader is used.
+func EvalSeed(r io.Reader) EvalOption {
+ return func(e *EvalContext) {
+ e.seed = r
+ }
+}
+
+// EvalInterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize
+// during evaluation.
+func EvalInterQueryBuiltinCache(c cache.InterQueryCache) EvalOption {
+ return func(e *EvalContext) {
+ e.interQueryBuiltinCache = c
+ }
+}
+
+// EvalInterQueryBuiltinValueCache sets the inter-query value cache that built-in functions can utilize
+// during evaluation.
+func EvalInterQueryBuiltinValueCache(c cache.InterQueryValueCache) EvalOption {
+ return func(e *EvalContext) {
+ e.interQueryBuiltinValueCache = c
+ }
+}
+
+// EvalNDBuiltinCache sets the non-deterministic builtin cache that built-in functions can
+// use during evaluation.
+func EvalNDBuiltinCache(c builtins.NDBCache) EvalOption {
+ return func(e *EvalContext) {
+ e.ndBuiltinCache = c
+ }
+}
+
+// EvalResolver sets a Resolver for a specified ref path for this evaluation.
+func EvalResolver(ref ast.Ref, r resolver.Resolver) EvalOption {
+ return func(e *EvalContext) {
+ e.resolvers = append(e.resolvers, refResolver{ref, r})
+ }
+}
+
+// EvalHTTPRoundTripper allows customizing the http.RoundTripper for this evaluation.
+func EvalHTTPRoundTripper(t topdown.CustomizeRoundTripper) EvalOption {
+ return func(e *EvalContext) {
+ e.httpRoundTripper = t
+ }
+}
+
+// EvalSortSets causes the evaluator to sort sets before returning them as JSON arrays.
+func EvalSortSets(yes bool) EvalOption {
+ return func(e *EvalContext) {
+ e.sortSets = yes
+ }
+}
+
+// EvalCopyMaps causes the evaluator to copy `map[string]any`s before returning them.
+func EvalCopyMaps(yes bool) EvalOption {
+ return func(e *EvalContext) {
+ e.copyMaps = yes
+ }
+}
+
+// EvalPrintHook sets the object to use for handling print statement outputs.
+func EvalPrintHook(ph print.Hook) EvalOption {
+ return func(e *EvalContext) {
+ e.printHook = ph
+ }
+}
+
+// EvalVirtualCache sets the topdown.VirtualCache to use for evaluation.
+// This is optional, and if not set, the default cache is used.
+func EvalVirtualCache(vc topdown.VirtualCache) EvalOption {
+ return func(e *EvalContext) {
+ e.virtualCache = vc
+ }
+}
+
+// EvalBaseCache sets the topdown.BaseCache to use for evaluation.
+// This is optional, and if not set, the default cache is used.
+func EvalBaseCache(bc topdown.BaseCache) EvalOption {
+ return func(e *EvalContext) {
+ e.baseCache = bc
+ }
+}
+
+// EvalNondeterministicBuiltins causes non-deterministic builtins to be evalued
+// during partial evaluation. This is needed to pull in external data, or validate
+// a JWT, during PE, so that the result informs what queries are returned.
+func EvalNondeterministicBuiltins(yes bool) EvalOption {
+ return func(e *EvalContext) {
+ e.nondeterministicBuiltins = yes
+ }
+}
+
+// EvalExternalCancel sets an external topdown.Cancel for the interpreter to use
+// for cancellation. This is useful for batch-evaluation of many rego queries.
+func EvalExternalCancel(ec topdown.Cancel) EvalOption {
+ return func(e *EvalContext) {
+ e.externalCancel = ec
+ }
+}
+
+func (pq preparedQuery) Modules() map[string]*ast.Module {
+ mods := make(map[string]*ast.Module)
+
+ maps.Copy(mods, pq.r.parsedModules)
+
+ for _, b := range pq.r.bundles {
+ for _, mod := range b.Modules {
+ mods[mod.Path] = mod.Parsed
+ }
+ }
+
+ return mods
+}
+
+// newEvalContext creates a new EvalContext overlaying any EvalOptions over top
+// the Rego object on the preparedQuery. The returned function should be called
+// once the evaluation is complete to close any transactions that might have
+// been opened.
+func (pq preparedQuery) newEvalContext(ctx context.Context, options []EvalOption) (*EvalContext, func(context.Context), error) {
+ ectx := &EvalContext{
+ hasInput: false,
+ rawInput: nil,
+ parsedInput: nil,
+ metrics: nil,
+ txn: nil,
+ instrument: false,
+ instrumentation: nil,
+ partialNamespace: pq.r.partialNamespace,
+ queryTracers: nil,
+ unknowns: pq.r.unknowns,
+ parsedUnknowns: pq.r.parsedUnknowns,
+ nondeterministicBuiltins: pq.r.nondeterministicBuiltins,
+ compiledQuery: compiledQuery{},
+ indexing: true,
+ earlyExit: true,
+ resolvers: pq.r.resolvers,
+ printHook: pq.r.printHook,
+ capabilities: pq.r.capabilities,
+ strictBuiltinErrors: pq.r.strictBuiltinErrors,
+ tracing: pq.r.distributedTracingOpts,
+ }
+
+ for _, o := range options {
+ o(ectx)
+ }
+
+ if ectx.metrics == nil {
+ ectx.metrics = metrics.New()
+ }
+
+ if ectx.instrument {
+ ectx.instrumentation = topdown.NewInstrumentation(ectx.metrics)
+ }
+
+ // Default to an empty "finish" function
+ finishFunc := func(context.Context) {}
+
+ var err error
+ ectx.disableInlining, err = parseStringsToRefs(pq.r.disableInlining)
+ if err != nil {
+ return nil, finishFunc, err
+ }
+
+ if ectx.txn == nil {
+ ectx.txn, err = pq.r.store.NewTransaction(ctx)
+ if err != nil {
+ return nil, finishFunc, err
+ }
+ finishFunc = func(ctx context.Context) {
+ pq.r.store.Abort(ctx, ectx.txn)
+ }
+ }
+
+ // If we didn't get an input specified in the Eval options
+ // then fall back to the Rego object's input fields.
+ if !ectx.hasInput {
+ ectx.rawInput = pq.r.rawInput
+ ectx.parsedInput = pq.r.parsedInput
+ }
+
+ if ectx.parsedInput == nil {
+ if ectx.rawInput == nil {
+ // Fall back to the original Rego objects input if none was specified
+ // Note that it could still be nil
+ ectx.rawInput = pq.r.rawInput
+ }
+
+ if pq.r.targetPlugin(pq.r.target) == nil && // no plugin claims this target
+ pq.r.target != targetWasm {
+ ectx.parsedInput, err = pq.r.parseRawInput(ectx.rawInput, ectx.metrics)
+ if err != nil {
+ return nil, finishFunc, err
+ }
+ }
+ }
+
+ return ectx, finishFunc, nil
+}
+
+// PreparedEvalQuery holds the prepared Rego state that has been pre-processed
+// for subsequent evaluations.
+type PreparedEvalQuery struct {
+ preparedQuery
+}
+
+// Eval evaluates this PartialResult's Rego object with additional eval options
+// and returns a ResultSet.
+// If options are provided they will override the original Rego options respective value.
+// The original Rego object transaction will *not* be re-used. A new transaction will be opened
+// if one is not provided with an EvalOption.
+func (pq PreparedEvalQuery) Eval(ctx context.Context, options ...EvalOption) (ResultSet, error) {
+ ectx, finish, err := pq.newEvalContext(ctx, options)
+ if err != nil {
+ return nil, err
+ }
+ defer finish(ctx)
+
+ ectx.compiledQuery = pq.r.compiledQueries[evalQueryType]
+
+ return pq.r.eval(ctx, ectx)
+}
+
+// PreparedPartialQuery holds the prepared Rego state that has been pre-processed
+// for partial evaluations.
+type PreparedPartialQuery struct {
+ preparedQuery
+}
+
+// Partial runs partial evaluation on the prepared query and returns the result.
+// The original Rego object transaction will *not* be re-used. A new transaction will be opened
+// if one is not provided with an EvalOption.
+func (pq PreparedPartialQuery) Partial(ctx context.Context, options ...EvalOption) (*PartialQueries, error) {
+ ectx, finish, err := pq.newEvalContext(ctx, options)
+ if err != nil {
+ return nil, err
+ }
+ defer finish(ctx)
+
+ ectx.compiledQuery = pq.r.compiledQueries[partialQueryType]
+
+ return pq.r.partial(ctx, ectx)
+}
+
+// Errors represents a collection of errors returned when evaluating Rego.
+type Errors []error
+
+func (errs Errors) Error() string {
+ if len(errs) == 0 {
+ return "no error"
+ }
+ if len(errs) == 1 {
+ return fmt.Sprintf("1 error occurred: %v", errs[0].Error())
+ }
+ buf := []string{fmt.Sprintf("%v errors occurred", len(errs))}
+ for _, err := range errs {
+ buf = append(buf, err.Error())
+ }
+ return strings.Join(buf, "\n")
+}
+
+var errPartialEvaluationNotEffective = errors.New("partial evaluation not effective")
+
+// IsPartialEvaluationNotEffectiveErr returns true if err is an error returned by
+// this package to indicate that partial evaluation was ineffective.
+func IsPartialEvaluationNotEffectiveErr(err error) bool {
+ errs, ok := err.(Errors)
+ if !ok {
+ return false
+ }
+ return len(errs) == 1 && errs[0] == errPartialEvaluationNotEffective
+}
+
+type compiledQuery struct {
+ query ast.Body
+ compiler ast.QueryCompiler
+}
+
+type queryType int
+
+// Define a query type for each of the top level Rego
+// API's that compile queries differently.
+const (
+ evalQueryType queryType = iota
+ partialResultQueryType
+ partialQueryType
+ compileQueryType
+)
+
+type loadPaths struct {
+ paths []string
+ filter loader.Filter
+}
+
+// Rego constructs a query and can be evaluated to obtain results.
+type Rego struct {
+ query string
+ parsedQuery ast.Body
+ compiledQueries map[queryType]compiledQuery
+ pkg string
+ parsedPackage *ast.Package
+ imports []string
+ parsedImports []*ast.Import
+ rawInput *any
+ parsedInput ast.Value
+ unknowns []string
+ parsedUnknowns []*ast.Term
+ disableInlining []string
+ shallowInlining bool
+ nondeterministicBuiltins bool
+ skipPartialNamespace bool
+ partialNamespace string
+ modules []rawModule
+ parsedModules map[string]*ast.Module
+ compiler *ast.Compiler
+ store storage.Store
+ ownStore bool
+ ownStoreReadAst bool
+ txn storage.Transaction
+ metrics metrics.Metrics
+ queryTracers []topdown.QueryTracer
+ tracebuf *topdown.BufferTracer
+ trace bool
+ instrumentation *topdown.Instrumentation
+ instrument bool
+ capture map[*ast.Expr]ast.Var // map exprs to generated capture vars
+ termVarID int
+ dump io.Writer
+ runtime *ast.Term
+ time time.Time
+ seed io.Reader
+ capabilities *ast.Capabilities
+ builtinDecls map[string]*ast.Builtin
+ builtinFuncs map[string]*topdown.Builtin
+ unsafeBuiltins map[string]struct{}
+ loadPaths loadPaths
+ bundlePaths []string
+ bundles map[string]*bundle.Bundle
+ skipBundleVerification bool
+ bundleActivationPlugin string
+ enableBundleLazyLoadingMode bool
+ interQueryBuiltinCache cache.InterQueryCache
+ interQueryBuiltinValueCache cache.InterQueryValueCache
+ ndBuiltinCache builtins.NDBCache
+ strictBuiltinErrors bool
+ builtinErrorList *[]topdown.Error
+ resolvers []refResolver
+ schemaSet *ast.SchemaSet
+ target string // target type (wasm, rego, etc.)
+ opa opa.EvalEngine
+ generateJSON func(*ast.Term, *EvalContext) (any, error)
+ printHook print.Hook
+ enablePrintStatements bool
+ distributedTracingOpts tracing.Options
+ strict bool
+ pluginMgr *plugins.Manager
+ plugins []TargetPlugin
+ targetPrepState TargetPluginEval
+ regoVersion ast.RegoVersion
+ compilerHook func(*ast.Compiler)
+ evalMode *ast.CompilerEvalMode
+}
+
+func (r *Rego) RegoVersion() ast.RegoVersion {
+ return r.regoVersion
+}
+
+// Function represents a built-in function that is callable in Rego.
+type Function struct {
+ Name string
+ Description string
+ Decl *types.Function
+ Memoize bool
+ Nondeterministic bool
+}
+
+// BuiltinContext contains additional attributes from the evaluator that
+// built-in functions can use, e.g., the request context.Context, caches, etc.
+type BuiltinContext = topdown.BuiltinContext
+
+type (
+ // Builtin1 defines a built-in function that accepts 1 argument.
+ Builtin1 func(bctx BuiltinContext, op1 *ast.Term) (*ast.Term, error)
+
+ // Builtin2 defines a built-in function that accepts 2 arguments.
+ Builtin2 func(bctx BuiltinContext, op1, op2 *ast.Term) (*ast.Term, error)
+
+ // Builtin3 defines a built-in function that accepts 3 argument.
+ Builtin3 func(bctx BuiltinContext, op1, op2, op3 *ast.Term) (*ast.Term, error)
+
+ // Builtin4 defines a built-in function that accepts 4 argument.
+ Builtin4 func(bctx BuiltinContext, op1, op2, op3, op4 *ast.Term) (*ast.Term, error)
+
+ // BuiltinDyn defines a built-in function that accepts a list of arguments.
+ BuiltinDyn func(bctx BuiltinContext, terms []*ast.Term) (*ast.Term, error)
+)
+
+// RegisterBuiltin1 adds a built-in function globally inside the OPA runtime.
+func RegisterBuiltin1(decl *Function, impl Builtin1) {
+ ast.RegisterBuiltin(&ast.Builtin{
+ Name: decl.Name,
+ Description: decl.Description,
+ Decl: decl.Decl,
+ Nondeterministic: decl.Nondeterministic,
+ })
+ topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0]) })
+ return finishFunction(decl.Name, bctx, result, err, iter)
+ })
+}
+
+// RegisterBuiltin2 adds a built-in function globally inside the OPA runtime.
+func RegisterBuiltin2(decl *Function, impl Builtin2) {
+ ast.RegisterBuiltin(&ast.Builtin{
+ Name: decl.Name,
+ Description: decl.Description,
+ Decl: decl.Decl,
+ Nondeterministic: decl.Nondeterministic,
+ })
+ topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1]) })
+ return finishFunction(decl.Name, bctx, result, err, iter)
+ })
+}
+
+// RegisterBuiltin3 adds a built-in function globally inside the OPA runtime.
+func RegisterBuiltin3(decl *Function, impl Builtin3) {
+ ast.RegisterBuiltin(&ast.Builtin{
+ Name: decl.Name,
+ Description: decl.Description,
+ Decl: decl.Decl,
+ Nondeterministic: decl.Nondeterministic,
+ })
+ topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1], terms[2]) })
+ return finishFunction(decl.Name, bctx, result, err, iter)
+ })
+}
+
+// RegisterBuiltin4 adds a built-in function globally inside the OPA runtime.
+func RegisterBuiltin4(decl *Function, impl Builtin4) {
+ ast.RegisterBuiltin(&ast.Builtin{
+ Name: decl.Name,
+ Description: decl.Description,
+ Decl: decl.Decl,
+ Nondeterministic: decl.Nondeterministic,
+ })
+ topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1], terms[2], terms[3]) })
+ return finishFunction(decl.Name, bctx, result, err, iter)
+ })
+}
+
+// RegisterBuiltinDyn adds a built-in function globally inside the OPA runtime.
+func RegisterBuiltinDyn(decl *Function, impl BuiltinDyn) {
+ ast.RegisterBuiltin(&ast.Builtin{
+ Name: decl.Name,
+ Description: decl.Description,
+ Decl: decl.Decl,
+ Nondeterministic: decl.Nondeterministic,
+ })
+ topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms) })
+ return finishFunction(decl.Name, bctx, result, err, iter)
+ })
+}
+
+// Function1 returns an option that adds a built-in function to the Rego object.
+func Function1(decl *Function, f Builtin1) func(*Rego) {
+ return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0]) })
+ return finishFunction(decl.Name, bctx, result, err, iter)
+ })
+}
+
+// Function2 returns an option that adds a built-in function to the Rego object.
+func Function2(decl *Function, f Builtin2) func(*Rego) {
+ return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1]) })
+ return finishFunction(decl.Name, bctx, result, err, iter)
+ })
+}
+
+// Function3 returns an option that adds a built-in function to the Rego object.
+func Function3(decl *Function, f Builtin3) func(*Rego) {
+ return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1], terms[2]) })
+ return finishFunction(decl.Name, bctx, result, err, iter)
+ })
+}
+
+// Function4 returns an option that adds a built-in function to the Rego object.
+func Function4(decl *Function, f Builtin4) func(*Rego) {
+ return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1], terms[2], terms[3]) })
+ return finishFunction(decl.Name, bctx, result, err, iter)
+ })
+}
+
+// FunctionDyn returns an option that adds a built-in function to the Rego object.
+func FunctionDyn(decl *Function, f BuiltinDyn) func(*Rego) {
+ return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms) })
+ return finishFunction(decl.Name, bctx, result, err, iter)
+ })
+}
+
+// FunctionDecl returns an option that adds a custom-built-in function
+// __declaration__. NO implementation is provided. This is used for
+// non-interpreter execution envs (e.g., Wasm).
+func FunctionDecl(decl *Function) func(*Rego) {
+ return newDecl(decl)
+}
+
+func newDecl(decl *Function) func(*Rego) {
+ return func(r *Rego) {
+ r.builtinDecls[decl.Name] = &ast.Builtin{
+ Name: decl.Name,
+ Decl: decl.Decl,
+ }
+ }
+}
+
+type memo struct {
+ term *ast.Term
+ err error
+}
+
+type memokey string
+
+func memoize(decl *Function, bctx BuiltinContext, terms []*ast.Term, ifEmpty func() (*ast.Term, error)) (*ast.Term, error) {
+ if !decl.Memoize {
+ return ifEmpty()
+ }
+
+ // NOTE(tsandall): we assume memoization is applied to infrequent built-in
+ // calls that do things like fetch data from remote locations. As such,
+ // converting the terms to strings is acceptable for now.
+ var b strings.Builder
+ if _, err := b.WriteString(decl.Name); err != nil {
+ return nil, err
+ }
+
+ // The term slice _may_ include an output term depending on how the caller
+ // referred to the built-in function. Only use the arguments as the cache
+ // key. Unification ensures we don't get false positive matches.
+ for i := range decl.Decl.Arity() {
+ if _, err := b.WriteString(terms[i].String()); err != nil {
+ return nil, err
+ }
+ }
+
+ key := memokey(b.String())
+ hit, ok := bctx.Cache.Get(key)
+ var m memo
+ if ok {
+ m = hit.(memo)
+ } else {
+ m.term, m.err = ifEmpty()
+ bctx.Cache.Put(key, m)
+ }
+
+ return m.term, m.err
+}
+
+// Dump returns an argument that sets the writer to dump debugging information to.
+func Dump(w io.Writer) func(r *Rego) {
+ return func(r *Rego) {
+ r.dump = w
+ }
+}
+
+// Query returns an argument that sets the Rego query.
+func Query(q string) func(r *Rego) {
+ return func(r *Rego) {
+ r.query = q
+ }
+}
+
+// ParsedQuery returns an argument that sets the Rego query.
+func ParsedQuery(q ast.Body) func(r *Rego) {
+ return func(r *Rego) {
+ r.parsedQuery = q
+ }
+}
+
+// Package returns an argument that sets the Rego package on the query's
+// context.
+func Package(p string) func(r *Rego) {
+ return func(r *Rego) {
+ r.pkg = p
+ }
+}
+
+// ParsedPackage returns an argument that sets the Rego package on the query's
+// context.
+func ParsedPackage(pkg *ast.Package) func(r *Rego) {
+ return func(r *Rego) {
+ r.parsedPackage = pkg
+ }
+}
+
+// Imports returns an argument that adds a Rego import to the query's context.
+func Imports(p []string) func(r *Rego) {
+ return func(r *Rego) {
+ r.imports = append(r.imports, p...)
+ }
+}
+
+// ParsedImports returns an argument that adds Rego imports to the query's
+// context.
+func ParsedImports(imp []*ast.Import) func(r *Rego) {
+ return func(r *Rego) {
+ r.parsedImports = append(r.parsedImports, imp...)
+ }
+}
+
+// Input returns an argument that sets the Rego input document. Input should be
+// a native Go value representing the input document.
+func Input(x any) func(r *Rego) {
+ return func(r *Rego) {
+ r.rawInput = &x
+ }
+}
+
+// ParsedInput returns an argument that sets the Rego input document.
+func ParsedInput(x ast.Value) func(r *Rego) {
+ return func(r *Rego) {
+ r.parsedInput = x
+ }
+}
+
+// Unknowns returns an argument that sets the values to treat as unknown during
+// partial evaluation.
+func Unknowns(unknowns []string) func(r *Rego) {
+ return func(r *Rego) {
+ r.unknowns = unknowns
+ }
+}
+
+// ParsedUnknowns returns an argument that sets the values to treat as unknown
+// during partial evaluation.
+func ParsedUnknowns(unknowns []*ast.Term) func(r *Rego) {
+ return func(r *Rego) {
+ r.parsedUnknowns = unknowns
+ }
+}
+
+// DisableInlining adds a set of paths to exclude from partial evaluation inlining.
+func DisableInlining(paths []string) func(r *Rego) {
+ return func(r *Rego) {
+ r.disableInlining = paths
+ }
+}
+
+// NondeterministicBuiltins causes non-deterministic builtins to be evalued during
+// partial evaluation. This is needed to pull in external data, or validate a JWT,
+// during PE, so that the result informs what queries are returned.
+func NondeterministicBuiltins(yes bool) func(r *Rego) {
+ return func(r *Rego) {
+ r.nondeterministicBuiltins = yes
+ }
+}
+
+// ShallowInlining prevents rules that depend on unknown values from being inlined.
+// Rules that only depend on known values are inlined.
+func ShallowInlining(yes bool) func(r *Rego) {
+ return func(r *Rego) {
+ r.shallowInlining = yes
+ }
+}
+
+// SkipPartialNamespace disables namespacing of partial evalution results for support
+// rules generated from policy. Synthetic support rules are still namespaced.
+func SkipPartialNamespace(yes bool) func(r *Rego) {
+ return func(r *Rego) {
+ r.skipPartialNamespace = yes
+ }
+}
+
+// PartialNamespace returns an argument that sets the namespace to use for
+// partial evaluation results. The namespace must be a valid package path
+// component.
+func PartialNamespace(ns string) func(r *Rego) {
+ return func(r *Rego) {
+ r.partialNamespace = ns
+ }
+}
+
+// Module returns an argument that adds a Rego module.
+func Module(filename, input string) func(r *Rego) {
+ return func(r *Rego) {
+ r.modules = append(r.modules, rawModule{
+ filename: filename,
+ module: input,
+ })
+ }
+}
+
+// ParsedModule returns an argument that adds a parsed Rego module. If a string
+// module with the same filename name is added, it will override the parsed
+// module.
+func ParsedModule(module *ast.Module) func(*Rego) {
+ return func(r *Rego) {
+ var filename string
+ if module.Package.Location != nil {
+ filename = module.Package.Location.File
+ } else {
+ filename = fmt.Sprintf("module_%p.rego", module)
+ }
+ r.parsedModules[filename] = module
+ }
+}
+
+// Load returns an argument that adds a filesystem path to load data
+// and Rego modules from. Any file with a *.rego, *.yaml, or *.json
+// extension will be loaded. The path can be either a directory or file,
+// directories are loaded recursively. The optional ignore string patterns
+// can be used to filter which files are used.
+// The Load option can only be used once.
+// Note: Loading files will require a write transaction on the store.
+func Load(paths []string, filter loader.Filter) func(r *Rego) {
+ return func(r *Rego) {
+ r.loadPaths = loadPaths{paths, filter}
+ }
+}
+
+// LoadBundle returns an argument that adds a filesystem path to load
+// a bundle from. The path can be a compressed bundle file or a directory
+// to be loaded as a bundle.
+// Note: Loading bundles will require a write transaction on the store.
+func LoadBundle(path string) func(r *Rego) {
+ return func(r *Rego) {
+ r.bundlePaths = append(r.bundlePaths, path)
+ }
+}
+
+// ParsedBundle returns an argument that adds a bundle to be loaded.
+func ParsedBundle(name string, b *bundle.Bundle) func(r *Rego) {
+ return func(r *Rego) {
+ r.bundles[name] = b
+ }
+}
+
+// Compiler returns an argument that sets the Rego compiler.
+func Compiler(c *ast.Compiler) func(r *Rego) {
+ return func(r *Rego) {
+ r.compiler = c
+ }
+}
+
+// Store returns an argument that sets the policy engine's data storage layer.
+//
+// If using the Load, LoadBundle, or ParsedBundle options then a transaction
+// must also be provided via the Transaction() option. After loading files
+// or bundles the transaction should be aborted or committed.
+func Store(s storage.Store) func(r *Rego) {
+ return func(r *Rego) {
+ r.store = s
+ }
+}
+
+// StoreReadAST returns an argument that sets whether the store should eagerly convert data to AST values.
+//
+// Only applicable when no store has been set on the Rego object through the Store option.
+func StoreReadAST(enabled bool) func(r *Rego) {
+ return func(r *Rego) {
+ r.ownStoreReadAst = enabled
+ }
+}
+
+// Transaction returns an argument that sets the transaction to use for storage
+// layer operations.
+//
+// Requires the store associated with the transaction to be provided via the
+// Store() option. If using Load(), LoadBundle(), or ParsedBundle() options
+// the transaction will likely require write params.
+func Transaction(txn storage.Transaction) func(r *Rego) {
+ return func(r *Rego) {
+ r.txn = txn
+ }
+}
+
+// Metrics returns an argument that sets the metrics collection.
+func Metrics(m metrics.Metrics) func(r *Rego) {
+ return func(r *Rego) {
+ r.metrics = m
+ }
+}
+
+// Instrument returns an argument that enables instrumentation for diagnosing
+// performance issues.
+func Instrument(yes bool) func(r *Rego) {
+ return func(r *Rego) {
+ r.instrument = yes
+ }
+}
+
+// Trace returns an argument that enables tracing on r.
+func Trace(yes bool) func(r *Rego) {
+ return func(r *Rego) {
+ r.trace = yes
+ }
+}
+
+// Tracer returns an argument that adds a query tracer to r.
+// Deprecated: Use QueryTracer instead.
+func Tracer(t topdown.Tracer) func(r *Rego) {
+ return func(r *Rego) {
+ if t != nil {
+ r.queryTracers = append(r.queryTracers, topdown.WrapLegacyTracer(t))
+ }
+ }
+}
+
+// QueryTracer returns an argument that adds a query tracer to r.
+func QueryTracer(t topdown.QueryTracer) func(r *Rego) {
+ return func(r *Rego) {
+ if t != nil {
+ r.queryTracers = append(r.queryTracers, t)
+ }
+ }
+}
+
+// Runtime returns an argument that sets the runtime data to provide to the
+// evaluation engine.
+func Runtime(term *ast.Term) func(r *Rego) {
+ return func(r *Rego) {
+ r.runtime = term
+ }
+}
+
+// Time sets the wall clock time to use during policy evaluation. Prepared queries
+// do not inherit this parameter. Use EvalTime to set the wall clock time when
+// executing a prepared query.
+func Time(x time.Time) func(r *Rego) {
+ return func(r *Rego) {
+ r.time = x
+ }
+}
+
+// Seed sets a reader that will seed randomization required by built-in functions.
+// If a seed is not provided crypto/rand.Reader is used.
+func Seed(r io.Reader) func(*Rego) {
+ return func(e *Rego) {
+ e.seed = r
+ }
+}
+
+// PrintTrace is a helper function to write a human-readable version of the
+// trace to the writer w.
+func PrintTrace(w io.Writer, r *Rego) {
+ if r == nil || r.tracebuf == nil {
+ return
+ }
+ topdown.PrettyTrace(w, *r.tracebuf)
+}
+
+// PrintTraceWithLocation is a helper function to write a human-readable version of the
+// trace to the writer w.
+func PrintTraceWithLocation(w io.Writer, r *Rego) {
+ if r == nil || r.tracebuf == nil {
+ return
+ }
+ topdown.PrettyTraceWithLocation(w, *r.tracebuf)
+}
+
+// UnsafeBuiltins sets the built-in functions to treat as unsafe and not allow.
+// This option is ignored for module compilation if the caller supplies the
+// compiler. This option is always honored for query compilation. Provide an
+// empty (non-nil) map to disable checks on queries.
+func UnsafeBuiltins(unsafeBuiltins map[string]struct{}) func(r *Rego) {
+ return func(r *Rego) {
+ r.unsafeBuiltins = unsafeBuiltins
+ }
+}
+
+// SkipBundleVerification skips verification of a signed bundle.
+func SkipBundleVerification(yes bool) func(r *Rego) {
+ return func(r *Rego) {
+ r.skipBundleVerification = yes
+ }
+}
+
+// BundleActivatorPlugin sets the name of the activator plugin used to load bundles into the store.
+func BundleActivatorPlugin(name string) func(r *Rego) {
+ return func(r *Rego) {
+ r.bundleActivationPlugin = name
+ }
+}
+
+// BundleLazyLoadingMode sets the bundle loading mode. If true, bundles will be
+// read in lazy mode. In this mode, data files in the bundle will not be
+// deserialized and the check to validate that the bundle data does not contain
+// paths outside the bundle's roots will not be performed while reading the bundle.
+func BundleLazyLoadingMode(yes bool) func(r *Rego) {
+ return func(r *Rego) {
+ r.enableBundleLazyLoadingMode = yes
+ }
+}
+
+// InterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize
+// during evaluation.
+func InterQueryBuiltinCache(c cache.InterQueryCache) func(r *Rego) {
+ return func(r *Rego) {
+ r.interQueryBuiltinCache = c
+ }
+}
+
+// InterQueryBuiltinValueCache sets the inter-query value cache that built-in functions can utilize
+// during evaluation.
+func InterQueryBuiltinValueCache(c cache.InterQueryValueCache) func(r *Rego) {
+ return func(r *Rego) {
+ r.interQueryBuiltinValueCache = c
+ }
+}
+
+// NDBuiltinCache sets the non-deterministic builtins cache.
+func NDBuiltinCache(c builtins.NDBCache) func(r *Rego) {
+ return func(r *Rego) {
+ r.ndBuiltinCache = c
+ }
+}
+
+// StrictBuiltinErrors tells the evaluator to treat all built-in function errors as fatal errors.
+func StrictBuiltinErrors(yes bool) func(r *Rego) {
+ return func(r *Rego) {
+ r.strictBuiltinErrors = yes
+ }
+}
+
+// BuiltinErrorList supplies an error slice to store built-in function errors.
+func BuiltinErrorList(list *[]topdown.Error) func(r *Rego) {
+ return func(r *Rego) {
+ r.builtinErrorList = list
+ }
+}
+
+// Resolver sets a Resolver for a specified ref path.
+func Resolver(ref ast.Ref, r resolver.Resolver) func(r *Rego) {
+ return func(rego *Rego) {
+ rego.resolvers = append(rego.resolvers, refResolver{ref, r})
+ }
+}
+
+// Schemas sets the schemaSet
+func Schemas(x *ast.SchemaSet) func(r *Rego) {
+ return func(r *Rego) {
+ r.schemaSet = x
+ }
+}
+
+// Capabilities configures the underlying compiler's capabilities.
+// This option is ignored for module compilation if the caller supplies the
+// compiler.
+func Capabilities(c *ast.Capabilities) func(r *Rego) {
+ return func(r *Rego) {
+ r.capabilities = c
+ }
+}
+
+// Target sets the runtime to exercise.
+func Target(t string) func(r *Rego) {
+ return func(r *Rego) {
+ r.target = t
+ }
+}
+
+// GenerateJSON sets the AST to JSON converter for the results.
+func GenerateJSON(f func(*ast.Term, *EvalContext) (any, error)) func(r *Rego) {
+ return func(r *Rego) {
+ r.generateJSON = f
+ }
+}
+
+// PrintHook sets the object to use for handling print statement outputs.
+func PrintHook(h print.Hook) func(r *Rego) {
+ return func(r *Rego) {
+ r.printHook = h
+ }
+}
+
+// DistributedTracingOpts sets the options to be used by distributed tracing.
+func DistributedTracingOpts(tr tracing.Options) func(r *Rego) {
+ return func(r *Rego) {
+ r.distributedTracingOpts = tr
+ }
+}
+
+// EnablePrintStatements enables print() calls. If this option is not provided,
+// print() calls will be erased from the policy. This option only applies to
+// queries and policies that passed as raw strings, i.e., this function will not
+// have any affect if the caller supplies the ast.Compiler instance.
+func EnablePrintStatements(yes bool) func(r *Rego) {
+ return func(r *Rego) {
+ r.enablePrintStatements = yes
+ }
+}
+
+// Strict enables or disables strict-mode in the compiler
+func Strict(yes bool) func(r *Rego) {
+ return func(r *Rego) {
+ r.strict = yes
+ }
+}
+
+func SetRegoVersion(version ast.RegoVersion) func(r *Rego) {
+ return func(r *Rego) {
+ r.regoVersion = version
+ }
+}
+
+// CompilerHook sets a hook function that will be called after the compiler is initialized.
+// This is only called if the compiler has not been provided already.
+func CompilerHook(hook func(*ast.Compiler)) func(r *Rego) {
+ return func(r *Rego) {
+ r.compilerHook = hook
+ }
+}
+
+// EvalMode lets you override the evaluation mode.
+func EvalMode(mode ast.CompilerEvalMode) func(r *Rego) {
+ return func(r *Rego) {
+ r.evalMode = &mode
+ }
+}
+
+// New returns a new Rego object.
+func New(options ...func(r *Rego)) *Rego {
+ r := &Rego{
+ parsedModules: map[string]*ast.Module{},
+ capture: map[*ast.Expr]ast.Var{},
+ compiledQueries: map[queryType]compiledQuery{},
+ builtinDecls: map[string]*ast.Builtin{},
+ builtinFuncs: map[string]*topdown.Builtin{},
+ bundles: map[string]*bundle.Bundle{},
+ }
+
+ for _, option := range options {
+ option(r)
+ }
+
+ callHook := r.compiler == nil // call hook only if we created the compiler here
+
+ if r.compiler == nil {
+ r.compiler = ast.NewCompiler().
+ WithUnsafeBuiltins(r.unsafeBuiltins).
+ WithBuiltins(r.builtinDecls).
+ WithDebug(r.dump).
+ WithSchemas(r.schemaSet).
+ WithCapabilities(r.capabilities).
+ WithEnablePrintStatements(r.enablePrintStatements).
+ WithStrict(r.strict).
+ WithUseTypeCheckAnnotations(true)
+
+ // topdown could be target "" or "rego", but both could be overridden by
+ // a target plugin (checked below)
+ if r.target == targetWasm {
+ r.compiler = r.compiler.WithEvalMode(ast.EvalModeIR)
+ }
+
+ if r.regoVersion != ast.RegoUndefined {
+ r.compiler = r.compiler.WithDefaultRegoVersion(r.regoVersion)
+ }
+ }
+
+ if r.store == nil {
+ if bundle.HasExtension() {
+ r.store = bundle.BundleExtStore()
+ } else {
+ r.store = inmem.NewWithOpts(inmem.OptReturnASTValuesOnRead(r.ownStoreReadAst))
+ }
+ r.ownStore = true
+ } else {
+ r.ownStore = false
+ }
+
+ if r.metrics == nil {
+ r.metrics = metrics.New()
+ }
+
+ if r.instrument {
+ r.instrumentation = topdown.NewInstrumentation(r.metrics)
+ r.compiler.WithMetrics(r.metrics)
+ }
+
+ if r.trace {
+ r.tracebuf = topdown.NewBufferTracer()
+ r.queryTracers = append(r.queryTracers, r.tracebuf)
+ }
+
+ if r.partialNamespace == "" {
+ r.partialNamespace = defaultPartialNamespace
+ }
+
+ if r.generateJSON == nil {
+ r.generateJSON = generateJSON
+ }
+
+ if r.pluginMgr != nil {
+ for _, pluginName := range r.pluginMgr.Plugins() {
+ p := r.pluginMgr.Plugin(pluginName)
+ if p0, ok := p.(TargetPlugin); ok {
+ r.plugins = append(r.plugins, p0)
+ }
+ }
+ }
+
+ if t := r.targetPlugin(r.target); t != nil {
+ r.compiler = r.compiler.WithEvalMode(ast.EvalModeIR)
+ }
+
+ if r.evalMode != nil {
+ r.compiler = r.compiler.WithEvalMode(*r.evalMode)
+ }
+
+ if r.compilerHook != nil && callHook {
+ r.compilerHook(r.compiler)
+ }
+
+ return r
+}
+
+// Eval evaluates this Rego object and returns a ResultSet.
+func (r *Rego) Eval(ctx context.Context) (ResultSet, error) {
+ var err error
+ var txnClose transactionCloser
+ r.txn, txnClose, err = r.getTxn(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ pq, err := r.PrepareForEval(ctx)
+ if err != nil {
+ _ = txnClose(ctx, err) // Ignore error
+ return nil, err
+ }
+
+ evalArgs := []EvalOption{
+ EvalTransaction(r.txn),
+ EvalMetrics(r.metrics),
+ EvalInstrument(r.instrument),
+ EvalTime(r.time),
+ EvalInterQueryBuiltinCache(r.interQueryBuiltinCache),
+ EvalInterQueryBuiltinValueCache(r.interQueryBuiltinValueCache),
+ EvalSeed(r.seed),
+ }
+
+ if r.ndBuiltinCache != nil {
+ evalArgs = append(evalArgs, EvalNDBuiltinCache(r.ndBuiltinCache))
+ }
+
+ for _, qt := range r.queryTracers {
+ evalArgs = append(evalArgs, EvalQueryTracer(qt))
+ }
+
+ for i := range r.resolvers {
+ evalArgs = append(evalArgs, EvalResolver(r.resolvers[i].ref, r.resolvers[i].r))
+ }
+
+ rs, err := pq.Eval(ctx, evalArgs...)
+ txnErr := txnClose(ctx, err) // Always call closer
+ if err == nil {
+ err = txnErr
+ }
+ return rs, err
+}
+
+// PartialEval has been deprecated and renamed to PartialResult.
+func (r *Rego) PartialEval(ctx context.Context) (PartialResult, error) {
+ return r.PartialResult(ctx)
+}
+
+// PartialResult partially evaluates this Rego object and returns a PartialResult.
+func (r *Rego) PartialResult(ctx context.Context) (PartialResult, error) {
+ var err error
+ var txnClose transactionCloser
+ r.txn, txnClose, err = r.getTxn(ctx)
+ if err != nil {
+ return PartialResult{}, err
+ }
+
+ pq, err := r.PrepareForEval(ctx, WithPartialEval())
+ txnErr := txnClose(ctx, err) // Always call closer
+ if err != nil {
+ return PartialResult{}, err
+ }
+ if txnErr != nil {
+ return PartialResult{}, txnErr
+ }
+
+ pr := PartialResult{
+ compiler: pq.r.compiler,
+ store: pq.r.store,
+ body: pq.r.parsedQuery,
+ builtinDecls: pq.r.builtinDecls,
+ builtinFuncs: pq.r.builtinFuncs,
+ }
+
+ return pr, nil
+}
+
+// Partial runs partial evaluation on r and returns the result.
+func (r *Rego) Partial(ctx context.Context) (*PartialQueries, error) {
+ var err error
+ var txnClose transactionCloser
+ r.txn, txnClose, err = r.getTxn(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ pq, err := r.PrepareForPartial(ctx)
+ if err != nil {
+ _ = txnClose(ctx, err) // Ignore error
+ return nil, err
+ }
+
+ evalArgs := []EvalOption{
+ EvalTransaction(r.txn),
+ EvalMetrics(r.metrics),
+ EvalInstrument(r.instrument),
+ EvalInterQueryBuiltinCache(r.interQueryBuiltinCache),
+ EvalInterQueryBuiltinValueCache(r.interQueryBuiltinValueCache),
+ }
+
+ if r.ndBuiltinCache != nil {
+ evalArgs = append(evalArgs, EvalNDBuiltinCache(r.ndBuiltinCache))
+ }
+
+ for _, t := range r.queryTracers {
+ evalArgs = append(evalArgs, EvalQueryTracer(t))
+ }
+
+ for i := range r.resolvers {
+ evalArgs = append(evalArgs, EvalResolver(r.resolvers[i].ref, r.resolvers[i].r))
+ }
+
+ pqs, err := pq.Partial(ctx, evalArgs...)
+ txnErr := txnClose(ctx, err) // Always call closer
+ if err == nil {
+ err = txnErr
+ }
+ return pqs, err
+}
+
+// CompileOption defines a function to set options on Compile calls.
+type CompileOption func(*CompileContext)
+
+// CompileContext contains options for Compile calls.
+type CompileContext struct {
+ partial bool
+}
+
+// CompilePartial defines an option to control whether partial evaluation is run
+// before the query is planned and compiled.
+func CompilePartial(yes bool) CompileOption {
+ return func(cfg *CompileContext) {
+ cfg.partial = yes
+ }
+}
+
+// Compile returns a compiled policy query.
+func (r *Rego) Compile(ctx context.Context, opts ...CompileOption) (*CompileResult, error) {
+ var cfg CompileContext
+
+ for _, opt := range opts {
+ opt(&cfg)
+ }
+
+ var queries []ast.Body
+ modules := make([]*ast.Module, 0, len(r.compiler.Modules))
+
+ if cfg.partial {
+
+ pq, err := r.Partial(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if r.dump != nil {
+ if len(pq.Queries) != 0 {
+ msg := fmt.Sprintf("QUERIES (%d total):", len(pq.Queries))
+ fmt.Fprintln(r.dump, msg)
+ fmt.Fprintln(r.dump, strings.Repeat("-", len(msg)))
+ for i := range pq.Queries {
+ fmt.Println(pq.Queries[i])
+ }
+ fmt.Fprintln(r.dump)
+ }
+ if len(pq.Support) != 0 {
+ msg := fmt.Sprintf("SUPPORT (%d total):", len(pq.Support))
+ fmt.Fprintln(r.dump, msg)
+ fmt.Fprintln(r.dump, strings.Repeat("-", len(msg)))
+ for i := range pq.Support {
+ fmt.Println(pq.Support[i])
+ }
+ fmt.Fprintln(r.dump)
+ }
+ }
+
+ queries = pq.Queries
+ modules = pq.Support
+
+ for _, module := range r.compiler.Modules {
+ modules = append(modules, module)
+ }
+ } else {
+ var err error
+ // If creating a new transaction it should be closed before calling the
+ // planner to avoid holding open the transaction longer than needed.
+ //
+ // TODO(tsandall): in future, planner could make use of store, in which
+ // case this will need to change.
+ var txnClose transactionCloser
+ r.txn, txnClose, err = r.getTxn(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ err = r.prepare(ctx, compileQueryType, nil)
+ txnErr := txnClose(ctx, err) // Always call closer
+ if err != nil {
+ return nil, err
+ }
+ if txnErr != nil {
+ return nil, err
+ }
+
+ for _, module := range r.compiler.Modules {
+ modules = append(modules, module)
+ }
+
+ queries = []ast.Body{r.compiledQueries[compileQueryType].query}
+ }
+
+ if tgt := r.targetPlugin(r.target); tgt != nil {
+ return nil, errors.New("unsupported for rego target plugins")
+ }
+
+ return r.compileWasm(modules, queries, compileQueryType) // TODO(sr) control flow is funky here
+}
+
+func (r *Rego) compileWasm(_ []*ast.Module, queries []ast.Body, qType queryType) (*CompileResult, error) {
+ policy, err := r.planQuery(queries, qType)
+ if err != nil {
+ return nil, err
+ }
+
+ m, err := wasm.New().WithPolicy(policy).Compile()
+ if err != nil {
+ return nil, err
+ }
+
+ var out bytes.Buffer
+ if err := encoding.WriteModule(&out, m); err != nil {
+ return nil, err
+ }
+
+ return &CompileResult{
+ Bytes: out.Bytes(),
+ }, nil
+}
+
+// PrepareOption defines a function to set an option to control
+// the behavior of the Prepare call.
+type PrepareOption func(*PrepareConfig)
+
+// PrepareConfig holds settings to control the behavior of the
+// Prepare call.
+type PrepareConfig struct {
+ doPartialEval bool
+ disableInlining *[]string
+ builtinFuncs map[string]*topdown.Builtin
+}
+
+// WithPartialEval configures an option for PrepareForEval
+// which will have it perform partial evaluation while preparing
+// the query (similar to rego.Rego#PartialResult)
+func WithPartialEval() PrepareOption {
+ return func(p *PrepareConfig) {
+ p.doPartialEval = true
+ }
+}
+
+// WithNoInline adds a set of paths to exclude from partial evaluation inlining.
+func WithNoInline(paths []string) PrepareOption {
+ return func(p *PrepareConfig) {
+ p.disableInlining = &paths
+ }
+}
+
+// WithBuiltinFuncs carries the rego.Function{1,2,3} per-query function definitions
+// to the target plugins.
+func WithBuiltinFuncs(bis map[string]*topdown.Builtin) PrepareOption {
+ return func(p *PrepareConfig) {
+ if p.builtinFuncs == nil {
+ p.builtinFuncs = maps.Clone(bis)
+ } else {
+ maps.Copy(p.builtinFuncs, bis)
+ }
+ }
+}
+
+// BuiltinFuncs allows retrieving the builtin funcs set via PrepareOption
+// WithBuiltinFuncs.
+func (p *PrepareConfig) BuiltinFuncs() map[string]*topdown.Builtin {
+ return p.builtinFuncs
+}
+
+// PrepareForEval will parse inputs, modules, and query arguments in preparation
+// of evaluating them.
+func (r *Rego) PrepareForEval(ctx context.Context, opts ...PrepareOption) (PreparedEvalQuery, error) {
+ if !r.hasQuery() {
+ return PreparedEvalQuery{}, errors.New("cannot evaluate empty query")
+ }
+
+ pCfg := &PrepareConfig{}
+ for _, o := range opts {
+ o(pCfg)
+ }
+
+ var err error
+ var txnClose transactionCloser
+ r.txn, txnClose, err = r.getTxn(ctx)
+ if err != nil {
+ return PreparedEvalQuery{}, err
+ }
+
+ // If the caller wanted to do partial evaluation as part of preparation
+ // do it now and use the new Rego object.
+ if pCfg.doPartialEval {
+
+ pr, err := r.partialResult(ctx, pCfg)
+ if err != nil {
+ _ = txnClose(ctx, err) // Ignore error
+ return PreparedEvalQuery{}, err
+ }
+
+ // Prepare the new query using the result of partial evaluation
+ pq, err := pr.Rego(Transaction(r.txn)).PrepareForEval(ctx)
+ txnErr := txnClose(ctx, err)
+ if err != nil {
+ return pq, err
+ }
+ return pq, txnErr
+ }
+
+ err = r.prepare(ctx, evalQueryType, []extraStage{
+ {
+ after: "ResolveRefs",
+ stage: ast.QueryCompilerStageDefinition{
+ Name: "RewriteToCaptureValue",
+ MetricName: "query_compile_stage_rewrite_to_capture_value",
+ Stage: r.rewriteQueryToCaptureValue,
+ },
+ },
+ })
+ if err != nil {
+ _ = txnClose(ctx, err) // Ignore error
+ return PreparedEvalQuery{}, err
+ }
+
+ switch r.target {
+ case targetWasm: // TODO(sr): make wasm a target plugin, too
+
+ if r.hasWasmModule() {
+ _ = txnClose(ctx, err) // Ignore error
+ return PreparedEvalQuery{}, errors.New("wasm target not supported")
+ }
+
+ var modules []*ast.Module
+ for _, module := range r.compiler.Modules {
+ modules = append(modules, module)
+ }
+
+ queries := []ast.Body{r.compiledQueries[evalQueryType].query}
+
+ e, err := opa.LookupEngine(targetWasm)
+ if err != nil {
+ return PreparedEvalQuery{}, err
+ }
+
+ // nolint: staticcheck // SA4006 false positive
+ cr, err := r.compileWasm(modules, queries, evalQueryType)
+ if err != nil {
+ _ = txnClose(ctx, err) // Ignore error
+ return PreparedEvalQuery{}, err
+ }
+
+ // nolint: staticcheck // SA4006 false positive
+ data, err := r.store.Read(ctx, r.txn, storage.RootPath)
+ if err != nil {
+ _ = txnClose(ctx, err) // Ignore error
+ return PreparedEvalQuery{}, err
+ }
+
+ o, err := e.New().WithPolicyBytes(cr.Bytes).WithDataJSON(data).Init()
+ if err != nil {
+ _ = txnClose(ctx, err) // Ignore error
+ return PreparedEvalQuery{}, err
+ }
+ r.opa = o
+
+ case targetRego: // do nothing, don't lookup default plugin
+ default: // either a specific plugin target, or one that is default
+ if tgt := r.targetPlugin(r.target); tgt != nil {
+ queries := []ast.Body{r.compiledQueries[evalQueryType].query}
+ pol, err := r.planQuery(queries, evalQueryType)
+ if err != nil {
+ return PreparedEvalQuery{}, err
+ }
+ // always add the builtins provided via rego.FunctionN options
+ opts = append(opts, WithBuiltinFuncs(r.builtinFuncs))
+ r.targetPrepState, err = tgt.PrepareForEval(ctx, pol, opts...)
+ if err != nil {
+ return PreparedEvalQuery{}, err
+ }
+ }
+ }
+
+ txnErr := txnClose(ctx, err) // Always call closer
+ if txnErr != nil {
+ return PreparedEvalQuery{}, txnErr
+ }
+
+ return PreparedEvalQuery{preparedQuery{r, pCfg}}, err
+}
+
+// PrepareForPartial will parse inputs, modules, and query arguments in preparation
+// of partially evaluating them.
+func (r *Rego) PrepareForPartial(ctx context.Context, opts ...PrepareOption) (PreparedPartialQuery, error) {
+ if !r.hasQuery() {
+ return PreparedPartialQuery{}, errors.New("cannot evaluate empty query")
+ }
+
+ pCfg := &PrepareConfig{}
+ for _, o := range opts {
+ o(pCfg)
+ }
+
+ var err error
+ var txnClose transactionCloser
+ r.txn, txnClose, err = r.getTxn(ctx)
+ if err != nil {
+ return PreparedPartialQuery{}, err
+ }
+
+ err = r.prepare(ctx, partialQueryType, []extraStage{
+ {
+ after: "CheckSafety",
+ stage: ast.QueryCompilerStageDefinition{
+ Name: "RewriteEquals",
+ MetricName: "query_compile_stage_rewrite_equals",
+ Stage: r.rewriteEqualsForPartialQueryCompile,
+ },
+ },
+ })
+ txnErr := txnClose(ctx, err) // Always call closer
+ if err != nil {
+ return PreparedPartialQuery{}, err
+ }
+ if txnErr != nil {
+ return PreparedPartialQuery{}, txnErr
+ }
+
+ return PreparedPartialQuery{preparedQuery{r, pCfg}}, err
+}
+
+func (r *Rego) prepare(ctx context.Context, qType queryType, extras []extraStage) error {
+ var err error
+
+ r.parsedInput, err = r.parseInput()
+ if err != nil {
+ return err
+ }
+
+ err = r.loadFiles(ctx, r.txn, r.metrics)
+ if err != nil {
+ return err
+ }
+
+ err = r.loadBundles(ctx, r.txn, r.metrics)
+ if err != nil {
+ return err
+ }
+
+ err = r.parseModules(ctx, r.txn, r.metrics)
+ if err != nil {
+ return err
+ }
+
+ // Compile the modules *before* the query, else functions
+ // defined in the module won't be found...
+ err = r.compileModules(ctx, r.txn, r.metrics)
+ if err != nil {
+ return err
+ }
+
+ imports, err := r.prepareImports()
+ if err != nil {
+ return err
+ }
+
+ queryImports := []*ast.Import{}
+ for _, imp := range imports {
+ path := imp.Path.Value.(ast.Ref)
+ if path.HasPrefix([]*ast.Term{ast.FutureRootDocument}) || path.HasPrefix([]*ast.Term{ast.RegoRootDocument}) {
+ queryImports = append(queryImports, imp)
+ }
+ }
+
+ r.parsedQuery, err = r.parseQuery(queryImports, r.metrics)
+ if err != nil {
+ return err
+ }
+
+ err = r.compileAndCacheQuery(qType, r.parsedQuery, imports, r.metrics, extras)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *Rego) parseModules(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error {
+ if len(r.modules) == 0 {
+ return nil
+ }
+
+ ids, err := r.store.ListPolicies(ctx, txn)
+ if err != nil {
+ return err
+ }
+
+ m.Timer(metrics.RegoModuleParse).Start()
+ defer m.Timer(metrics.RegoModuleParse).Stop()
+ var errs Errors
+
+ popts := ast.ParserOptions{
+ RegoVersion: r.regoVersion,
+ Capabilities: r.capabilities,
+ }
+
+ // Parse any modules that are saved to the store, but only if
+ // another compile step is going to occur (ie. we have parsed modules
+ // that need to be compiled).
+ for _, id := range ids {
+ // if it is already on the compiler we're using
+ // then don't bother to re-parse it from source
+ if _, haveMod := r.compiler.Modules[id]; haveMod {
+ continue
+ }
+
+ bs, err := r.store.GetPolicy(ctx, txn, id)
+ if err != nil {
+ return err
+ }
+
+ parsed, err := ast.ParseModuleWithOpts(id, string(bs), popts)
+ if err != nil {
+ errs = append(errs, err)
+ }
+
+ r.parsedModules[id] = parsed
+ }
+
+ // Parse any passed in as arguments to the Rego object
+ for _, module := range r.modules {
+ p, err := module.ParseWithOpts(popts)
+ if err != nil {
+ switch errorWithType := err.(type) {
+ case ast.Errors:
+ for _, e := range errorWithType {
+ errs = append(errs, e)
+ }
+ default:
+ errs = append(errs, errorWithType)
+ }
+ }
+ r.parsedModules[module.filename] = p
+ }
+
+ if len(errs) > 0 {
+ return errs
+ }
+
+ return nil
+}
+
+func (r *Rego) loadFiles(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error {
+ if len(r.loadPaths.paths) == 0 {
+ return nil
+ }
+
+ m.Timer(metrics.RegoLoadFiles).Start()
+ defer m.Timer(metrics.RegoLoadFiles).Stop()
+
+ result, err := loader.NewFileLoader().
+ WithMetrics(m).
+ WithProcessAnnotation(true).
+ WithBundleLazyLoadingMode(bundle.HasExtension()).
+ WithRegoVersion(r.regoVersion).
+ WithCapabilities(r.capabilities).
+ Filtered(r.loadPaths.paths, r.loadPaths.filter)
+ if err != nil {
+ return err
+ }
+ for name, mod := range result.Modules {
+ r.parsedModules[name] = mod.Parsed
+ }
+
+ if len(result.Documents) > 0 {
+ err = r.store.Write(ctx, txn, storage.AddOp, storage.RootPath, result.Documents)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *Rego) loadBundles(_ context.Context, _ storage.Transaction, m metrics.Metrics) error {
+ if len(r.bundlePaths) == 0 {
+ return nil
+ }
+
+ m.Timer(metrics.RegoLoadBundles).Start()
+ defer m.Timer(metrics.RegoLoadBundles).Stop()
+
+ for _, path := range r.bundlePaths {
+ bndl, err := loader.NewFileLoader().
+ WithMetrics(m).
+ WithProcessAnnotation(true).
+ WithBundleLazyLoadingMode(bundle.HasExtension()).
+ WithSkipBundleVerification(r.skipBundleVerification).
+ WithRegoVersion(r.regoVersion).
+ WithCapabilities(r.capabilities).
+ AsBundle(path)
+ if err != nil {
+ return fmt.Errorf("loading error: %s", err)
+ }
+ r.bundles[path] = bndl
+ }
+ return nil
+}
+
+func (r *Rego) parseInput() (ast.Value, error) {
+ if r.parsedInput != nil {
+ return r.parsedInput, nil
+ }
+ return r.parseRawInput(r.rawInput, r.metrics)
+}
+
+func (*Rego) parseRawInput(rawInput *any, m metrics.Metrics) (ast.Value, error) {
+ var input ast.Value
+
+ if rawInput == nil {
+ return input, nil
+ }
+
+ m.Timer(metrics.RegoInputParse).Start()
+ defer m.Timer(metrics.RegoInputParse).Stop()
+
+ rawPtr := util.Reference(rawInput)
+
+ // roundtrip through json: this turns slices (e.g. []string, []bool) into
+ // []any, the only array type ast.InterfaceToValue can work with
+ if err := util.RoundTrip(rawPtr); err != nil {
+ return nil, err
+ }
+
+ return ast.InterfaceToValue(*rawPtr)
+}
+
+func (r *Rego) parseQuery(queryImports []*ast.Import, m metrics.Metrics) (ast.Body, error) {
+ if r.parsedQuery != nil {
+ return r.parsedQuery, nil
+ }
+
+ m.Timer(metrics.RegoQueryParse).Start()
+ defer m.Timer(metrics.RegoQueryParse).Stop()
+
+ popts, err := future.ParserOptionsFromFutureImports(queryImports)
+ if err != nil {
+ return nil, err
+ }
+ popts.RegoVersion = r.regoVersion
+ popts, err = parserOptionsFromRegoVersionImport(queryImports, popts)
+ if err != nil {
+ return nil, err
+ }
+ popts.SkipRules = true
+ popts.Capabilities = r.capabilities
+
+ return ast.ParseBodyWithOpts(r.query, popts)
+}
+
+func parserOptionsFromRegoVersionImport(imports []*ast.Import, popts ast.ParserOptions) (ast.ParserOptions, error) {
+ for _, imp := range imports {
+ path := imp.Path.Value.(ast.Ref)
+ if ast.Compare(path, ast.RegoV1CompatibleRef) == 0 {
+ popts.RegoVersion = ast.RegoV1
+ return popts, nil
+ }
+ }
+ return popts, nil
+}
+
+func (r *Rego) compileModules(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error {
+ // Only compile again if there are new modules.
+ if len(r.bundles) > 0 || len(r.parsedModules) > 0 {
+
+ // The bundle.Activate call will activate any bundles passed in
+ // (ie compile + handle data store changes), and include any of
+ // the additional modules passed in. If no bundles are provided
+ // it will only compile the passed in modules.
+ // Use this as the single-point of compiling everything only a
+ // single time.
+ opts := &bundle.ActivateOpts{
+ Ctx: ctx,
+ Store: r.store,
+ Txn: txn,
+ Compiler: r.compilerForTxn(ctx, r.store, txn),
+ Metrics: m,
+ Bundles: r.bundles,
+ ExtraModules: r.parsedModules,
+ ParserOptions: ast.ParserOptions{RegoVersion: r.regoVersion},
+ }
+ err := bundle.Activate(opts)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Ensure all configured resolvers from the store are loaded. Skip if any were explicitly provided.
+ if len(r.resolvers) == 0 {
+ resolvers, err := bundleUtils.LoadWasmResolversFromStore(ctx, r.store, txn, r.bundles)
+ if err != nil {
+ return err
+ }
+
+ for _, rslvr := range resolvers {
+ for _, ep := range rslvr.Entrypoints() {
+ r.resolvers = append(r.resolvers, refResolver{ep, rslvr})
+ }
+ }
+ }
+ return nil
+}
+
+func (r *Rego) compileAndCacheQuery(qType queryType, query ast.Body, imports []*ast.Import, m metrics.Metrics, extras []extraStage) error {
+ m.Timer(metrics.RegoQueryCompile).Start()
+ defer m.Timer(metrics.RegoQueryCompile).Stop()
+
+ cachedQuery, ok := r.compiledQueries[qType]
+ if ok && cachedQuery.query != nil && cachedQuery.compiler != nil {
+ return nil
+ }
+
+ qc, compiled, err := r.compileQuery(query, imports, m, extras)
+ if err != nil {
+ return err
+ }
+
+ // cache the query for future use
+ r.compiledQueries[qType] = compiledQuery{
+ query: compiled,
+ compiler: qc,
+ }
+ return nil
+}
+
+func (r *Rego) prepareImports() ([]*ast.Import, error) {
+ imports := r.parsedImports
+
+ if len(r.imports) > 0 {
+ s := make([]string, len(r.imports))
+ for i := range r.imports {
+ s[i] = fmt.Sprintf("import %v", r.imports[i])
+ }
+ parsed, err := ast.ParseImports(strings.Join(s, "\n"))
+ if err != nil {
+ return nil, err
+ }
+ imports = append(imports, parsed...)
+ }
+ return imports, nil
+}
+
+func (r *Rego) compileQuery(query ast.Body, imports []*ast.Import, _ metrics.Metrics, extras []extraStage) (ast.QueryCompiler, ast.Body, error) {
+ var pkg *ast.Package
+
+ if r.pkg != "" {
+ var err error
+ pkg, err = ast.ParsePackage(fmt.Sprintf("package %v", r.pkg))
+ if err != nil {
+ return nil, nil, err
+ }
+ } else {
+ pkg = r.parsedPackage
+ }
+
+ qctx := ast.NewQueryContext().
+ WithPackage(pkg).
+ WithImports(imports)
+
+ qc := r.compiler.QueryCompiler().
+ WithContext(qctx).
+ WithUnsafeBuiltins(r.unsafeBuiltins).
+ WithEnablePrintStatements(r.enablePrintStatements).
+ WithStrict(false)
+
+ for _, extra := range extras {
+ qc = qc.WithStageAfter(extra.after, extra.stage)
+ }
+
+ compiled, err := qc.Compile(query)
+
+ return qc, compiled, err
+}
+
+func (r *Rego) eval(ctx context.Context, ectx *EvalContext) (ResultSet, error) {
+ switch {
+ case r.targetPrepState != nil: // target plugin flow
+ var val ast.Value
+ if r.runtime != nil {
+ val = r.runtime.Value
+ }
+ s, err := r.targetPrepState.Eval(ctx, ectx, val)
+ if err != nil {
+ return nil, err
+ }
+ return r.valueToQueryResult(s, ectx)
+ case r.target == targetWasm:
+ return r.evalWasm(ctx, ectx)
+ case r.target == targetRego: // continue
+ }
+
+ q := topdown.NewQuery(ectx.compiledQuery.query).
+ WithQueryCompiler(ectx.compiledQuery.compiler).
+ WithCompiler(r.compiler).
+ WithStore(r.store).
+ WithTransaction(ectx.txn).
+ WithBuiltins(r.builtinFuncs).
+ WithMetrics(ectx.metrics).
+ WithInstrumentation(ectx.instrumentation).
+ WithRuntime(r.runtime).
+ WithIndexing(ectx.indexing).
+ WithEarlyExit(ectx.earlyExit).
+ WithInterQueryBuiltinCache(ectx.interQueryBuiltinCache).
+ WithInterQueryBuiltinValueCache(ectx.interQueryBuiltinValueCache).
+ WithStrictBuiltinErrors(r.strictBuiltinErrors).
+ WithBuiltinErrorList(r.builtinErrorList).
+ WithSeed(ectx.seed).
+ WithPrintHook(ectx.printHook).
+ WithDistributedTracingOpts(r.distributedTracingOpts).
+ WithVirtualCache(ectx.virtualCache).
+ WithBaseCache(ectx.baseCache)
+
+ if !ectx.time.IsZero() {
+ q = q.WithTime(ectx.time)
+ }
+
+ if ectx.ndBuiltinCache != nil {
+ q = q.WithNDBuiltinCache(ectx.ndBuiltinCache)
+ }
+
+ for i := range ectx.queryTracers {
+ q = q.WithQueryTracer(ectx.queryTracers[i])
+ }
+
+ if ectx.parsedInput != nil {
+ q = q.WithInput(ast.NewTerm(ectx.parsedInput))
+ }
+
+ if ectx.httpRoundTripper != nil {
+ q = q.WithHTTPRoundTripper(ectx.httpRoundTripper)
+ }
+
+ for i := range ectx.resolvers {
+ q = q.WithResolver(ectx.resolvers[i].ref, ectx.resolvers[i].r)
+ }
+
+ // Cancel query if context is cancelled or deadline is reached.
+ if ectx.externalCancel == nil {
+ // Create a one-off goroutine to handle cancellation for this query.
+ c := topdown.NewCancel()
+ q = q.WithCancel(c)
+ exit := make(chan struct{})
+ defer close(exit)
+ go waitForDone(ctx, exit, func() {
+ c.Cancel()
+ })
+ } else {
+ // Query cancellation is being handled elsewhere.
+ q = q.WithCancel(ectx.externalCancel)
+ }
+
+ var rs ResultSet
+ err := q.Iter(ctx, func(qr topdown.QueryResult) error {
+ result, err := r.generateResult(qr, ectx)
+ if err != nil {
+ return err
+ }
+ rs = append(rs, result)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if len(rs) == 0 {
+ return nil, nil
+ }
+
+ return rs, nil
+}
+
+func (r *Rego) evalWasm(ctx context.Context, ectx *EvalContext) (ResultSet, error) {
+ input := ectx.rawInput
+ if ectx.parsedInput != nil {
+ i := any(ectx.parsedInput)
+ input = &i
+ }
+ result, err := r.opa.Eval(ctx, opa.EvalOpts{
+ Metrics: r.metrics,
+ Input: input,
+ Time: ectx.time,
+ Seed: ectx.seed,
+ InterQueryBuiltinCache: ectx.interQueryBuiltinCache,
+ NDBuiltinCache: ectx.ndBuiltinCache,
+ PrintHook: ectx.printHook,
+ Capabilities: ectx.capabilities,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ parsed, err := ast.ParseTerm(string(result.Result))
+ if err != nil {
+ return nil, err
+ }
+
+ return r.valueToQueryResult(parsed.Value, ectx)
+}
+
+func (r *Rego) valueToQueryResult(res ast.Value, ectx *EvalContext) (ResultSet, error) {
+ resultSet, ok := res.(ast.Set)
+ if !ok {
+ return nil, errors.New("illegal result type")
+ }
+
+ if resultSet.Len() == 0 {
+ return nil, nil
+ }
+
+ var rs ResultSet
+ err := resultSet.Iter(func(term *ast.Term) error {
+ obj, ok := term.Value.(ast.Object)
+ if !ok {
+ return errors.New("illegal result type")
+ }
+ qr := topdown.QueryResult{}
+ obj.Foreach(func(k, v *ast.Term) {
+ kvt := ast.VarTerm(string(k.Value.(ast.String)))
+ qr[kvt.Value.(ast.Var)] = v
+ })
+ result, err := r.generateResult(qr, ectx)
+ if err != nil {
+ return err
+ }
+ rs = append(rs, result)
+ return nil
+ })
+
+ return rs, err
+}
+
+func (r *Rego) generateResult(qr topdown.QueryResult, ectx *EvalContext) (Result, error) {
+ rewritten := ectx.compiledQuery.compiler.RewrittenVars()
+
+ result := newResult()
+ for k, term := range qr {
+ if rw, ok := rewritten[k]; ok {
+ k = rw
+ }
+ if isTermVar(k) || isTermWasmVar(k) || k.IsGenerated() || k.IsWildcard() {
+ continue
+ }
+
+ v, err := r.generateJSON(term, ectx)
+ if err != nil {
+ return result, err
+ }
+
+ result.Bindings[string(k)] = v
+ }
+
+ for _, expr := range ectx.compiledQuery.query {
+ if expr.Generated {
+ continue
+ }
+
+ if k, ok := r.capture[expr]; ok {
+ v, err := r.generateJSON(qr[k], ectx)
+ if err != nil {
+ return result, err
+ }
+ result.Expressions = append(result.Expressions, newExpressionValue(expr, v))
+ } else {
+ result.Expressions = append(result.Expressions, newExpressionValue(expr, true))
+ }
+
+ }
+ return result, nil
+}
+
+func (r *Rego) partialResult(ctx context.Context, pCfg *PrepareConfig) (PartialResult, error) {
+ err := r.prepare(ctx, partialResultQueryType, []extraStage{
+ {
+ after: "ResolveRefs",
+ stage: ast.QueryCompilerStageDefinition{
+ Name: "RewriteForPartialEval",
+ MetricName: "query_compile_stage_rewrite_for_partial_eval",
+ Stage: r.rewriteQueryForPartialEval,
+ },
+ },
+ })
+ if err != nil {
+ return PartialResult{}, err
+ }
+
+ ectx := &EvalContext{
+ parsedInput: r.parsedInput,
+ metrics: r.metrics,
+ txn: r.txn,
+ partialNamespace: r.partialNamespace,
+ queryTracers: r.queryTracers,
+ compiledQuery: r.compiledQueries[partialResultQueryType],
+ instrumentation: r.instrumentation,
+ indexing: true,
+ resolvers: r.resolvers,
+ capabilities: r.capabilities,
+ strictBuiltinErrors: r.strictBuiltinErrors,
+ nondeterministicBuiltins: r.nondeterministicBuiltins,
+ }
+
+ disableInlining := r.disableInlining
+
+ if pCfg.disableInlining != nil {
+ disableInlining = *pCfg.disableInlining
+ }
+
+ ectx.disableInlining, err = parseStringsToRefs(disableInlining)
+ if err != nil {
+ return PartialResult{}, err
+ }
+
+ pq, err := r.partial(ctx, ectx)
+ if err != nil {
+ return PartialResult{}, err
+ }
+
+ // Construct module for queries.
+ id := fmt.Sprintf("__partialresult__%s__", ectx.partialNamespace)
+
+ module, err := ast.ParseModuleWithOpts(id, "package "+ectx.partialNamespace,
+ ast.ParserOptions{RegoVersion: r.regoVersion})
+ if err != nil {
+ return PartialResult{}, errors.New("bad partial namespace")
+ }
+
+ module.Rules = make([]*ast.Rule, len(pq.Queries))
+ for i, body := range pq.Queries {
+ rule := &ast.Rule{
+ Head: ast.NewHead(ast.Var("__result__"), nil, ast.Wildcard),
+ Body: body,
+ Module: module,
+ }
+ module.Rules[i] = rule
+ if checkPartialResultForRecursiveRefs(body, rule.Path()) {
+ return PartialResult{}, Errors{errPartialEvaluationNotEffective}
+ }
+ }
+
+ // Update compiler with partial evaluation output.
+ r.compiler.Modules[id] = module
+ for i, module := range pq.Support {
+ r.compiler.Modules[fmt.Sprintf("__partialsupport__%s__%d__", ectx.partialNamespace, i)] = module
+ }
+
+ r.metrics.Timer(metrics.RegoModuleCompile).Start()
+ r.compilerForTxn(ctx, r.store, r.txn).Compile(r.compiler.Modules)
+ r.metrics.Timer(metrics.RegoModuleCompile).Stop()
+
+ if r.compiler.Failed() {
+ return PartialResult{}, r.compiler.Errors
+ }
+
+ result := PartialResult{
+ compiler: r.compiler,
+ store: r.store,
+ body: ast.MustParseBody(fmt.Sprintf("data.%v.__result__", ectx.partialNamespace)),
+ builtinDecls: r.builtinDecls,
+ builtinFuncs: r.builtinFuncs,
+ }
+
+ return result, nil
+}
+
+func (r *Rego) partial(ctx context.Context, ectx *EvalContext) (*PartialQueries, error) {
+ var unknowns []*ast.Term
+
+ switch {
+ case ectx.parsedUnknowns != nil:
+ unknowns = ectx.parsedUnknowns
+ case ectx.unknowns != nil:
+ unknowns = make([]*ast.Term, len(ectx.unknowns))
+ for i := range ectx.unknowns {
+ var err error
+ unknowns[i], err = ast.ParseTerm(ectx.unknowns[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+ default:
+ // Use input document as unknown if caller has not specified any.
+ unknowns = []*ast.Term{ast.NewTerm(ast.InputRootRef)}
+ }
+
+ q := topdown.NewQuery(ectx.compiledQuery.query).
+ WithQueryCompiler(ectx.compiledQuery.compiler).
+ WithCompiler(r.compiler).
+ WithStore(r.store).
+ WithTransaction(ectx.txn).
+ WithBuiltins(r.builtinFuncs).
+ WithMetrics(ectx.metrics).
+ WithInstrumentation(ectx.instrumentation).
+ WithUnknowns(unknowns).
+ WithDisableInlining(ectx.disableInlining).
+ WithNondeterministicBuiltins(ectx.nondeterministicBuiltins).
+ WithRuntime(r.runtime).
+ WithIndexing(ectx.indexing).
+ WithEarlyExit(ectx.earlyExit).
+ WithPartialNamespace(ectx.partialNamespace).
+ WithSkipPartialNamespace(r.skipPartialNamespace).
+ WithShallowInlining(r.shallowInlining).
+ WithInterQueryBuiltinCache(ectx.interQueryBuiltinCache).
+ WithInterQueryBuiltinValueCache(ectx.interQueryBuiltinValueCache).
+ WithStrictBuiltinErrors(ectx.strictBuiltinErrors).
+ WithSeed(ectx.seed).
+ WithPrintHook(ectx.printHook)
+
+ if !ectx.time.IsZero() {
+ q = q.WithTime(ectx.time)
+ }
+
+ if ectx.ndBuiltinCache != nil {
+ q = q.WithNDBuiltinCache(ectx.ndBuiltinCache)
+ }
+
+ for i := range ectx.queryTracers {
+ q = q.WithQueryTracer(ectx.queryTracers[i])
+ }
+
+ if ectx.parsedInput != nil {
+ q = q.WithInput(ast.NewTerm(ectx.parsedInput))
+ }
+
+ for i := range ectx.resolvers {
+ q = q.WithResolver(ectx.resolvers[i].ref, ectx.resolvers[i].r)
+ }
+
+ // Cancel query if context is cancelled or deadline is reached.
+ if ectx.externalCancel == nil {
+ // Create a one-off goroutine to handle cancellation for this query.
+ c := topdown.NewCancel()
+ q = q.WithCancel(c)
+ exit := make(chan struct{})
+ defer close(exit)
+ go waitForDone(ctx, exit, func() {
+ c.Cancel()
+ })
+ } else {
+ // Query cancellation is being handled elsewhere.
+ q = q.WithCancel(ectx.externalCancel)
+ }
+
+ queries, support, err := q.PartialRun(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // If the target rego-version is v0, and the rego.v1 import is available, then we attempt to apply it to support modules.
+ if r.regoVersion == ast.RegoV0 &&
+ (r.capabilities == nil ||
+ r.capabilities.ContainsFeature(ast.FeatureRegoV1Import) ||
+ r.capabilities.ContainsFeature(ast.FeatureRegoV1)) {
+
+ for i, mod := range support {
+ // We can't apply the RegoV0CompatV1 version to the support module if it contains rules or vars that
+ // conflict with future keywords.
+ applyRegoVersion := true
+
+ ast.WalkRules(mod, func(r *ast.Rule) bool {
+ name := r.Head.Name
+ if name == "" && len(r.Head.Reference) > 0 {
+ name = r.Head.Reference[0].Value.(ast.Var)
+ }
+ if ast.IsFutureKeywordForRegoVersion(name.String(), ast.RegoV0) {
+ applyRegoVersion = false
+ return true
+ }
+ return false
+ })
+
+ if applyRegoVersion {
+ ast.WalkVars(mod, func(v ast.Var) bool {
+ if ast.IsFutureKeywordForRegoVersion(v.String(), ast.RegoV0) {
+ applyRegoVersion = false
+ return true
+ }
+ return false
+ })
+ }
+
+ if applyRegoVersion {
+ support[i].SetRegoVersion(ast.RegoV0CompatV1)
+ } else {
+ support[i].SetRegoVersion(r.regoVersion)
+ }
+ }
+ } else {
+ // If the target rego-version is not v0, then we apply the target rego-version to the support modules.
+ for i := range support {
+ support[i].SetRegoVersion(r.regoVersion)
+ }
+ }
+
+ pq := &PartialQueries{
+ Queries: queries,
+ Support: support,
+ }
+
+ return pq, nil
+}
+
+func (r *Rego) rewriteQueryToCaptureValue(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) {
+ checkCapture := iteration(query) || len(query) > 1
+
+ for _, expr := range query {
+
+ if expr.Negated {
+ continue
+ }
+
+ if expr.IsAssignment() || expr.IsEquality() {
+ continue
+ }
+
+ var capture *ast.Term
+
+ // If the expression can be evaluated as a function, rewrite it to
+ // capture the return value. E.g., neq(1,2) becomes neq(1,2,x) but
+ // plus(1,2,x) does not get rewritten.
+ switch terms := expr.Terms.(type) {
+ case *ast.Term:
+ capture = r.generateTermVar()
+ expr.Terms = ast.Equality.Expr(terms, capture).Terms
+ r.capture[expr] = capture.Value.(ast.Var)
+ case []*ast.Term:
+ tpe := r.compiler.TypeEnv.Get(terms[0])
+ if !types.Void(tpe) && types.Arity(tpe) == len(terms)-1 {
+ capture = r.generateTermVar()
+ expr.Terms = append(terms, capture)
+ r.capture[expr] = capture.Value.(ast.Var)
+ }
+ }
+
+ if capture != nil && checkCapture {
+ cpy := expr.Copy()
+ cpy.Terms = capture
+ cpy.Generated = true
+ cpy.With = nil
+ query.Append(cpy)
+ }
+ }
+
+ return query, nil
+}
+
+func (*Rego) rewriteQueryForPartialEval(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) {
+ if len(query) != 1 {
+ return nil, errors.New("partial evaluation requires single ref (not multiple expressions)")
+ }
+
+ term, ok := query[0].Terms.(*ast.Term)
+ if !ok {
+ return nil, errors.New("partial evaluation requires ref (not expression)")
+ }
+
+ ref, ok := term.Value.(ast.Ref)
+ if !ok {
+ return nil, fmt.Errorf("partial evaluation requires ref (not %v)", ast.ValueName(term.Value))
+ }
+
+ if !ref.IsGround() {
+ return nil, errors.New("partial evaluation requires ground ref")
+ }
+
+ return ast.NewBody(ast.Equality.Expr(ast.Wildcard, term)), nil
+}
+
+// rewriteEqualsForPartialQueryCompile will rewrite == to = in queries. Normally
+// this wouldn't be done, except for handling queries with the `Partial` API
+// where rewriting them can substantially simplify the result, and it is unlikely
+// that the caller would need expression values.
+func (*Rego) rewriteEqualsForPartialQueryCompile(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) {
+ doubleEq := ast.Equal.Ref()
+ unifyOp := ast.Equality.Ref()
+ ast.WalkExprs(query, func(x *ast.Expr) bool {
+ if x.IsCall() {
+ operator := x.Operator()
+ if operator.Equal(doubleEq) && len(x.Operands()) == 2 {
+ x.SetOperator(ast.NewTerm(unifyOp))
+ }
+ }
+ return false
+ })
+ return query, nil
+}
+
+func (r *Rego) generateTermVar() *ast.Term {
+ r.termVarID++
+ prefix := ast.WildcardPrefix
+ if p := r.targetPlugin(r.target); p != nil {
+ prefix = wasmVarPrefix
+ } else if r.target == targetWasm {
+ prefix = wasmVarPrefix
+ }
+ return ast.VarTerm(fmt.Sprintf("%sterm%v", prefix, r.termVarID))
+}
+
+func (r Rego) hasQuery() bool {
+ return len(r.query) != 0 || len(r.parsedQuery) != 0
+}
+
+func (r Rego) hasWasmModule() bool {
+ for _, b := range r.bundles {
+ if len(b.WasmModules) > 0 {
+ return true
+ }
+ }
+ return false
+}
+
+type transactionCloser func(ctx context.Context, err error) error
+
+// getTxn will conditionally create a read or write transaction suitable for
+// the configured Rego object. The returned function should be used to close the txn
+// regardless of status.
+func (r *Rego) getTxn(ctx context.Context) (storage.Transaction, transactionCloser, error) {
+ noopCloser := func(_ context.Context, _ error) error {
+ return nil // no-op default
+ }
+
+ if r.txn != nil {
+ // Externally provided txn
+ return r.txn, noopCloser, nil
+ }
+
+ // Create a new transaction..
+ params := storage.TransactionParams{}
+
+ // Bundles and data paths may require writing data files or manifests to storage
+ if len(r.bundles) > 0 || len(r.bundlePaths) > 0 || len(r.loadPaths.paths) > 0 {
+
+ // If we were given a store we will *not* write to it, only do that on one
+ // which was created automatically on behalf of the user.
+ if !r.ownStore {
+ return nil, noopCloser, errors.New("unable to start write transaction when store was provided")
+ }
+
+ params.Write = true
+ }
+
+ txn, err := r.store.NewTransaction(ctx, params)
+ if err != nil {
+ return nil, noopCloser, err
+ }
+
+ // Setup a closer function that will abort or commit as needed.
+ closer := func(ctx context.Context, txnErr error) error {
+ var err error
+
+ if txnErr == nil && params.Write {
+ err = r.store.Commit(ctx, txn)
+ } else {
+ r.store.Abort(ctx, txn)
+ }
+
+ // Clear the auto created transaction now that it is closed.
+ r.txn = nil
+
+ return err
+ }
+
+ return txn, closer, nil
+}
+
+func (r *Rego) compilerForTxn(ctx context.Context, store storage.Store, txn storage.Transaction) *ast.Compiler {
+ // Update the compiler to have a valid path conflict check
+ // for the current context and transaction.
+ return r.compiler.WithPathConflictsCheck(storage.NonEmpty(ctx, store, txn))
+}
+
+func checkPartialResultForRecursiveRefs(body ast.Body, path ast.Ref) bool {
+ var stop bool
+ ast.WalkRefs(body, func(x ast.Ref) bool {
+ if !stop {
+ if path.HasPrefix(x) {
+ stop = true
+ }
+ }
+ return stop
+ })
+ return stop
+}
+
+func isTermVar(v ast.Var) bool {
+ return strings.HasPrefix(string(v), ast.WildcardPrefix+"term")
+}
+
+func isTermWasmVar(v ast.Var) bool {
+ return strings.HasPrefix(string(v), wasmVarPrefix+"term")
+}
+
+func waitForDone(ctx context.Context, exit chan struct{}, f func()) {
+ select {
+ case <-exit:
+ return
+ case <-ctx.Done():
+ f()
+ return
+ }
+}
+
+type rawModule struct {
+ filename string
+ module string
+}
+
+func (m rawModule) Parse() (*ast.Module, error) {
+ return ast.ParseModule(m.filename, m.module)
+}
+
+func (m rawModule) ParseWithOpts(opts ast.ParserOptions) (*ast.Module, error) {
+ return ast.ParseModuleWithOpts(m.filename, m.module, opts)
+}
+
+type extraStage struct {
+ after string
+ stage ast.QueryCompilerStageDefinition
+}
+
+type refResolver struct {
+ ref ast.Ref
+ r resolver.Resolver
+}
+
+func iteration(x any) bool {
+ var stopped bool
+
+ vis := ast.NewGenericVisitor(func(x any) bool {
+ switch x := x.(type) {
+ case *ast.Term:
+ if ast.IsComprehension(x.Value) {
+ return true
+ }
+ case ast.Ref:
+ if !stopped {
+ if bi := ast.BuiltinMap[x.String()]; bi != nil {
+ if bi.Relation {
+ stopped = true
+ return stopped
+ }
+ }
+ for i := 1; i < len(x); i++ {
+ if _, ok := x[i].Value.(ast.Var); ok {
+ stopped = true
+ return stopped
+ }
+ }
+ }
+ return stopped
+ }
+ return stopped
+ })
+
+ vis.Walk(x)
+
+ return stopped
+}
+
+func parseStringsToRefs(s []string) ([]ast.Ref, error) {
+ if len(s) == 0 {
+ return nil, nil
+ }
+
+ refs := make([]ast.Ref, len(s))
+ for i := range refs {
+ var err error
+ refs[i], err = ast.ParseRef(s[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return refs, nil
+}
+
+// helper function to finish a built-in function call. If an error occurred,
+// wrap the error and return it. Otherwise, invoke the iterator if the result
+// was defined.
+func finishFunction(name string, bctx topdown.BuiltinContext, result *ast.Term, err error, iter func(*ast.Term) error) error {
+ if err != nil {
+ var e *HaltError
+ sb := strings.Builder{}
+ if errors.As(err, &e) {
+ sb.Grow(len(name) + len(e.Error()) + 2)
+ sb.WriteString(name)
+ sb.WriteString(": ")
+ sb.WriteString(e.Error())
+ tdErr := &topdown.Error{
+ Code: topdown.BuiltinErr,
+ Message: sb.String(),
+ Location: bctx.Location,
+ }
+ return topdown.Halt{Err: tdErr.Wrap(e)}
+ }
+ sb.Grow(len(name) + len(err.Error()) + 2)
+ sb.WriteString(name)
+ sb.WriteString(": ")
+ sb.WriteString(err.Error())
+ tdErr := &topdown.Error{
+ Code: topdown.BuiltinErr,
+ Message: sb.String(),
+ Location: bctx.Location,
+ }
+ return tdErr.Wrap(err)
+ }
+ if result == nil {
+ return nil
+ }
+ return iter(result)
+}
+
+// helper function to return an option that sets a custom built-in function.
+func newFunction(decl *Function, f topdown.BuiltinFunc) func(*Rego) {
+ return func(r *Rego) {
+ r.builtinDecls[decl.Name] = &ast.Builtin{
+ Name: decl.Name,
+ Decl: decl.Decl,
+ Nondeterministic: decl.Nondeterministic,
+ }
+ r.builtinFuncs[decl.Name] = &topdown.Builtin{
+ Decl: r.builtinDecls[decl.Name],
+ Func: f,
+ }
+ }
+}
+
+func generateJSON(term *ast.Term, ectx *EvalContext) (any, error) {
+ return ast.JSONWithOpt(term.Value,
+ ast.JSONOpt{
+ SortSets: ectx.sortSets,
+ CopyMaps: ectx.copyMaps,
+ })
+}
+
+func (r *Rego) planQuery(queries []ast.Body, evalQueryType queryType) (*ir.Policy, error) {
+ modules := make([]*ast.Module, 0, len(r.compiler.Modules))
+ for _, module := range r.compiler.Modules {
+ modules = append(modules, module)
+ }
+
+ decls := make(map[string]*ast.Builtin, len(r.builtinDecls)+len(ast.BuiltinMap))
+ maps.Copy(decls, ast.BuiltinMap)
+ maps.Copy(decls, r.builtinDecls)
+
+ const queryName = "eval" // NOTE(tsandall): the query name is arbitrary
+
+ p := planner.New().
+ WithQueries([]planner.QuerySet{
+ {
+ Name: queryName,
+ Queries: queries,
+ RewrittenVars: r.compiledQueries[evalQueryType].compiler.RewrittenVars(),
+ },
+ }).
+ WithModules(modules).
+ WithBuiltinDecls(decls).
+ WithDebug(r.dump)
+
+ policy, err := p.Plan()
+ if err != nil {
+ return nil, err
+ }
+ if r.dump != nil {
+ fmt.Fprintln(r.dump, "PLAN:")
+ fmt.Fprintln(r.dump, "-----")
+ err = ir.Pretty(r.dump, policy)
+ if err != nil {
+ return nil, err
+ }
+ fmt.Fprintln(r.dump)
+ }
+ return policy, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/rego/resultset.go b/vendor/github.com/open-policy-agent/opa/v1/rego/resultset.go
new file mode 100644
index 0000000000..983de2223e
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/rego/resultset.go
@@ -0,0 +1,90 @@
+package rego
+
+import (
+ "fmt"
+
+ "github.com/open-policy-agent/opa/v1/ast"
+)
+
+// ResultSet represents a collection of output from Rego evaluation. An empty
+// result set represents an undefined query.
+type ResultSet []Result
+
+// Vars represents a collection of variable bindings. The keys are the variable
+// names and the values are the binding values.
+type Vars map[string]any
+
+// WithoutWildcards returns a copy of v with wildcard variables removed.
+func (v Vars) WithoutWildcards() Vars {
+ n := Vars{}
+ for k, v := range v {
+ if ast.Var(k).IsWildcard() || ast.Var(k).IsGenerated() {
+ continue
+ }
+ n[k] = v
+ }
+ return n
+}
+
+// Result defines the output of Rego evaluation.
+type Result struct {
+ Expressions []*ExpressionValue `json:"expressions"`
+ Bindings Vars `json:"bindings,omitempty"`
+}
+
+func newResult() Result {
+ return Result{
+ Bindings: Vars{},
+ }
+}
+
+// Location defines a position in a Rego query or module.
+type Location struct {
+ Row int `json:"row"`
+ Col int `json:"col"`
+}
+
+// ExpressionValue defines the value of an expression in a Rego query.
+type ExpressionValue struct {
+ Value any `json:"value"`
+ Text string `json:"text"`
+ Location *Location `json:"location"`
+}
+
+func newExpressionValue(expr *ast.Expr, value any) *ExpressionValue {
+ result := &ExpressionValue{
+ Value: value,
+ }
+ if expr.Location != nil {
+ result.Text = string(expr.Location.Text)
+ result.Location = &Location{
+ Row: expr.Location.Row,
+ Col: expr.Location.Col,
+ }
+ }
+ return result
+}
+
+func (ev *ExpressionValue) String() string {
+ return fmt.Sprint(ev.Value)
+}
+
+// Allowed is a helper method that'll return true if all of these conditions hold:
+// - the result set only has one element
+// - there is only one expression in the result set's only element
+// - that expression has the value `true`
+// - there are no bindings.
+//
+// If bindings are present, this will yield `false`: it would be a pitfall to
+// return `true` for a query like `data.authz.allow = x`, which always has result
+// set element with value true, but could also have a binding `x: false`.
+func (rs ResultSet) Allowed() bool {
+ if len(rs) == 1 && len(rs[0].Bindings) == 0 {
+ if exprs := rs[0].Expressions; len(exprs) == 1 {
+ if b, ok := exprs[0].Value.(bool); ok {
+ return b
+ }
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/open-policy-agent/opa/resolver/interface.go b/vendor/github.com/open-policy-agent/opa/v1/resolver/interface.go
similarity index 86%
rename from vendor/github.com/open-policy-agent/opa/resolver/interface.go
rename to vendor/github.com/open-policy-agent/opa/v1/resolver/interface.go
index fc02329f57..1f04d21c01 100644
--- a/vendor/github.com/open-policy-agent/opa/resolver/interface.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/resolver/interface.go
@@ -7,8 +7,8 @@ package resolver
import (
"context"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/metrics"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/metrics"
)
// Resolver defines an external value resolver for OPA evaluations.
diff --git a/vendor/github.com/open-policy-agent/opa/resolver/wasm/wasm.go b/vendor/github.com/open-policy-agent/opa/v1/resolver/wasm/wasm.go
similarity index 87%
rename from vendor/github.com/open-policy-agent/opa/resolver/wasm/wasm.go
rename to vendor/github.com/open-policy-agent/opa/v1/resolver/wasm/wasm.go
index 9c13879dc3..884e4ca7cc 100644
--- a/vendor/github.com/open-policy-agent/opa/resolver/wasm/wasm.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/resolver/wasm/wasm.go
@@ -6,17 +6,18 @@ package wasm
import (
"context"
+ "errors"
"fmt"
"strconv"
- "github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/internal/rego/opa"
- "github.com/open-policy-agent/opa/resolver"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/resolver"
)
// New creates a new Resolver instance which is using the Wasm module
// policy for the given entrypoint ref.
-func New(entrypoints []ast.Ref, policy []byte, data interface{}) (*Resolver, error) {
+func New(entrypoints []ast.Ref, policy []byte, data any) (*Resolver, error) {
e, err := opa.LookupEngine("wasm")
if err != nil {
return nil, err
@@ -96,9 +97,9 @@ func (r *Resolver) Eval(ctx context.Context, input resolver.Input) (resolver.Res
return resolver.Result{}, fmt.Errorf("internal error: invalid entrypoint id %s", numValue)
}
- var in *interface{}
+ var in *any
if input.Input != nil {
- var str interface{} = []byte(input.Input.String())
+ var str any = []byte(input.Input.String())
in = &str
}
@@ -121,12 +122,12 @@ func (r *Resolver) Eval(ctx context.Context, input resolver.Input) (resolver.Res
}
// SetData will update the external data for the Wasm instance.
-func (r *Resolver) SetData(ctx context.Context, data interface{}) error {
+func (r *Resolver) SetData(ctx context.Context, data any) error {
return r.o.SetData(ctx, data)
}
// SetDataPath will set the provided data on the wasm instance at the specified path.
-func (r *Resolver) SetDataPath(ctx context.Context, path []string, data interface{}) error {
+func (r *Resolver) SetDataPath(ctx context.Context, path []string, data any) error {
return r.o.SetDataPath(ctx, path, data)
}
@@ -144,7 +145,7 @@ func getResult(evalResult *opa.Result) (ast.Value, error) {
resultSet, ok := parsed.Value.(ast.Set)
if !ok {
- return nil, fmt.Errorf("illegal result type")
+ return nil, errors.New("illegal result type")
}
if resultSet.Len() == 0 {
@@ -152,14 +153,14 @@ func getResult(evalResult *opa.Result) (ast.Value, error) {
}
if resultSet.Len() > 1 {
- return nil, fmt.Errorf("illegal result type")
+ return nil, errors.New("illegal result type")
}
var obj ast.Object
err = resultSet.Iter(func(term *ast.Term) error {
obj, ok = term.Value.(ast.Object)
if !ok || obj.Len() != 1 {
- return fmt.Errorf("illegal result type")
+ return errors.New("illegal result type")
}
return nil
})
@@ -167,7 +168,7 @@ func getResult(evalResult *opa.Result) (ast.Value, error) {
return nil, err
}
- result := obj.Get(ast.StringTerm("result"))
+ result := obj.Get(ast.InternedTerm("result"))
return result.Value, nil
}
diff --git a/vendor/github.com/open-policy-agent/opa/schemas/authorizationPolicy.json b/vendor/github.com/open-policy-agent/opa/v1/schemas/authorizationPolicy.json
similarity index 100%
rename from vendor/github.com/open-policy-agent/opa/schemas/authorizationPolicy.json
rename to vendor/github.com/open-policy-agent/opa/v1/schemas/authorizationPolicy.json
diff --git a/vendor/github.com/open-policy-agent/opa/schemas/schemas.go b/vendor/github.com/open-policy-agent/opa/v1/schemas/schemas.go
similarity index 100%
rename from vendor/github.com/open-policy-agent/opa/schemas/schemas.go
rename to vendor/github.com/open-policy-agent/opa/v1/schemas/schemas.go
diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/doc.go b/vendor/github.com/open-policy-agent/opa/v1/storage/doc.go
new file mode 100644
index 0000000000..6fa2f86d98
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/storage/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package storage exposes the policy engine's storage layer.
+package storage
diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/errors.go b/vendor/github.com/open-policy-agent/opa/v1/storage/errors.go
new file mode 100644
index 0000000000..a3d1c00737
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/storage/errors.go
@@ -0,0 +1,121 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package storage
+
+import (
+ "fmt"
+)
+
+const (
+ // InternalErr indicates an unknown, internal error has occurred.
+ InternalErr = "storage_internal_error"
+
+ // NotFoundErr indicates the path used in the storage operation does not
+ // locate a document.
+ NotFoundErr = "storage_not_found_error"
+
+ // WriteConflictErr indicates a write on the path enocuntered a conflicting
+ // value inside the transaction.
+ WriteConflictErr = "storage_write_conflict_error"
+
+ // InvalidPatchErr indicates an invalid patch/write was issued. The patch
+ // was rejected.
+ InvalidPatchErr = "storage_invalid_patch_error"
+
+ // InvalidTransactionErr indicates an invalid operation was performed
+ // inside of the transaction.
+ InvalidTransactionErr = "storage_invalid_txn_error"
+
+ // TriggersNotSupportedErr indicates the caller attempted to register a
+ // trigger against a store that does not support them.
+ TriggersNotSupportedErr = "storage_triggers_not_supported_error"
+
+ // WritesNotSupportedErr indicate the caller attempted to perform a write
+ // against a store that does not support them.
+ WritesNotSupportedErr = "storage_writes_not_supported_error"
+
+ // PolicyNotSupportedErr indicate the caller attempted to perform a policy
+ // management operation against a store that does not support them.
+ PolicyNotSupportedErr = "storage_policy_not_supported_error"
+)
+
+// Error is the error type returned by the storage layer.
+type Error struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+}
+
+func (err *Error) Error() string {
+ if err.Message != "" {
+ return fmt.Sprintf("%v: %v", err.Code, err.Message)
+ }
+ return err.Code
+}
+
+// IsNotFound returns true if this error is a NotFoundErr.
+func IsNotFound(err error) bool {
+ if err, ok := err.(*Error); ok {
+ return err.Code == NotFoundErr
+ }
+ return false
+}
+
+// IsWriteConflictError returns true if this error a WriteConflictErr.
+func IsWriteConflictError(err error) bool {
+ switch err := err.(type) {
+ case *Error:
+ return err.Code == WriteConflictErr
+ }
+ return false
+}
+
+// IsInvalidPatch returns true if this error is a InvalidPatchErr.
+func IsInvalidPatch(err error) bool {
+ switch err := err.(type) {
+ case *Error:
+ return err.Code == InvalidPatchErr
+ }
+ return false
+}
+
+// IsInvalidTransaction returns true if this error is a InvalidTransactionErr.
+func IsInvalidTransaction(err error) bool {
+ switch err := err.(type) {
+ case *Error:
+ return err.Code == InvalidTransactionErr
+ }
+ return false
+}
+
+// IsIndexingNotSupported is a stub for backwards-compatibility.
+//
+// Deprecated: We no longer return IndexingNotSupported errors, so it is
+// unnecessary to check for them.
+func IsIndexingNotSupported(error) bool { return false }
+
+func writeConflictError(path Path) *Error {
+ return &Error{
+ Code: WriteConflictErr,
+ Message: path.String(),
+ }
+}
+
+func triggersNotSupportedError() *Error {
+ return &Error{
+ Code: TriggersNotSupportedErr,
+ }
+}
+
+func writesNotSupportedError() *Error {
+ return &Error{
+ Code: WritesNotSupportedErr,
+ }
+}
+
+func policyNotSupportedError() *Error {
+ return &Error{
+ Code: PolicyNotSupportedErr,
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/storage/inmem/ast.go b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/ast.go
similarity index 85%
rename from vendor/github.com/open-policy-agent/opa/storage/inmem/ast.go
rename to vendor/github.com/open-policy-agent/opa/v1/storage/inmem/ast.go
index 5a8a6743fa..40f18ab0de 100644
--- a/vendor/github.com/open-policy-agent/opa/storage/inmem/ast.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/ast.go
@@ -8,10 +8,10 @@ import (
"fmt"
"strconv"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/storage"
- "github.com/open-policy-agent/opa/storage/internal/errors"
- "github.com/open-policy-agent/opa/storage/internal/ptr"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/storage"
+ "github.com/open-policy-agent/opa/v1/storage/internal/errors"
+ "github.com/open-policy-agent/opa/v1/storage/internal/ptr"
)
type updateAST struct {
@@ -28,7 +28,7 @@ func (u *updateAST) Remove() bool {
return u.remove
}
-func (u *updateAST) Set(v interface{}) {
+func (u *updateAST) Set(v any) {
if v, ok := v.(ast.Value); ok {
u.value = v
} else {
@@ -36,7 +36,7 @@ func (u *updateAST) Set(v interface{}) {
}
}
-func (u *updateAST) Value() interface{} {
+func (u *updateAST) Value() any {
return u.value
}
@@ -46,7 +46,7 @@ func (u *updateAST) Relative(path storage.Path) dataUpdate {
return &cpy
}
-func (u *updateAST) Apply(v interface{}) interface{} {
+func (u *updateAST) Apply(v any) any {
if len(u.path) == 0 {
return u.value
}
@@ -72,11 +72,10 @@ func (u *updateAST) Apply(v interface{}) interface{} {
return newV
}
-func newUpdateAST(data interface{}, op storage.PatchOp, path storage.Path, idx int, value ast.Value) (*updateAST, error) {
-
+func newUpdateAST(data any, op storage.PatchOp, path storage.Path, idx int, value ast.Value) (*updateAST, error) {
switch data.(type) {
case ast.Null, ast.Boolean, ast.Number, ast.String:
- return nil, errors.NewNotFoundError(path)
+ return nil, errors.NotFoundErr
}
switch data := data.(type) {
@@ -94,15 +93,13 @@ func newUpdateAST(data interface{}, op storage.PatchOp, path storage.Path, idx i
}
func newUpdateArrayAST(data *ast.Array, op storage.PatchOp, path storage.Path, idx int, value ast.Value) (*updateAST, error) {
-
if idx == len(path)-1 {
if path[idx] == "-" || path[idx] == strconv.Itoa(data.Len()) {
if op != storage.AddOp {
- return nil, invalidPatchError("%v: invalid patch path", path)
+ return nil, errors.NewInvalidPatchError("%v: invalid patch path", path)
}
- cpy := data.Copy()
- cpy = cpy.Append(ast.NewTerm(value))
+ cpy := data.Append(ast.NewTerm(value))
return &updateAST{path[:len(path)-1], false, cpy}, nil
}
@@ -114,7 +111,7 @@ func newUpdateArrayAST(data *ast.Array, op storage.PatchOp, path storage.Path, i
switch op {
case storage.AddOp:
var results []*ast.Term
- for i := 0; i < data.Len(); i++ {
+ for i := range data.Len() {
if i == pos {
results = append(results, ast.NewTerm(value))
}
@@ -125,7 +122,7 @@ func newUpdateArrayAST(data *ast.Array, op storage.PatchOp, path storage.Path, i
case storage.RemoveOp:
var results []*ast.Term
- for i := 0; i < data.Len(); i++ {
+ for i := range data.Len() {
if i != pos {
results = append(results, data.Elem(i))
}
@@ -134,7 +131,7 @@ func newUpdateArrayAST(data *ast.Array, op storage.PatchOp, path storage.Path, i
default:
var results []*ast.Term
- for i := 0; i < data.Len(); i++ {
+ for i := range data.Len() {
if i == pos {
results = append(results, ast.NewTerm(value))
} else {
@@ -155,14 +152,14 @@ func newUpdateArrayAST(data *ast.Array, op storage.PatchOp, path storage.Path, i
}
func newUpdateObjectAST(data ast.Object, op storage.PatchOp, path storage.Path, idx int, value ast.Value) (*updateAST, error) {
- key := ast.StringTerm(path[idx])
+ key := ast.InternedTerm(path[idx])
val := data.Get(key)
if idx == len(path)-1 {
switch op {
case storage.ReplaceOp, storage.RemoveOp:
if val == nil {
- return nil, errors.NewNotFoundError(path)
+ return nil, errors.NotFoundErr
}
}
return &updateAST{path, op == storage.RemoveOp, value}, nil
@@ -172,14 +169,7 @@ func newUpdateObjectAST(data ast.Object, op storage.PatchOp, path storage.Path,
return newUpdateAST(val.Value, op, path, idx+1, value)
}
- return nil, errors.NewNotFoundError(path)
-}
-
-func interfaceToValue(v interface{}) (ast.Value, error) {
- if v, ok := v.(ast.Value); ok {
- return v, nil
- }
- return ast.InterfaceToValue(v)
+ return nil, errors.NotFoundErr
}
// setInAst updates the value in the AST at the given path with the given value.
@@ -201,7 +191,7 @@ func setInAst(data ast.Value, path storage.Path, value ast.Value) (ast.Value, er
}
func setInAstObject(obj ast.Object, path storage.Path, value ast.Value) (ast.Value, error) {
- key := ast.StringTerm(path[0])
+ key := ast.InternedTerm(path[0])
if len(path) == 1 {
obj.Insert(key, ast.NewTerm(value))
@@ -257,7 +247,7 @@ func removeInAst(value ast.Value, path storage.Path) (ast.Value, error) {
}
func removeInAstObject(obj ast.Object, path storage.Path) (ast.Value, error) {
- key := ast.StringTerm(path[0])
+ key := ast.InternedTerm(path[0])
if len(path) == 1 {
var items [][2]*ast.Term
@@ -296,7 +286,7 @@ func removeInAstArray(arr *ast.Array, path storage.Path) (ast.Value, error) {
if len(path) == 1 {
var elems []*ast.Term
// Note: possibly expensive operation for large data.
- for i := 0; i < arr.Len(); i++ {
+ for i := range arr.Len() {
if i == idx {
continue
}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/inmem.go b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/inmem.go
new file mode 100644
index 0000000000..cdc43424dd
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/inmem.go
@@ -0,0 +1,452 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package inmem implements an in-memory version of the policy engine's storage
+// layer.
+//
+// The in-memory store is used as the default storage layer implementation. The
+// in-memory store supports multi-reader/single-writer concurrency with
+// rollback.
+//
+// Callers should assume the in-memory store does not make copies of written
+// data. Once data is written to the in-memory store, it should not be modified
+// (outside of calling Store.Write). Furthermore, data read from the in-memory
+// store should be treated as read-only.
+package inmem
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "path/filepath"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "github.com/open-policy-agent/opa/internal/merge"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/storage"
+ "github.com/open-policy-agent/opa/v1/storage/internal/errors"
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+// New returns an empty in-memory store.
+func New() storage.Store {
+ return NewWithOpts()
+}
+
+// NewWithOpts returns an empty in-memory store, with extra options passed.
+func NewWithOpts(opts ...Opt) storage.Store {
+ s := &store{
+ triggers: map[*handle]storage.TriggerConfig{},
+ policies: map[string][]byte{},
+ roundTripOnWrite: true,
+ returnASTValuesOnRead: false,
+ }
+
+ for _, opt := range opts {
+ opt(s)
+ }
+
+ if s.returnASTValuesOnRead {
+ s.data = ast.NewObject()
+ s.roundTripOnWrite = false
+ } else {
+ s.data = map[string]any{}
+ }
+
+ return s
+}
+
+// NewFromObject returns a new in-memory store from the supplied data object.
+func NewFromObject(data map[string]any) storage.Store {
+ return NewFromObjectWithOpts(data)
+}
+
+// NewFromObjectWithOpts returns a new in-memory store from the supplied data object, with the
+// options passed.
+func NewFromObjectWithOpts(data map[string]any, opts ...Opt) storage.Store {
+ db := NewWithOpts(opts...)
+ ctx := context.Background()
+ txn, err := db.NewTransaction(ctx, storage.WriteParams)
+ if err != nil {
+ panic(err)
+ }
+ if err := db.Write(ctx, txn, storage.AddOp, storage.RootPath, data); err != nil {
+ panic(err)
+ }
+ if err := db.Commit(ctx, txn); err != nil {
+ panic(err)
+ }
+ return db
+}
+
+// NewFromReader returns a new in-memory store from a reader that produces a
+// JSON serialized object. This function is for test purposes.
+func NewFromReader(r io.Reader) storage.Store {
+ return NewFromReaderWithOpts(r)
+}
+
+// NewFromReader returns a new in-memory store from a reader that produces a
+// JSON serialized object, with extra options. This function is for test purposes.
+func NewFromReaderWithOpts(r io.Reader, opts ...Opt) storage.Store {
+ var data map[string]any
+ if err := util.NewJSONDecoder(r).Decode(&data); err != nil {
+ panic(err)
+ }
+ return NewFromObjectWithOpts(data, opts...)
+}
+
+type store struct {
+ rmu sync.RWMutex // reader-writer lock
+ wmu sync.Mutex // writer lock
+ xid uint64 // last generated transaction id
+ data any // raw or AST data
+ policies map[string][]byte // raw policies
+ triggers map[*handle]storage.TriggerConfig // registered triggers
+
+ // roundTripOnWrite, if true, means that every call to Write round trips the
+ // data through JSON before adding the data to the store. Defaults to true.
+ roundTripOnWrite bool
+
+ // returnASTValuesOnRead, if true, means that the store will eagerly convert data to AST values,
+ // and return them on Read.
+ // FIXME: naming(?)
+ returnASTValuesOnRead bool
+}
+
+type handle struct {
+ db *store
+}
+
+func (db *store) NewTransaction(_ context.Context, params ...storage.TransactionParams) (storage.Transaction, error) {
+ txn := &transaction{
+ xid: atomic.AddUint64(&db.xid, uint64(1)),
+ db: db,
+ }
+
+ if len(params) > 0 {
+ txn.write = params[0].Write
+ txn.context = params[0].Context
+ }
+
+ if txn.write {
+ db.wmu.Lock()
+ } else {
+ db.rmu.RLock()
+ }
+
+ return txn, nil
+}
+
+// Truncate implements the storage.Store interface. This method must be called within a transaction.
+func (db *store) Truncate(ctx context.Context, txn storage.Transaction, params storage.TransactionParams, it storage.Iterator) error {
+ var update *storage.Update
+ var err error
+
+ underlying, err := db.underlying(txn)
+ if err != nil {
+ return err
+ }
+
+ mergedData := map[string]any{}
+
+ for {
+ if update, err = it.Next(); err != nil {
+ break
+ }
+
+ if update.IsPolicy {
+ err = underlying.UpsertPolicy(strings.TrimLeft(update.Path.String(), "/"), update.Value)
+ if err != nil {
+ return err
+ }
+ } else {
+ var value any
+ if err = util.Unmarshal(update.Value, &value); err != nil {
+ return err
+ }
+
+ var key []string
+ dirpath := strings.TrimLeft(update.Path.String(), "/")
+ if len(dirpath) > 0 {
+ key = strings.Split(dirpath, "/")
+ }
+
+ if value != nil {
+ obj, err := mktree(key, value)
+ if err != nil {
+ return err
+ }
+
+ merged, ok := merge.InterfaceMaps(mergedData, obj)
+ if !ok {
+ return fmt.Errorf("failed to insert data file from path %s", filepath.Join(key...))
+ }
+ mergedData = merged
+ }
+ }
+ }
+
+ // err is known not to be nil at this point, as it getting assigned
+ // a non-nil value is the only way the loop above can exit.
+ if err != io.EOF {
+ return err
+ }
+
+ // For backwards compatibility, check if `RootOverwrite` was configured.
+ if params.RootOverwrite {
+ return underlying.Write(storage.AddOp, storage.RootPath, mergedData)
+ }
+
+ for _, root := range params.BasePaths {
+ newPath, ok := storage.ParsePathEscaped("/" + root)
+ if !ok {
+ return fmt.Errorf("storage path invalid: %v", newPath)
+ }
+
+ if value, ok := lookup(newPath, mergedData); ok {
+ if len(newPath) > 0 {
+ if err := storage.MakeDir(ctx, db, txn, newPath[:len(newPath)-1]); err != nil {
+ return err
+ }
+ }
+ if err := underlying.Write(storage.AddOp, newPath, value); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (db *store) Commit(ctx context.Context, txn storage.Transaction) error {
+ underlying, err := db.underlying(txn)
+ if err != nil {
+ return err
+ }
+ if underlying.write {
+ db.rmu.Lock()
+ event := underlying.Commit()
+ db.runOnCommitTriggers(ctx, txn, event)
+ // Mark the transaction stale after executing triggers, so they can
+ // perform store operations if needed.
+ underlying.stale = true
+ db.rmu.Unlock()
+ db.wmu.Unlock()
+ } else {
+ db.rmu.RUnlock()
+ }
+ return nil
+}
+
+func (db *store) Abort(_ context.Context, txn storage.Transaction) {
+ underlying, err := db.underlying(txn)
+ if err != nil {
+ panic(err)
+ }
+ underlying.stale = true
+ if underlying.write {
+ db.wmu.Unlock()
+ } else {
+ db.rmu.RUnlock()
+ }
+}
+
+func (db *store) ListPolicies(_ context.Context, txn storage.Transaction) ([]string, error) {
+ underlying, err := db.underlying(txn)
+ if err != nil {
+ return nil, err
+ }
+ return underlying.ListPolicies(), nil
+}
+
+func (db *store) GetPolicy(_ context.Context, txn storage.Transaction, id string) ([]byte, error) {
+ underlying, err := db.underlying(txn)
+ if err != nil {
+ return nil, err
+ }
+ return underlying.GetPolicy(id)
+}
+
+func (db *store) UpsertPolicy(_ context.Context, txn storage.Transaction, id string, bs []byte) error {
+ underlying, err := db.underlying(txn)
+ if err != nil {
+ return err
+ }
+ return underlying.UpsertPolicy(id, bs)
+}
+
+func (db *store) DeletePolicy(_ context.Context, txn storage.Transaction, id string) error {
+ underlying, err := db.underlying(txn)
+ if err != nil {
+ return err
+ }
+ if _, err := underlying.GetPolicy(id); err != nil {
+ return err
+ }
+ return underlying.DeletePolicy(id)
+}
+
+func (db *store) Register(_ context.Context, txn storage.Transaction, config storage.TriggerConfig) (storage.TriggerHandle, error) {
+ underlying, err := db.underlying(txn)
+ if err != nil {
+ return nil, err
+ }
+ if !underlying.write {
+ return nil, &storage.Error{
+ Code: storage.InvalidTransactionErr,
+ Message: "triggers must be registered with a write transaction",
+ }
+ }
+ h := &handle{db}
+ db.triggers[h] = config
+ return h, nil
+}
+
+func (db *store) Read(_ context.Context, txn storage.Transaction, path storage.Path) (any, error) {
+ underlying, err := db.underlying(txn)
+ if err != nil {
+ return nil, err
+ }
+
+ return underlying.Read(path)
+}
+
+func (db *store) Write(_ context.Context, txn storage.Transaction, op storage.PatchOp, path storage.Path, value any) error {
+ underlying, err := db.underlying(txn)
+ if err != nil {
+ return err
+ }
+
+ if db.returnASTValuesOnRead || !util.NeedsRoundTrip(value) {
+ // Fast path when value is nil, bool, string or json.Number.
+ return underlying.Write(op, path, value)
+ }
+
+ val := util.Reference(value)
+ if db.roundTripOnWrite {
+ if err := util.RoundTrip(val); err != nil {
+ return err
+ }
+ }
+
+ return underlying.Write(op, path, *val)
+}
+
+func (h *handle) Unregister(_ context.Context, txn storage.Transaction) {
+ underlying, err := h.db.underlying(txn)
+ if err != nil {
+ panic(err)
+ }
+ if !underlying.write {
+ panic(&storage.Error{
+ Code: storage.InvalidTransactionErr,
+ Message: "triggers must be unregistered with a write transaction",
+ })
+ }
+ delete(h.db.triggers, h)
+}
+
+func (db *store) runOnCommitTriggers(ctx context.Context, txn storage.Transaction, event storage.TriggerEvent) {
+ if db.returnASTValuesOnRead && len(db.triggers) > 0 {
+ // FIXME: Not very performant for large data.
+
+ dataEvents := make([]storage.DataEvent, 0, len(event.Data))
+
+ for _, dataEvent := range event.Data {
+ if astData, ok := dataEvent.Data.(ast.Value); ok {
+ jsn, err := ast.ValueToInterface(astData, illegalResolver{})
+ if err != nil {
+ panic(err)
+ }
+ dataEvents = append(dataEvents, storage.DataEvent{
+ Path: dataEvent.Path,
+ Data: jsn,
+ Removed: dataEvent.Removed,
+ })
+ } else {
+ dataEvents = append(dataEvents, dataEvent)
+ }
+ }
+
+ event = storage.TriggerEvent{
+ Policy: event.Policy,
+ Data: dataEvents,
+ Context: event.Context,
+ }
+ }
+
+ for _, t := range db.triggers {
+ t.OnCommit(ctx, txn, event)
+ }
+}
+
+type illegalResolver struct{}
+
+func (illegalResolver) Resolve(ref ast.Ref) (any, error) {
+ return nil, fmt.Errorf("illegal value: %v", ref)
+}
+
+func (db *store) underlying(txn storage.Transaction) (*transaction, error) {
+ underlying, ok := txn.(*transaction)
+ if !ok {
+ return nil, &storage.Error{
+ Code: storage.InvalidTransactionErr,
+ Message: fmt.Sprintf("unexpected transaction type %T", txn),
+ }
+ }
+ if underlying.db != db {
+ return nil, &storage.Error{
+ Code: storage.InvalidTransactionErr,
+ Message: "unknown transaction",
+ }
+ }
+ if underlying.stale {
+ return nil, &storage.Error{
+ Code: storage.InvalidTransactionErr,
+ Message: "stale transaction",
+ }
+ }
+ return underlying, nil
+}
+
+func mktree(path []string, value any) (map[string]any, error) {
+ if len(path) == 0 {
+ // For 0 length path the value is the full tree.
+ obj, ok := value.(map[string]any)
+ if !ok {
+ return nil, errors.RootMustBeObjectErr
+ }
+ return obj, nil
+ }
+
+ dir := map[string]any{}
+ for i := len(path) - 1; i > 0; i-- {
+ dir[path[i]] = value
+ value = dir
+ dir = map[string]any{}
+ }
+ dir[path[0]] = value
+
+ return dir, nil
+}
+
+func lookup(path storage.Path, data map[string]any) (any, bool) {
+ if len(path) == 0 {
+ return data, true
+ }
+ for i := range len(path) - 1 {
+ value, ok := data[path[i]]
+ if !ok {
+ return nil, false
+ }
+ obj, ok := value.(map[string]any)
+ if !ok {
+ return nil, false
+ }
+ data = obj
+ }
+ value, ok := data[path[len(path)-1]]
+ return value, ok
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/opts.go b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/opts.go
new file mode 100644
index 0000000000..2239fc73a3
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/opts.go
@@ -0,0 +1,37 @@
+package inmem
+
+// An Opt modifies store at instantiation.
+type Opt func(*store)
+
+// OptRoundTripOnWrite sets whether incoming objects written to store are
+// round-tripped through JSON to ensure they are serializable to JSON.
+//
+// Callers should disable this if they can guarantee all objects passed to
+// Write() are serializable to JSON. Failing to do so may result in undefined
+// behavior, including panics.
+//
+// Usually, when only storing objects in the inmem store that have been read
+// via encoding/json, this is safe to disable, and comes with an improvement
+// in performance and memory use.
+//
+// If setting to false, callers should deep-copy any objects passed to Write()
+// unless they can guarantee the objects will not be mutated after being written,
+// and that mutations happening to the objects after they have been passed into
+// Write() don't affect their logic.
+func OptRoundTripOnWrite(enabled bool) Opt {
+ return func(s *store) {
+ s.roundTripOnWrite = enabled
+ }
+}
+
+// OptReturnASTValuesOnRead sets whether data values added to the store should be
+// eagerly converted to AST values, which are then returned on read.
+//
+// When enabled, this feature does not sanity check data before converting it to AST values,
+// which may result in panics if the data is not valid. Callers should ensure that passed data
+// can be serialized to AST values; otherwise, it's recommended to also enable OptRoundTripOnWrite.
+func OptReturnASTValuesOnRead(enabled bool) Opt {
+ return func(s *store) {
+ s.returnASTValuesOnRead = enabled
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/storage/inmem/txn.go b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/txn.go
similarity index 62%
rename from vendor/github.com/open-policy-agent/opa/storage/inmem/txn.go
rename to vendor/github.com/open-policy-agent/opa/v1/storage/inmem/txn.go
index d3252e8822..e76bccd013 100644
--- a/vendor/github.com/open-policy-agent/opa/storage/inmem/txn.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/txn.go
@@ -7,13 +7,14 @@ package inmem
import (
"container/list"
"encoding/json"
+ "slices"
"strconv"
- "github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/internal/deepcopy"
- "github.com/open-policy-agent/opa/storage"
- "github.com/open-policy-agent/opa/storage/internal/errors"
- "github.com/open-policy-agent/opa/storage/internal/ptr"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/storage"
+ "github.com/open-policy-agent/opa/v1/storage/internal/errors"
+ "github.com/open-policy-agent/opa/v1/storage/internal/ptr"
)
// transaction implements the low-level read/write operations on the in-memory
@@ -34,13 +35,13 @@ import (
// Read transactions do not require any special handling and simply passthrough
// to the underlying store. Read transactions do not support upgrade.
type transaction struct {
- xid uint64
- write bool
- stale bool
db *store
updates *list.List
- policies map[string]policyUpdate
context *storage.Context
+ policies map[string]policyUpdate
+ xid uint64
+ write bool
+ stale bool
}
type policyUpdate struct {
@@ -48,28 +49,17 @@ type policyUpdate struct {
remove bool
}
-func newTransaction(xid uint64, write bool, context *storage.Context, db *store) *transaction {
- return &transaction{
- xid: xid,
- write: write,
- db: db,
- policies: map[string]policyUpdate{},
- updates: list.New(),
- context: context,
- }
-}
-
func (txn *transaction) ID() uint64 {
return txn.xid
}
-func (txn *transaction) Write(op storage.PatchOp, path storage.Path, value interface{}) error {
-
+func (txn *transaction) Write(op storage.PatchOp, path storage.Path, value any) error {
if !txn.write {
- return &storage.Error{
- Code: storage.InvalidTransactionErr,
- Message: "data write during read transaction",
- }
+ return &storage.Error{Code: storage.InvalidTransactionErr, Message: "data write during read transaction"}
+ }
+
+ if txn.updates == nil {
+ txn.updates = list.New()
}
if len(path) == 0 {
@@ -85,9 +75,20 @@ func (txn *transaction) Write(op storage.PatchOp, path storage.Path, value inter
if update.Path().Equal(path) {
if update.Remove() {
if op != storage.AddOp {
- return errors.NewNotFoundError(path)
+ return errors.NotFoundErr
}
}
+ // If the last update has the same path and value, we have nothing to do.
+ if txn.db.returnASTValuesOnRead {
+ if astValue, ok := update.Value().(ast.Value); ok {
+ if equalsValue(value, astValue) {
+ return nil
+ }
+ }
+ } else if comparableEquals(update.Value(), value) {
+ return nil
+ }
+
txn.updates.Remove(curr)
break
}
@@ -106,7 +107,7 @@ func (txn *transaction) Write(op storage.PatchOp, path storage.Path, value inter
// existing update is mutated.
if path.HasPrefix(update.Path()) {
if update.Remove() {
- return errors.NewNotFoundError(path)
+ return errors.NotFoundErr
}
suffix := path[len(update.Path()):]
newUpdate, err := txn.db.newUpdate(update.Value(), op, suffix, 0, value)
@@ -129,33 +130,53 @@ func (txn *transaction) Write(op storage.PatchOp, path storage.Path, value inter
return nil
}
-func (txn *transaction) updateRoot(op storage.PatchOp, value interface{}) error {
+func comparableEquals(a, b any) bool {
+ switch a := a.(type) {
+ case nil:
+ return b == nil
+ case bool:
+ if vb, ok := b.(bool); ok {
+ return vb == a
+ }
+ case string:
+ if vs, ok := b.(string); ok {
+ return vs == a
+ }
+ case json.Number:
+ if vn, ok := b.(json.Number); ok {
+ return vn == a
+ }
+ }
+ return false
+}
+
+func (txn *transaction) updateRoot(op storage.PatchOp, value any) error {
if op == storage.RemoveOp {
- return invalidPatchError(rootCannotBeRemovedMsg)
+ return errors.RootCannotBeRemovedErr
}
var update any
if txn.db.returnASTValuesOnRead {
- valueAST, err := interfaceToValue(value)
+ valueAST, err := ast.InterfaceToValue(value)
if err != nil {
return err
}
if _, ok := valueAST.(ast.Object); !ok {
- return invalidPatchError(rootMustBeObjectMsg)
+ return errors.RootMustBeObjectErr
}
update = &updateAST{
- path: storage.Path{},
+ path: storage.RootPath,
remove: false,
value: valueAST,
}
} else {
- if _, ok := value.(map[string]interface{}); !ok {
- return invalidPatchError(rootMustBeObjectMsg)
+ if _, ok := value.(map[string]any); !ok {
+ return errors.RootMustBeObjectErr
}
update = &updateRaw{
- path: storage.Path{},
+ path: storage.RootPath,
remove: false,
value: value,
}
@@ -163,21 +184,36 @@ func (txn *transaction) updateRoot(op storage.PatchOp, value interface{}) error
txn.updates.Init()
txn.updates.PushFront(update)
+
return nil
}
func (txn *transaction) Commit() (result storage.TriggerEvent) {
result.Context = txn.context
- for curr := txn.updates.Front(); curr != nil; curr = curr.Next() {
- action := curr.Value.(dataUpdate)
- txn.db.data = action.Apply(txn.db.data)
- result.Data = append(result.Data, storage.DataEvent{
- Path: action.Path(),
- Data: action.Value(),
- Removed: action.Remove(),
- })
+ if txn.updates != nil {
+ if len(txn.db.triggers) > 0 {
+ result.Data = slices.Grow(result.Data, txn.updates.Len())
+ }
+
+ for curr := txn.updates.Front(); curr != nil; curr = curr.Next() {
+ action := curr.Value.(dataUpdate)
+ txn.db.data = action.Apply(txn.db.data)
+
+ if len(txn.db.triggers) > 0 {
+ result.Data = append(result.Data, storage.DataEvent{
+ Path: action.Path(),
+ Data: action.Value(),
+ Removed: action.Remove(),
+ })
+ }
+ }
+ }
+
+ if len(txn.policies) > 0 && len(txn.db.triggers) > 0 {
+ result.Policy = slices.Grow(result.Policy, len(txn.policies))
}
+
for id, upd := range txn.policies {
if upd.remove {
delete(txn.db.policies, id)
@@ -185,23 +221,25 @@ func (txn *transaction) Commit() (result storage.TriggerEvent) {
txn.db.policies[id] = upd.value
}
- result.Policy = append(result.Policy, storage.PolicyEvent{
- ID: id,
- Data: upd.value,
- Removed: upd.remove,
- })
+ if len(txn.db.triggers) > 0 {
+ result.Policy = append(result.Policy, storage.PolicyEvent{
+ ID: id,
+ Data: upd.value,
+ Removed: upd.remove,
+ })
+ }
}
return result
}
-func pointer(v interface{}, path storage.Path) (interface{}, error) {
+func pointer(v any, path storage.Path) (any, error) {
if v, ok := v.(ast.Value); ok {
return ptr.ValuePtr(v, path)
}
return ptr.Ptr(v, path)
}
-func deepcpy(v interface{}) interface{} {
+func deepcpy(v any) any {
if v, ok := v.(ast.Value); ok {
var cpy ast.Value
@@ -217,9 +255,8 @@ func deepcpy(v interface{}) interface{} {
return deepcopy.DeepCopy(v)
}
-func (txn *transaction) Read(path storage.Path) (interface{}, error) {
-
- if !txn.write {
+func (txn *transaction) Read(path storage.Path) (any, error) {
+ if !txn.write || txn.updates == nil {
return pointer(txn.db.data, path)
}
@@ -231,7 +268,7 @@ func (txn *transaction) Read(path storage.Path) (interface{}, error) {
if path.HasPrefix(upd.Path()) {
if upd.Remove() {
- return nil, errors.NewNotFoundError(path)
+ return nil, errors.NotFoundErr
}
return pointer(upd.Value(), path[len(upd.Path()):])
}
@@ -260,8 +297,7 @@ func (txn *transaction) Read(path storage.Path) (interface{}, error) {
return cpy, nil
}
-func (txn *transaction) ListPolicies() []string {
- var ids []string
+func (txn *transaction) ListPolicies() (ids []string) {
for id := range txn.db.policies {
if _, ok := txn.policies[id]; !ok {
ids = append(ids, id)
@@ -276,11 +312,13 @@ func (txn *transaction) ListPolicies() []string {
}
func (txn *transaction) GetPolicy(id string) ([]byte, error) {
- if update, ok := txn.policies[id]; ok {
- if !update.remove {
- return update.value, nil
+ if txn.policies != nil {
+ if update, ok := txn.policies[id]; ok {
+ if !update.remove {
+ return update.value, nil
+ }
+ return nil, errors.NewNotFoundErrorf("policy id %q", id)
}
- return nil, errors.NewNotFoundErrorf("policy id %q", id)
}
if exist, ok := txn.db.policies[id]; ok {
return exist, nil
@@ -289,34 +327,34 @@ func (txn *transaction) GetPolicy(id string) ([]byte, error) {
}
func (txn *transaction) UpsertPolicy(id string, bs []byte) error {
- if !txn.write {
- return &storage.Error{
- Code: storage.InvalidTransactionErr,
- Message: "policy write during read transaction",
- }
- }
- txn.policies[id] = policyUpdate{bs, false}
- return nil
+ return txn.updatePolicy(id, policyUpdate{bs, false})
}
func (txn *transaction) DeletePolicy(id string) error {
+ return txn.updatePolicy(id, policyUpdate{nil, true})
+}
+
+func (txn *transaction) updatePolicy(id string, update policyUpdate) error {
if !txn.write {
- return &storage.Error{
- Code: storage.InvalidTransactionErr,
- Message: "policy write during read transaction",
- }
+ return &storage.Error{Code: storage.InvalidTransactionErr, Message: "policy write during read transaction"}
+ }
+
+ if txn.policies == nil {
+ txn.policies = map[string]policyUpdate{id: update}
+ } else {
+ txn.policies[id] = update
}
- txn.policies[id] = policyUpdate{nil, true}
+
return nil
}
type dataUpdate interface {
Path() storage.Path
Remove() bool
- Apply(interface{}) interface{}
+ Apply(any) any
Relative(path storage.Path) dataUpdate
- Set(interface{})
- Value() interface{}
+ Set(any)
+ Value() any
}
// update contains state associated with an update to be applied to the
@@ -324,16 +362,36 @@ type dataUpdate interface {
type updateRaw struct {
path storage.Path // data path modified by update
remove bool // indicates whether update removes the value at path
- value interface{} // value to add/replace at path (ignored if remove is true)
+ value any // value to add/replace at path (ignored if remove is true)
+}
+
+func equalsValue(a any, v ast.Value) bool {
+ if a, ok := a.(ast.Value); ok {
+ return a.Compare(v) == 0
+ }
+ switch a := a.(type) {
+ case nil:
+ return v == ast.NullValue
+ case bool:
+ if vb, ok := v.(ast.Boolean); ok {
+ return bool(vb) == a
+ }
+ case string:
+ if vs, ok := v.(ast.String); ok {
+ return string(vs) == a
+ }
+ }
+
+ return false
}
-func (db *store) newUpdate(data interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (dataUpdate, error) {
+func (db *store) newUpdate(data any, op storage.PatchOp, path storage.Path, idx int, value any) (dataUpdate, error) {
if db.returnASTValuesOnRead {
- astData, err := interfaceToValue(data)
+ astData, err := ast.InterfaceToValue(data)
if err != nil {
return nil, err
}
- astValue, err := interfaceToValue(value)
+ astValue, err := ast.InterfaceToValue(value)
if err != nil {
return nil, err
}
@@ -342,18 +400,17 @@ func (db *store) newUpdate(data interface{}, op storage.PatchOp, path storage.Pa
return newUpdateRaw(data, op, path, idx, value)
}
-func newUpdateRaw(data interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (dataUpdate, error) {
-
+func newUpdateRaw(data any, op storage.PatchOp, path storage.Path, idx int, value any) (dataUpdate, error) {
switch data.(type) {
case nil, bool, json.Number, string:
- return nil, errors.NewNotFoundError(path)
+ return nil, errors.NotFoundErr
}
switch data := data.(type) {
- case map[string]interface{}:
+ case map[string]any:
return newUpdateObject(data, op, path, idx, value)
- case []interface{}:
+ case []any:
return newUpdateArray(data, op, path, idx, value)
}
@@ -363,14 +420,13 @@ func newUpdateRaw(data interface{}, op storage.PatchOp, path storage.Path, idx i
}
}
-func newUpdateArray(data []interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (dataUpdate, error) {
-
+func newUpdateArray(data []any, op storage.PatchOp, path storage.Path, idx int, value any) (dataUpdate, error) {
if idx == len(path)-1 {
if path[idx] == "-" || path[idx] == strconv.Itoa(len(data)) {
if op != storage.AddOp {
- return nil, invalidPatchError("%v: invalid patch path", path)
+ return nil, errors.NewInvalidPatchError("%v: invalid patch path", path)
}
- cpy := make([]interface{}, len(data)+1)
+ cpy := make([]any, len(data)+1)
copy(cpy, data)
cpy[len(data)] = value
return &updateRaw{path[:len(path)-1], false, cpy}, nil
@@ -383,20 +439,20 @@ func newUpdateArray(data []interface{}, op storage.PatchOp, path storage.Path, i
switch op {
case storage.AddOp:
- cpy := make([]interface{}, len(data)+1)
+ cpy := make([]any, len(data)+1)
copy(cpy[:pos], data[:pos])
copy(cpy[pos+1:], data[pos:])
cpy[pos] = value
return &updateRaw{path[:len(path)-1], false, cpy}, nil
case storage.RemoveOp:
- cpy := make([]interface{}, len(data)-1)
+ cpy := make([]any, len(data)-1)
copy(cpy[:pos], data[:pos])
copy(cpy[pos:], data[pos+1:])
return &updateRaw{path[:len(path)-1], false, cpy}, nil
default:
- cpy := make([]interface{}, len(data))
+ cpy := make([]any, len(data))
copy(cpy, data)
cpy[pos] = value
return &updateRaw{path[:len(path)-1], false, cpy}, nil
@@ -411,13 +467,13 @@ func newUpdateArray(data []interface{}, op storage.PatchOp, path storage.Path, i
return newUpdateRaw(data[pos], op, path, idx+1, value)
}
-func newUpdateObject(data map[string]interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (dataUpdate, error) {
+func newUpdateObject(data map[string]any, op storage.PatchOp, path storage.Path, idx int, value any) (dataUpdate, error) {
if idx == len(path)-1 {
switch op {
case storage.ReplaceOp, storage.RemoveOp:
if _, ok := data[path[idx]]; !ok {
- return nil, errors.NewNotFoundError(path)
+ return nil, errors.NotFoundErr
}
}
return &updateRaw{path, op == storage.RemoveOp, value}, nil
@@ -427,7 +483,7 @@ func newUpdateObject(data map[string]interface{}, op storage.PatchOp, path stora
return newUpdateRaw(data, op, path, idx+1, value)
}
- return nil, errors.NewNotFoundError(path)
+ return nil, errors.NotFoundErr
}
func (u *updateRaw) Remove() bool {
@@ -438,7 +494,7 @@ func (u *updateRaw) Path() storage.Path {
return u.path
}
-func (u *updateRaw) Apply(data interface{}) interface{} {
+func (u *updateRaw) Apply(data any) any {
if len(u.path) == 0 {
return u.value
}
@@ -448,17 +504,17 @@ func (u *updateRaw) Apply(data interface{}) interface{} {
}
key := u.path[len(u.path)-1]
if u.remove {
- obj := parent.(map[string]interface{})
+ obj := parent.(map[string]any)
delete(obj, key)
return data
}
switch parent := parent.(type) {
- case map[string]interface{}:
+ case map[string]any:
if parent == nil {
- parent = make(map[string]interface{}, 1)
+ parent = make(map[string]any, 1)
}
parent[key] = u.value
- case []interface{}:
+ case []any:
idx, err := strconv.Atoi(key)
if err != nil {
panic(err)
@@ -468,11 +524,11 @@ func (u *updateRaw) Apply(data interface{}) interface{} {
return data
}
-func (u *updateRaw) Set(v interface{}) {
+func (u *updateRaw) Set(v any) {
u.value = v
}
-func (u *updateRaw) Value() interface{} {
+func (u *updateRaw) Value() any {
return u.value
}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/interface.go b/vendor/github.com/open-policy-agent/opa/v1/storage/interface.go
new file mode 100644
index 0000000000..a783caae09
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/storage/interface.go
@@ -0,0 +1,252 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package storage
+
+import (
+ "context"
+
+ "github.com/open-policy-agent/opa/v1/metrics"
+)
+
+// Transaction defines the interface that identifies a consistent snapshot over
+// the policy engine's storage layer.
+type Transaction interface {
+ ID() uint64
+}
+
+// Store defines the interface for the storage layer's backend.
+type Store interface {
+ Trigger
+ Policy
+
+ // NewTransaction is called create a new transaction in the store.
+ NewTransaction(context.Context, ...TransactionParams) (Transaction, error)
+
+ // Read is called to fetch a document referred to by path.
+ Read(context.Context, Transaction, Path) (any, error)
+
+ // Write is called to modify a document referred to by path.
+ Write(context.Context, Transaction, PatchOp, Path, any) error
+
+ // Commit is called to finish the transaction. If Commit returns an error, the
+ // transaction must be automatically aborted by the Store implementation.
+ Commit(context.Context, Transaction) error
+
+ // Truncate is called to make a copy of the underlying store, write documents in the new store
+ // by creating multiple transactions in the new store as needed and finally swapping
+ // over to the new storage instance. This method must be called within a transaction on the original store.
+ Truncate(context.Context, Transaction, TransactionParams, Iterator) error
+
+ // Abort is called to cancel the transaction.
+ Abort(context.Context, Transaction)
+}
+
+// MakeDirer defines the interface a Store could realize to override the
+// generic MakeDir functionality in storage.MakeDir
+type MakeDirer interface {
+ MakeDir(context.Context, Transaction, Path) error
+}
+
+// NonEmptyer allows a store implemention to override NonEmpty())
+type NonEmptyer interface {
+ NonEmpty(context.Context, Transaction) func([]string) (bool, error)
+}
+
+// TransactionParams describes a new transaction.
+type TransactionParams struct {
+
+ // BasePaths indicates the top-level paths where write operations will be performed in this transaction.
+ BasePaths []string
+
+ // RootOverwrite is deprecated. Use BasePaths instead.
+ RootOverwrite bool
+
+ // Write indicates if this transaction will perform any write operations.
+ Write bool
+
+ // Context contains key/value pairs passed to triggers.
+ Context *Context
+}
+
+// Context is a simple container for key/value pairs.
+type Context struct {
+ values map[any]any
+}
+
+// NewContext returns a new context object.
+func NewContext() *Context {
+ return &Context{
+ values: map[any]any{},
+ }
+}
+
+// Get returns the key value in the context.
+func (ctx *Context) Get(key any) any {
+ if ctx == nil {
+ return nil
+ }
+ return ctx.values[key]
+}
+
+// Put adds a key/value pair to the context.
+func (ctx *Context) Put(key, value any) {
+ ctx.values[key] = value
+}
+
+var metricsKey = struct{}{}
+
+// WithMetrics allows passing metrics via the Context.
+// It puts the metrics object in the ctx, and returns the same
+// ctx (not a copy) for convenience.
+func (ctx *Context) WithMetrics(m metrics.Metrics) *Context {
+ ctx.values[metricsKey] = m
+ return ctx
+}
+
+// Metrics() allows using a Context's metrics. Returns nil if metrics
+// were not attached to the Context.
+func (ctx *Context) Metrics() metrics.Metrics {
+ if m, ok := ctx.values[metricsKey]; ok {
+ if met, ok := m.(metrics.Metrics); ok {
+ return met
+ }
+ }
+ return nil
+}
+
+// WriteParams specifies the TransactionParams for a write transaction.
+var WriteParams = TransactionParams{
+ Write: true,
+}
+
+// PatchOp is the enumeration of supposed modifications.
+type PatchOp int
+
+// Patch supports add, remove, and replace operations.
+const (
+ AddOp PatchOp = iota
+ RemoveOp = iota
+ ReplaceOp = iota
+)
+
+// WritesNotSupported provides a default implementation of the write
+// interface which may be used if the backend does not support writes.
+type WritesNotSupported struct{}
+
+func (WritesNotSupported) Write(context.Context, Transaction, PatchOp, Path, any) error {
+ return writesNotSupportedError()
+}
+
+// Policy defines the interface for policy module storage.
+type Policy interface {
+ ListPolicies(context.Context, Transaction) ([]string, error)
+ GetPolicy(context.Context, Transaction, string) ([]byte, error)
+ UpsertPolicy(context.Context, Transaction, string, []byte) error
+ DeletePolicy(context.Context, Transaction, string) error
+}
+
+// PolicyNotSupported provides a default implementation of the policy interface
+// which may be used if the backend does not support policy storage.
+type PolicyNotSupported struct{}
+
+// ListPolicies always returns a PolicyNotSupportedErr.
+func (PolicyNotSupported) ListPolicies(context.Context, Transaction) ([]string, error) {
+ return nil, policyNotSupportedError()
+}
+
+// GetPolicy always returns a PolicyNotSupportedErr.
+func (PolicyNotSupported) GetPolicy(context.Context, Transaction, string) ([]byte, error) {
+ return nil, policyNotSupportedError()
+}
+
+// UpsertPolicy always returns a PolicyNotSupportedErr.
+func (PolicyNotSupported) UpsertPolicy(context.Context, Transaction, string, []byte) error {
+ return policyNotSupportedError()
+}
+
+// DeletePolicy always returns a PolicyNotSupportedErr.
+func (PolicyNotSupported) DeletePolicy(context.Context, Transaction, string) error {
+ return policyNotSupportedError()
+}
+
+// PolicyEvent describes a change to a policy.
+type PolicyEvent struct {
+ ID string
+ Data []byte
+ Removed bool
+}
+
+// DataEvent describes a change to a base data document.
+type DataEvent struct {
+ Path Path
+ Data any
+ Removed bool
+}
+
+// TriggerEvent describes the changes that caused the trigger to be invoked.
+type TriggerEvent struct {
+ Policy []PolicyEvent
+ Data []DataEvent
+ Context *Context
+}
+
+// IsZero returns true if the TriggerEvent indicates no changes occurred. This
+// function is primarily for test purposes.
+func (e TriggerEvent) IsZero() bool {
+ return !e.PolicyChanged() && !e.DataChanged()
+}
+
+// PolicyChanged returns true if the trigger was caused by a policy change.
+func (e TriggerEvent) PolicyChanged() bool {
+ return len(e.Policy) > 0
+}
+
+// DataChanged returns true if the trigger was caused by a data change.
+func (e TriggerEvent) DataChanged() bool {
+ return len(e.Data) > 0
+}
+
+// TriggerConfig contains the trigger registration configuration.
+type TriggerConfig struct {
+
+ // OnCommit is invoked when a transaction is successfully committed. The
+ // callback is invoked with a handle to the write transaction that
+ // successfully committed before other clients see the changes.
+ OnCommit func(context.Context, Transaction, TriggerEvent)
+}
+
+// Trigger defines the interface that stores implement to register for change
+// notifications when the store is changed.
+type Trigger interface {
+ Register(context.Context, Transaction, TriggerConfig) (TriggerHandle, error)
+}
+
+// TriggersNotSupported provides default implementations of the Trigger
+// interface which may be used if the backend does not support triggers.
+type TriggersNotSupported struct{}
+
+// Register always returns an error indicating triggers are not supported.
+func (TriggersNotSupported) Register(context.Context, Transaction, TriggerConfig) (TriggerHandle, error) {
+ return nil, triggersNotSupportedError()
+}
+
+// TriggerHandle defines the interface that can be used to unregister triggers that have
+// been registered on a Store.
+type TriggerHandle interface {
+ Unregister(context.Context, Transaction)
+}
+
+// Iterator defines the interface that can be used to read files from a directory starting with
+// files at the base of the directory, then sub-directories etc.
+type Iterator interface {
+ Next() (*Update, error)
+}
+
+// Update contains information about a file
+type Update struct {
+ Path Path
+ Value []byte
+ IsPolicy bool
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/internal/errors/errors.go b/vendor/github.com/open-policy-agent/opa/v1/storage/internal/errors/errors.go
new file mode 100644
index 0000000000..a478b9f257
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/storage/internal/errors/errors.go
@@ -0,0 +1,54 @@
+// Copyright 2021 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package errors contains reusable error-related code for the storage layer.
+package errors
+
+import (
+ "fmt"
+
+ "github.com/open-policy-agent/opa/v1/storage"
+)
+
+const (
+ ArrayIndexTypeMsg = "array index must be integer"
+ DoesNotExistMsg = "document does not exist"
+ OutOfRangeMsg = "array index out of range"
+ RootMustBeObjectMsg = "root must be object"
+ RootCannotBeRemovedMsg = "root cannot be removed"
+)
+
+var (
+ NotFoundErr = &storage.Error{Code: storage.NotFoundErr, Message: DoesNotExistMsg}
+ RootMustBeObjectErr = &storage.Error{Code: storage.InvalidPatchErr, Message: RootMustBeObjectMsg}
+ RootCannotBeRemovedErr = &storage.Error{Code: storage.InvalidPatchErr, Message: RootCannotBeRemovedMsg}
+)
+
+func NewNotFoundErrorWithHint(path storage.Path, hint string) *storage.Error {
+ return &storage.Error{
+ Code: storage.NotFoundErr,
+ Message: path.String() + ": " + hint,
+ }
+}
+
+func NewNotFoundErrorf(f string, a ...any) *storage.Error {
+ return &storage.Error{
+ Code: storage.NotFoundErr,
+ Message: fmt.Sprintf(f, a...),
+ }
+}
+
+func NewWriteConflictError(p storage.Path) *storage.Error {
+ return &storage.Error{
+ Code: storage.WriteConflictErr,
+ Message: p.String(),
+ }
+}
+
+func NewInvalidPatchError(f string, a ...any) *storage.Error {
+ return &storage.Error{
+ Code: storage.InvalidPatchErr,
+ Message: fmt.Sprintf(f, a...),
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/storage/internal/ptr/ptr.go b/vendor/github.com/open-policy-agent/opa/v1/storage/internal/ptr/ptr.go
similarity index 54%
rename from vendor/github.com/open-policy-agent/opa/storage/internal/ptr/ptr.go
rename to vendor/github.com/open-policy-agent/opa/v1/storage/internal/ptr/ptr.go
index 14adbd682e..bef39ebf49 100644
--- a/vendor/github.com/open-policy-agent/opa/storage/internal/ptr/ptr.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/storage/internal/ptr/ptr.go
@@ -8,29 +8,29 @@ package ptr
import (
"strconv"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/storage"
- "github.com/open-policy-agent/opa/storage/internal/errors"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/storage"
+ "github.com/open-policy-agent/opa/v1/storage/internal/errors"
)
-func Ptr(data interface{}, path storage.Path) (interface{}, error) {
+func Ptr(data any, path storage.Path) (any, error) {
node := data
for i := range path {
key := path[i]
switch curr := node.(type) {
- case map[string]interface{}:
+ case map[string]any:
var ok bool
if node, ok = curr[key]; !ok {
- return nil, errors.NewNotFoundError(path)
+ return nil, errors.NotFoundErr
}
- case []interface{}:
+ case []any:
pos, err := ValidateArrayIndex(curr, key, path)
if err != nil {
return nil, err
}
node = curr[pos]
default:
- return nil, errors.NewNotFoundError(path)
+ return nil, errors.NotFoundErr
}
}
@@ -38,17 +38,45 @@ func Ptr(data interface{}, path storage.Path) (interface{}, error) {
}
func ValuePtr(data ast.Value, path storage.Path) (ast.Value, error) {
+ var keyTerm *ast.Term
+
+ defer func() {
+ if keyTerm != nil {
+ ast.TermPtrPool.Put(keyTerm)
+ }
+ }()
+
node := data
for i := range path {
key := path[i]
switch curr := node.(type) {
case ast.Object:
- keyTerm := ast.StringTerm(key)
- val := curr.Get(keyTerm)
- if val == nil {
- return nil, errors.NewNotFoundError(path)
+ // Note(anders):
+ // This term is only created for the lookup, which is not great — especially
+ // considering the path likely was converted from a ref, where we had all
+ // the terms available already! Without chaging the storage API, our options
+ // for performant lookups are limitied to using interning or a pool. Prefer
+ // interning when possible, as that is zero alloc. Using the pool avoids at
+ // least allocating a new term for every lookup, but still requires an alloc
+ // for the string Value.
+ if ast.HasInternedValue(key) {
+ if val := curr.Get(ast.InternedTerm(key)); val != nil {
+ node = val.Value
+ } else {
+ return nil, errors.NotFoundErr
+ }
+ } else {
+ if keyTerm == nil {
+ keyTerm = ast.TermPtrPool.Get()
+ }
+ // 1 alloc
+ keyTerm.Value = ast.String(key)
+ if val := curr.Get(keyTerm); val != nil {
+ node = val.Value
+ } else {
+ return nil, errors.NotFoundErr
+ }
}
- node = val.Value
case *ast.Array:
pos, err := ValidateASTArrayIndex(curr, key, path)
if err != nil {
@@ -56,14 +84,14 @@ func ValuePtr(data ast.Value, path storage.Path) (ast.Value, error) {
}
node = curr.Elem(pos).Value
default:
- return nil, errors.NewNotFoundError(path)
+ return nil, errors.NotFoundErr
}
}
return node, nil
}
-func ValidateArrayIndex(arr []interface{}, s string, path storage.Path) (int, error) {
+func ValidateArrayIndex(arr []any, s string, path storage.Path) (int, error) {
idx, ok := isInt(s)
if !ok {
return 0, errors.NewNotFoundErrorWithHint(path, errors.ArrayIndexTypeMsg)
@@ -82,7 +110,7 @@ func ValidateASTArrayIndex(arr *ast.Array, s string, path storage.Path) (int, er
// ValidateArrayIndexForWrite also checks that `s` is a valid way to address an
// array element like `ValidateArrayIndex`, but returns a `resource_conflict` error
// if it is not.
-func ValidateArrayIndexForWrite(arr []interface{}, s string, i int, path storage.Path) (int, error) {
+func ValidateArrayIndexForWrite(arr []any, s string, i int, path storage.Path) (int, error) {
idx, ok := isInt(s)
if !ok {
return 0, errors.NewWriteConflictError(path[:i-1])
@@ -95,12 +123,12 @@ func isInt(s string) (int, bool) {
return idx, err == nil
}
-func inRange(i int, arr interface{}, path storage.Path) (int, error) {
+func inRange(i int, arr any, path storage.Path) (int, error) {
var arrLen int
switch v := arr.(type) {
- case []interface{}:
+ case []any:
arrLen = len(v)
case *ast.Array:
arrLen = v.Len()
diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/path.go b/vendor/github.com/open-policy-agent/opa/v1/storage/path.go
new file mode 100644
index 0000000000..16bb3e42c5
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/storage/path.go
@@ -0,0 +1,142 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package storage
+
+import (
+ "errors"
+ "fmt"
+ "net/url"
+ "slices"
+ "strconv"
+ "strings"
+
+ "github.com/open-policy-agent/opa/v1/ast"
+)
+
+// RootPath refers to the root document in storage.
+var RootPath = Path{}
+
+// Path refers to a document in storage.
+type Path []string
+
+// ParsePath returns a new path for the given str.
+func ParsePath(str string) (path Path, ok bool) {
+ if len(str) == 0 || str[0] != '/' {
+ return nil, false
+ }
+ if len(str) == 1 {
+ return Path{}, true
+ }
+
+ return strings.Split(str[1:], "/"), true
+}
+
+// ParsePathEscaped returns a new path for the given escaped str.
+func ParsePathEscaped(str string) (path Path, ok bool) {
+ if path, ok = ParsePath(str); ok {
+ for i := range path {
+ if segment, err := url.PathUnescape(path[i]); err == nil {
+ path[i] = segment
+ } else {
+ return nil, false
+ }
+ }
+ }
+ return
+}
+
+// NewPathForRef returns a new path for the given ref.
+func NewPathForRef(ref ast.Ref) (path Path, err error) {
+ if len(ref) == 0 {
+ return nil, errors.New("empty reference (indicates error in caller)")
+ }
+
+ if len(ref) == 1 {
+ return Path{}, nil
+ }
+
+ path = make(Path, 0, len(ref)-1)
+
+ for _, term := range ref[1:] {
+ switch v := term.Value.(type) {
+ case ast.String:
+ path = append(path, string(v))
+ case ast.Number:
+ path = append(path, v.String())
+ case ast.Boolean, ast.Null:
+ return nil, &Error{
+ Code: NotFoundErr,
+ Message: fmt.Sprintf("%v: does not exist", ref),
+ }
+ case *ast.Array, ast.Object, ast.Set:
+ return nil, fmt.Errorf("composites cannot be base document keys: %v", ref)
+ default:
+ return nil, fmt.Errorf("unresolved reference (indicates error in caller): %v", ref)
+ }
+ }
+
+ return path, nil
+}
+
+// Compare performs lexigraphical comparison on p and other and returns -1 if p
+// is less than other, 0 if p is equal to other, or 1 if p is greater than
+// other.
+func (p Path) Compare(other Path) (cmp int) {
+ return slices.Compare(p, other)
+}
+
+// Equal returns true if p is the same as other.
+func (p Path) Equal(other Path) bool {
+ return slices.Equal(p, other)
+}
+
+// HasPrefix returns true if p starts with other.
+func (p Path) HasPrefix(other Path) bool {
+ return len(other) <= len(p) && p[:len(other)].Equal(other)
+}
+
+// Ref returns a ref that represents p rooted at head.
+func (p Path) Ref(head *ast.Term) (ref ast.Ref) {
+ ref = make(ast.Ref, len(p)+1)
+ ref[0] = head
+ for i := range p {
+ idx, err := strconv.ParseInt(p[i], 10, 64)
+ if err == nil {
+ ref[i+1] = ast.UIntNumberTerm(uint64(idx))
+ } else {
+ ref[i+1] = ast.StringTerm(p[i])
+ }
+ }
+ return ref
+}
+
+func (p Path) String() string {
+ if len(p) == 0 {
+ return "/"
+ }
+
+ l := 0
+ for i := range p {
+ l += len(p[i]) + 1
+ }
+
+ sb := strings.Builder{}
+ sb.Grow(l)
+ for i := range p {
+ sb.WriteByte('/')
+ sb.WriteString(url.PathEscape(p[i]))
+ }
+ return sb.String()
+}
+
+// MustParsePath returns a new Path for s. If s cannot be parsed, this function
+// will panic. This is mostly for test purposes.
+func MustParsePath(s string) Path {
+ path, ok := ParsePath(s)
+ if !ok {
+ panic(s)
+ }
+ return path
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/storage.go b/vendor/github.com/open-policy-agent/opa/v1/storage/storage.go
new file mode 100644
index 0000000000..38d51be405
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/storage/storage.go
@@ -0,0 +1,139 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package storage
+
+import (
+ "context"
+
+ "github.com/open-policy-agent/opa/v1/ast"
+)
+
+// NewTransactionOrDie is a helper function to create a new transaction. If the
+// storage layer cannot create a new transaction, this function will panic. This
+// function should only be used for tests.
+func NewTransactionOrDie(ctx context.Context, store Store, params ...TransactionParams) Transaction {
+ txn, err := store.NewTransaction(ctx, params...)
+ if err != nil {
+ panic(err)
+ }
+ return txn
+}
+
+// ReadOne is a convenience function to read a single value from the provided Store. It
+// will create a new Transaction to perform the read with, and clean up after itself
+// should an error occur.
+func ReadOne(ctx context.Context, store Store, path Path) (any, error) {
+ txn, err := store.NewTransaction(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer store.Abort(ctx, txn)
+
+ return store.Read(ctx, txn, path)
+}
+
+// WriteOne is a convenience function to write a single value to the provided Store. It
+// will create a new Transaction to perform the write with, and clean up after itself
+// should an error occur.
+func WriteOne(ctx context.Context, store Store, op PatchOp, path Path, value any) error {
+ txn, err := store.NewTransaction(ctx, WriteParams)
+ if err != nil {
+ return err
+ }
+
+ if err := store.Write(ctx, txn, op, path, value); err != nil {
+ store.Abort(ctx, txn)
+ return err
+ }
+
+ return store.Commit(ctx, txn)
+}
+
+// MakeDir inserts an empty object at path. If the parent path does not exist,
+// MakeDir will create it recursively.
+func MakeDir(ctx context.Context, store Store, txn Transaction, path Path) error {
+
+ // Allow the Store implementation to deal with this in its own way.
+ if md, ok := store.(MakeDirer); ok {
+ return md.MakeDir(ctx, txn, path)
+ }
+
+ if len(path) == 0 {
+ return nil
+ }
+
+ node, err := store.Read(ctx, txn, path)
+ if err != nil {
+ if !IsNotFound(err) {
+ return err
+ }
+
+ if err := MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil {
+ return err
+ }
+
+ return store.Write(ctx, txn, AddOp, path, map[string]any{})
+ }
+
+ if _, ok := node.(map[string]any); ok {
+ return nil
+ }
+
+ if _, ok := node.(ast.Object); ok {
+ return nil
+ }
+
+ return writeConflictError(path)
+}
+
+// Txn is a convenience function that executes f inside a new transaction
+// opened on the store. If the function returns an error, the transaction is
+// aborted and the error is returned. Otherwise, the transaction is committed
+// and the result of the commit is returned.
+func Txn(ctx context.Context, store Store, params TransactionParams, f func(Transaction) error) error {
+
+ txn, err := store.NewTransaction(ctx, params)
+ if err != nil {
+ return err
+ }
+
+ if err := f(txn); err != nil {
+ store.Abort(ctx, txn)
+ return err
+ }
+
+ return store.Commit(ctx, txn)
+}
+
+// NonEmpty returns a function that tests if a path is non-empty. A
+// path is non-empty if a Read on the path returns a value or a Read
+// on any of the path prefixes returns a non-object value.
+func NonEmpty(ctx context.Context, store Store, txn Transaction) func([]string) (bool, error) {
+ if md, ok := store.(NonEmptyer); ok {
+ return md.NonEmpty(ctx, txn)
+ }
+ return func(path []string) (bool, error) {
+ if _, err := store.Read(ctx, txn, Path(path)); err == nil {
+ return true, nil
+ } else if !IsNotFound(err) {
+ return false, err
+ }
+ for i := len(path) - 1; i > 0; i-- {
+ val, err := store.Read(ctx, txn, Path(path[:i]))
+ if err != nil && !IsNotFound(err) {
+ return false, err
+ } else if err == nil {
+ if _, ok := val.(map[string]any); ok {
+ return false, nil
+ }
+ if _, ok := val.(ast.Object); ok {
+ return false, nil
+ }
+ return true, nil
+ }
+ }
+ return false, nil
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/aggregates.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/aggregates.go
similarity index 76%
rename from vendor/github.com/open-policy-agent/opa/topdown/aggregates.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/aggregates.go
index a0f67a7c95..03d07668d8 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/aggregates.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/aggregates.go
@@ -7,20 +7,20 @@ package topdown
import (
"math/big"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
func builtinCount(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
switch a := operands[0].Value.(type) {
case *ast.Array:
- return iter(ast.IntNumberTerm(a.Len()))
+ return iter(ast.InternedTerm(a.Len()))
case ast.Object:
- return iter(ast.IntNumberTerm(a.Len()))
+ return iter(ast.InternedTerm(a.Len()))
case ast.Set:
- return iter(ast.IntNumberTerm(a.Len()))
+ return iter(ast.InternedTerm(a.Len()))
case ast.String:
- return iter(ast.IntNumberTerm(len([]rune(a))))
+ return iter(ast.InternedTerm(len([]rune(a))))
}
return builtins.NewOperandTypeErr(1, operands[0].Value, "array", "object", "set", "string")
}
@@ -28,6 +28,22 @@ func builtinCount(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) e
func builtinSum(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
switch a := operands[0].Value.(type) {
case *ast.Array:
+ // Fast path for arrays of integers
+ is := 0
+ nonInts := a.Until(func(x *ast.Term) bool {
+ if n, ok := x.Value.(ast.Number); ok {
+ if i, ok := n.Int(); ok {
+ is += i
+ return false
+ }
+ }
+ return true
+ })
+ if !nonInts {
+ return iter(ast.InternedTerm(is))
+ }
+
+ // Non-integer values found, so we need to sum as floats.
sum := big.NewFloat(0)
err := a.Iter(func(x *ast.Term) error {
n, ok := x.Value.(ast.Number)
@@ -42,6 +58,21 @@ func builtinSum(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err
}
return iter(ast.NewTerm(builtins.FloatToNumber(sum)))
case ast.Set:
+ // Fast path for sets of integers
+ is := 0
+ nonInts := a.Until(func(x *ast.Term) bool {
+ if n, ok := x.Value.(ast.Number); ok {
+ if i, ok := n.Int(); ok {
+ is += i
+ return false
+ }
+ }
+ return true
+ })
+ if !nonInts {
+ return iter(ast.InternedTerm(is))
+ }
+
sum := big.NewFloat(0)
err := a.Iter(func(x *ast.Term) error {
n, ok := x.Value.(ast.Number)
@@ -99,7 +130,7 @@ func builtinMax(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err
if a.Len() == 0 {
return nil
}
- var max = ast.Value(ast.Null{})
+ max := ast.InternedNullTerm.Value
a.Foreach(func(x *ast.Term) {
if ast.Compare(max, x.Value) <= 0 {
max = x.Value
@@ -110,7 +141,7 @@ func builtinMax(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err
if a.Len() == 0 {
return nil
}
- max, err := a.Reduce(ast.NullTerm(), func(max *ast.Term, elem *ast.Term) (*ast.Term, error) {
+ max, err := a.Reduce(ast.InternedNullTerm, func(max *ast.Term, elem *ast.Term) (*ast.Term, error) {
if ast.Compare(max, elem) <= 0 {
return elem, nil
}
@@ -142,11 +173,11 @@ func builtinMin(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err
if a.Len() == 0 {
return nil
}
- min, err := a.Reduce(ast.NullTerm(), func(min *ast.Term, elem *ast.Term) (*ast.Term, error) {
+ min, err := a.Reduce(ast.InternedNullTerm, func(min *ast.Term, elem *ast.Term) (*ast.Term, error) {
// The null term is considered to be less than any other term,
// so in order for min of a set to make sense, we need to check
// for it.
- if min.Value.Compare(ast.Null{}) == 0 {
+ if min.Value.Compare(ast.InternedNullValue) == 0 {
return elem, nil
}
@@ -178,7 +209,7 @@ func builtinAll(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err
switch val := operands[0].Value.(type) {
case ast.Set:
res := true
- match := ast.BooleanTerm(true)
+ match := ast.InternedTerm(true)
val.Until(func(term *ast.Term) bool {
if !match.Equal(term) {
res = false
@@ -186,10 +217,10 @@ func builtinAll(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err
}
return false
})
- return iter(ast.BooleanTerm(res))
+ return iter(ast.InternedTerm(res))
case *ast.Array:
res := true
- match := ast.BooleanTerm(true)
+ match := ast.InternedTerm(true)
val.Until(func(term *ast.Term) bool {
if !match.Equal(term) {
res = false
@@ -197,7 +228,7 @@ func builtinAll(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err
}
return false
})
- return iter(ast.BooleanTerm(res))
+ return iter(ast.InternedTerm(res))
default:
return builtins.NewOperandTypeErr(1, operands[0].Value, "array", "set")
}
@@ -206,11 +237,11 @@ func builtinAll(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err
func builtinAny(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
switch val := operands[0].Value.(type) {
case ast.Set:
- res := val.Len() > 0 && val.Contains(ast.BooleanTerm(true))
- return iter(ast.BooleanTerm(res))
+ res := val.Len() > 0 && val.Contains(ast.InternedTerm(true))
+ return iter(ast.InternedTerm(res))
case *ast.Array:
res := false
- match := ast.BooleanTerm(true)
+ match := ast.InternedTerm(true)
val.Until(func(term *ast.Term) bool {
if match.Equal(term) {
res = true
@@ -218,7 +249,7 @@ func builtinAny(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err
}
return false
})
- return iter(ast.BooleanTerm(res))
+ return iter(ast.InternedTerm(res))
default:
return builtins.NewOperandTypeErr(1, operands[0].Value, "array", "set")
}
@@ -228,27 +259,20 @@ func builtinMember(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term)
containee := operands[0]
switch c := operands[1].Value.(type) {
case ast.Set:
- return iter(ast.BooleanTerm(c.Contains(containee)))
+ return iter(ast.InternedTerm(c.Contains(containee)))
case *ast.Array:
- ret := false
- c.Until(func(v *ast.Term) bool {
- if v.Value.Compare(containee.Value) == 0 {
- ret = true
+ for i := range c.Len() {
+ if c.Elem(i).Value.Compare(containee.Value) == 0 {
+ return iter(ast.InternedTerm(true))
}
- return ret
- })
- return iter(ast.BooleanTerm(ret))
+ }
+ return iter(ast.InternedTerm(false))
case ast.Object:
- ret := false
- c.Until(func(_, v *ast.Term) bool {
- if v.Value.Compare(containee.Value) == 0 {
- ret = true
- }
- return ret
- })
- return iter(ast.BooleanTerm(ret))
+ return iter(ast.InternedTerm(c.Until(func(_, v *ast.Term) bool {
+ return v.Value.Compare(containee.Value) == 0
+ })))
}
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
func builtinMemberWithKey(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -259,9 +283,9 @@ func builtinMemberWithKey(_ BuiltinContext, operands []*ast.Term, iter func(*ast
if act := c.Get(key); act != nil {
ret = act.Value.Compare(val.Value) == 0
}
- return iter(ast.BooleanTerm(ret))
+ return iter(ast.InternedTerm(ret))
}
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
func init() {
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/arithmetic.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/arithmetic.go
similarity index 82%
rename from vendor/github.com/open-policy-agent/opa/topdown/arithmetic.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/arithmetic.go
index 3ac703efa3..91190330fa 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/arithmetic.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/arithmetic.go
@@ -5,11 +5,11 @@
package topdown
import (
- "fmt"
+ "errors"
"math/big"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
type arithArity1 func(a *big.Float) (*big.Float, error)
@@ -67,13 +67,11 @@ func builtinPlus(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) er
y, ok2 := n2.Int()
if ok1 && ok2 && inSmallIntRange(x) && inSmallIntRange(y) {
- return iter(ast.IntNumberTerm(x + y))
+ return iter(ast.InternedTerm(x + y))
}
- f, err := arithPlus(builtins.NumberToFloat(n1), builtins.NumberToFloat(n2))
- if err != nil {
- return err
- }
+ f := new(big.Float).Add(builtins.NumberToFloat(n1), builtins.NumberToFloat(n2))
+
return iter(ast.NewTerm(builtins.FloatToNumber(f)))
}
@@ -91,39 +89,25 @@ func builtinMultiply(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term
y, ok2 := n2.Int()
if ok1 && ok2 && inSmallIntRange(x) && inSmallIntRange(y) {
- return iter(ast.IntNumberTerm(x * y))
+ return iter(ast.InternedTerm(x * y))
}
- f, err := arithMultiply(builtins.NumberToFloat(n1), builtins.NumberToFloat(n2))
- if err != nil {
- return err
- }
- return iter(ast.NewTerm(builtins.FloatToNumber(f)))
-}
+ f := new(big.Float).Mul(builtins.NumberToFloat(n1), builtins.NumberToFloat(n2))
-func arithPlus(a, b *big.Float) (*big.Float, error) {
- return new(big.Float).Add(a, b), nil
-}
-
-func arithMinus(a, b *big.Float) (*big.Float, error) {
- return new(big.Float).Sub(a, b), nil
-}
-
-func arithMultiply(a, b *big.Float) (*big.Float, error) {
- return new(big.Float).Mul(a, b), nil
+ return iter(ast.NewTerm(builtins.FloatToNumber(f)))
}
func arithDivide(a, b *big.Float) (*big.Float, error) {
i, acc := b.Int64()
if acc == big.Exact && i == 0 {
- return nil, fmt.Errorf("divide by zero")
+ return nil, errors.New("divide by zero")
}
return new(big.Float).Quo(a, b), nil
}
func arithRem(a, b *big.Int) (*big.Int, error) {
if b.Int64() == 0 {
- return nil, fmt.Errorf("modulo by zero")
+ return nil, errors.New("modulo by zero")
}
return new(big.Int).Rem(a, b), nil
}
@@ -171,13 +155,11 @@ func builtinMinus(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) e
y, oky := n2.Int()
if okx && oky && inSmallIntRange(x) && inSmallIntRange(y) {
- return iter(ast.IntNumberTerm(x - y))
+ return iter(ast.InternedTerm(x - y))
}
- f, err := arithMinus(builtins.NumberToFloat(n1), builtins.NumberToFloat(n2))
- if err != nil {
- return err
- }
+ f := new(big.Float).Sub(builtins.NumberToFloat(n1), builtins.NumberToFloat(n2))
+
return iter(ast.NewTerm(builtins.FloatToNumber(f)))
}
@@ -185,7 +167,11 @@ func builtinMinus(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) e
s2, ok4 := operands[1].Value.(ast.Set)
if ok3 && ok4 {
- return iter(ast.NewTerm(s1.Diff(s2)))
+ diff := s1.Diff(s2)
+ if diff.Len() == 0 {
+ return iter(ast.InternedEmptySet)
+ }
+ return iter(ast.NewTerm(diff))
}
if !ok1 && !ok3 {
@@ -210,17 +196,17 @@ func builtinRem(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err
if okx && oky && inSmallIntRange(x) && inSmallIntRange(y) {
if y == 0 {
- return fmt.Errorf("modulo by zero")
+ return errors.New("modulo by zero")
}
- return iter(ast.IntNumberTerm(x % y))
+ return iter(ast.InternedTerm(x % y))
}
op1, err1 := builtins.NumberToInt(n1)
op2, err2 := builtins.NumberToInt(n2)
if err1 != nil || err2 != nil {
- return fmt.Errorf("modulo on floating-point number")
+ return errors.New("modulo on floating-point number")
}
i, err := arithRem(op1, op2)
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/array.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/array.go
similarity index 86%
rename from vendor/github.com/open-policy-agent/opa/topdown/array.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/array.go
index e7fe5be643..526e3ed26d 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/array.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/array.go
@@ -5,8 +5,8 @@
package topdown
import (
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
func builtinArrayConcat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -20,6 +20,13 @@ func builtinArrayConcat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.T
return err
}
+ if arrA.Len() == 0 {
+ return iter(operands[1])
+ }
+ if arrB.Len() == 0 {
+ return iter(operands[0])
+ }
+
arrC := make([]*ast.Term, arrA.Len()+arrB.Len())
i := 0
@@ -33,7 +40,7 @@ func builtinArrayConcat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.T
i++
})
- return iter(ast.NewTerm(ast.NewArray(arrC...)))
+ return iter(ast.ArrayTerm(arrC...))
}
func builtinArraySlice(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -68,6 +75,10 @@ func builtinArraySlice(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Te
startIndex = stopIndex
}
+ if startIndex == 0 && stopIndex >= arr.Len() {
+ return iter(operands[0])
+ }
+
return iter(ast.NewTerm(arr.Slice(startIndex, stopIndex)))
}
@@ -80,7 +91,7 @@ func builtinArrayReverse(_ BuiltinContext, operands []*ast.Term, iter func(*ast.
length := arr.Len()
reversedArr := make([]*ast.Term, length)
- for index := 0; index < length; index++ {
+ for index := range length {
reversedArr[index] = arr.Elem(length - index - 1)
}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/binary.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/binary.go
similarity index 81%
rename from vendor/github.com/open-policy-agent/opa/topdown/binary.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/binary.go
index b4f9dbd392..05050dbf7d 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/binary.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/binary.go
@@ -5,8 +5,8 @@
package topdown
import (
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
func builtinBinaryAnd(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -21,7 +21,12 @@ func builtinBinaryAnd(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter
return err
}
- return iter(ast.NewTerm(s1.Intersect(s2)))
+ i := s1.Intersect(s2)
+ if i.Len() == 0 {
+ return iter(ast.InternedEmptySet)
+ }
+
+ return iter(ast.NewTerm(i))
}
func builtinBinaryOr(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/bindings.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/bindings.go
similarity index 92%
rename from vendor/github.com/open-policy-agent/opa/topdown/bindings.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/bindings.go
index 30a8ac5ec4..9dd55f1ba7 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/bindings.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/bindings.go
@@ -6,9 +6,10 @@ package topdown
import (
"fmt"
+ "strconv"
"strings"
- "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/v1/ast"
)
type undo struct {
@@ -68,7 +69,7 @@ func (u *bindings) Plug(a *ast.Term) *ast.Term {
}
func (u *bindings) PlugNamespaced(a *ast.Term, caller *bindings) *ast.Term {
- if u != nil {
+ if u != nil && u.instr != nil {
u.instr.startTimer(evalOpPlug)
t := u.plugNamespaced(a, caller)
u.instr.stopTimer(evalOpPlug)
@@ -92,7 +93,7 @@ func (u *bindings) plugNamespaced(a *ast.Term, caller *bindings) *ast.Term {
}
cpy := *a
arr := make([]*ast.Term, v.Len())
- for i := 0; i < len(arr); i++ {
+ for i := range arr {
arr[i] = u.plugNamespaced(v.Elem(i), caller)
}
cpy.Value = ast.NewArray(arr...)
@@ -118,7 +119,7 @@ func (u *bindings) plugNamespaced(a *ast.Term, caller *bindings) *ast.Term {
case ast.Ref:
cpy := *a
ref := make(ast.Ref, len(v))
- for i := 0; i < len(ref); i++ {
+ for i := range ref {
ref[i] = u.plugNamespaced(v[i], caller)
}
cpy.Value = ref
@@ -184,7 +185,7 @@ func (u *bindings) namespaceVar(v *ast.Term, caller *bindings) *ast.Term {
// Root documents (i.e., data, input) should never be namespaced because they
// are globally unique.
if !ast.RootDocumentNames.Contains(v) {
- return ast.NewTerm(ast.Var(string(name) + fmt.Sprint(u.id)))
+ return ast.VarTerm(string(name) + strconv.FormatUint(u.id, 10))
}
}
return v
@@ -211,7 +212,7 @@ type namespacingVisitor struct {
caller *bindings
}
-func (vis namespacingVisitor) Visit(x interface{}) bool {
+func (vis namespacingVisitor) Visit(x any) bool {
switch x := x.(type) {
case *ast.ArrayComprehension:
x.Term = vis.namespaceTerm(x.Term)
@@ -253,7 +254,7 @@ func (vis namespacingVisitor) namespaceTerm(a *ast.Term) *ast.Term {
}
cpy := *a
arr := make([]*ast.Term, v.Len())
- for i := 0; i < len(arr); i++ {
+ for i := range arr {
arr[i] = vis.namespaceTerm(v.Elem(i))
}
cpy.Value = ast.NewArray(arr...)
@@ -279,7 +280,7 @@ func (vis namespacingVisitor) namespaceTerm(a *ast.Term) *ast.Term {
case ast.Ref:
cpy := *a
ref := make(ast.Ref, len(v))
- for i := 0; i < len(ref); i++ {
+ for i := range ref {
ref[i] = vis.namespaceTerm(v[i])
}
cpy.Value = ref
@@ -313,12 +314,12 @@ func (b *bindingsArrayHashmap) Put(key *ast.Term, value value) {
if b.a == nil {
b.a = new([maxLinearScan]bindingArrayKeyValue)
} else if i := b.find(key); i >= 0 {
- (*b.a)[i].value = value
+ b.a[i].value = value
return
}
if b.n < maxLinearScan {
- (*b.a)[b.n] = bindingArrayKeyValue{key, value}
+ b.a[b.n] = bindingArrayKeyValue{key, value}
b.n++
return
}
@@ -341,7 +342,7 @@ func (b *bindingsArrayHashmap) Put(key *ast.Term, value value) {
func (b *bindingsArrayHashmap) Get(key *ast.Term) (value, bool) {
if b.m == nil {
if i := b.find(key); i >= 0 {
- return (*b.a)[i].value, true
+ return b.a[i].value, true
}
return value{}, false
@@ -360,7 +361,7 @@ func (b *bindingsArrayHashmap) Delete(key *ast.Term) {
if i := b.find(key); i >= 0 {
n := b.n - 1
if i < n {
- (*b.a)[i] = (*b.a)[n]
+ b.a[i] = b.a[n]
}
b.n = n
@@ -373,8 +374,8 @@ func (b *bindingsArrayHashmap) Delete(key *ast.Term) {
func (b *bindingsArrayHashmap) Iter(f func(k *ast.Term, v value) bool) {
if b.m == nil {
- for i := 0; i < b.n; i++ {
- if f((*b.a)[i].key, (*b.a)[i].value) {
+ for i := range b.n {
+ if f(b.a[i].key, b.a[i].value) {
return
}
}
@@ -390,8 +391,8 @@ func (b *bindingsArrayHashmap) Iter(f func(k *ast.Term, v value) bool) {
func (b *bindingsArrayHashmap) find(key *ast.Term) int {
v := key.Value.(ast.Var)
- for i := 0; i < b.n; i++ {
- if (*b.a)[i].key.Value.(ast.Var) == v {
+ for i := range b.n {
+ if b.a[i].key.Value.(ast.Var) == v {
return i
}
}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/bits.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/bits.go
similarity index 96%
rename from vendor/github.com/open-policy-agent/opa/topdown/bits.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/bits.go
index 7a63c0df1e..e420ffe611 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/bits.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/bits.go
@@ -7,8 +7,8 @@ package topdown
import (
"math/big"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
type bitsArity1 func(a *big.Int) (*big.Int, error)
diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins.go
new file mode 100644
index 0000000000..e0b893d477
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins.go
@@ -0,0 +1,224 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "context"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math/rand"
+
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/metrics"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/topdown/cache"
+ "github.com/open-policy-agent/opa/v1/topdown/print"
+ "github.com/open-policy-agent/opa/v1/tracing"
+)
+
+type (
+ // Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead.
+ FunctionalBuiltin1 func(op1 ast.Value) (output ast.Value, err error)
+
+ // Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead.
+ FunctionalBuiltin2 func(op1, op2 ast.Value) (output ast.Value, err error)
+
+ // Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead.
+ FunctionalBuiltin3 func(op1, op2, op3 ast.Value) (output ast.Value, err error)
+
+ // Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead.
+ FunctionalBuiltin4 func(op1, op2, op3, op4 ast.Value) (output ast.Value, err error)
+
+ // BuiltinContext contains context from the evaluator that may be used by
+ // built-in functions.
+ BuiltinContext struct {
+ Context context.Context // request context that was passed when query started
+ Metrics metrics.Metrics // metrics registry for recording built-in specific metrics
+ Seed io.Reader // randomization source
+ Time *ast.Term // wall clock time
+ Cancel Cancel // atomic value that signals evaluation to halt
+ Runtime *ast.Term // runtime information on the OPA instance
+ Cache builtins.Cache // built-in function state cache
+ InterQueryBuiltinCache cache.InterQueryCache // cross-query built-in function state cache
+ InterQueryBuiltinValueCache cache.InterQueryValueCache // cross-query built-in function state value cache. this cache is useful for scenarios where the entry size cannot be calculated
+ NDBuiltinCache builtins.NDBCache // cache for non-deterministic built-in state
+ Location *ast.Location // location of built-in call
+ Tracers []Tracer // Deprecated: Use QueryTracers instead
+ QueryTracers []QueryTracer // tracer objects for trace() built-in function
+ TraceEnabled bool // indicates whether tracing is enabled for the evaluation
+ QueryID uint64 // identifies query being evaluated
+ ParentID uint64 // identifies parent of query being evaluated
+ PrintHook print.Hook // provides callback function to use for printing
+ RoundTripper CustomizeRoundTripper // customize transport to use for HTTP requests
+ DistributedTracingOpts tracing.Options // options to be used by distributed tracing.
+ rand *rand.Rand // randomization source for non-security-sensitive operations
+ Capabilities *ast.Capabilities
+ }
+
+ // BuiltinFunc defines an interface for implementing built-in functions.
+ // The built-in function is called with the plugged operands from the call
+ // (including the output operands.) The implementation should evaluate the
+ // operands and invoke the iterator for each successful/defined output
+ // value.
+ BuiltinFunc func(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error
+)
+
+// Rand returns a random number generator based on the Seed for this built-in
+// context. The random number will be re-used across multiple calls to this
+// function. If a random number generator cannot be created, an error is
+// returned.
+func (bctx *BuiltinContext) Rand() (*rand.Rand, error) {
+
+ if bctx.rand != nil {
+ return bctx.rand, nil
+ }
+
+ seed, err := readInt64(bctx.Seed)
+ if err != nil {
+ return nil, err
+ }
+
+ bctx.rand = rand.New(rand.NewSource(seed))
+ return bctx.rand, nil
+}
+
+// RegisterBuiltinFunc adds a new built-in function to the evaluation engine.
+func RegisterBuiltinFunc(name string, f BuiltinFunc) {
+ builtinFunctions[name] = builtinErrorWrapper(name, f)
+}
+
+// Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead.
+func RegisterFunctionalBuiltin1(name string, fun FunctionalBuiltin1) {
+ builtinFunctions[name] = functionalWrapper1(name, fun)
+}
+
+// Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead.
+func RegisterFunctionalBuiltin2(name string, fun FunctionalBuiltin2) {
+ builtinFunctions[name] = functionalWrapper2(name, fun)
+}
+
+// Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead.
+func RegisterFunctionalBuiltin3(name string, fun FunctionalBuiltin3) {
+ builtinFunctions[name] = functionalWrapper3(name, fun)
+}
+
+// Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead.
+func RegisterFunctionalBuiltin4(name string, fun FunctionalBuiltin4) {
+ builtinFunctions[name] = functionalWrapper4(name, fun)
+}
+
+// GetBuiltin returns a built-in function implementation, nil if no built-in found.
+func GetBuiltin(name string) BuiltinFunc {
+ return builtinFunctions[name]
+}
+
+// Deprecated: The BuiltinEmpty type is no longer needed. Use nil return values instead.
+type BuiltinEmpty struct{}
+
+func (BuiltinEmpty) Error() string {
+ return ""
+}
+
+var builtinFunctions = map[string]BuiltinFunc{}
+
+func builtinErrorWrapper(name string, fn BuiltinFunc) BuiltinFunc {
+ return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ err := fn(bctx, args, iter)
+ if err == nil {
+ return nil
+ }
+ return handleBuiltinErr(name, bctx.Location, err)
+ }
+}
+
+func functionalWrapper1(name string, fn FunctionalBuiltin1) BuiltinFunc {
+ return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := fn(args[0].Value)
+ if err == nil {
+ return iter(ast.NewTerm(result))
+ }
+ return handleBuiltinErr(name, bctx.Location, err)
+ }
+}
+
+func functionalWrapper2(name string, fn FunctionalBuiltin2) BuiltinFunc {
+ return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := fn(args[0].Value, args[1].Value)
+ if err == nil {
+ return iter(ast.NewTerm(result))
+ }
+ return handleBuiltinErr(name, bctx.Location, err)
+ }
+}
+
+func functionalWrapper3(name string, fn FunctionalBuiltin3) BuiltinFunc {
+ return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := fn(args[0].Value, args[1].Value, args[2].Value)
+ if err == nil {
+ return iter(ast.NewTerm(result))
+ }
+ return handleBuiltinErr(name, bctx.Location, err)
+ }
+}
+
+func functionalWrapper4(name string, fn FunctionalBuiltin4) BuiltinFunc {
+ return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := fn(args[0].Value, args[1].Value, args[2].Value, args[3].Value)
+ if err == nil {
+ return iter(ast.NewTerm(result))
+ }
+ if _, empty := err.(BuiltinEmpty); empty {
+ return nil
+ }
+ return handleBuiltinErr(name, bctx.Location, err)
+ }
+}
+
+func handleBuiltinErr(name string, loc *ast.Location, err error) error {
+ switch err := err.(type) {
+ case BuiltinEmpty:
+ return nil
+ case *Error, Halt:
+ return err
+ case builtins.ErrOperand:
+ e := &Error{
+ Code: TypeErr,
+ Message: fmt.Sprintf("%v: %v", name, err.Error()),
+ Location: loc,
+ }
+ return e.Wrap(err)
+ default:
+ e := &Error{
+ Code: BuiltinErr,
+ Message: fmt.Sprintf("%v: %v", name, err.Error()),
+ Location: loc,
+ }
+ return e.Wrap(err)
+ }
+}
+
+func readInt64(r io.Reader) (int64, error) {
+ bs := make([]byte, 8)
+ n, err := io.ReadFull(r, bs)
+ if n != len(bs) || err != nil {
+ return 0, err
+ }
+ return int64(binary.BigEndian.Uint64(bs)), nil
+}
+
+// Used to get older-style (ast.Term, error) tuples out of newer functions.
+func getResult(fn BuiltinFunc, operands ...*ast.Term) (*ast.Term, error) {
+ var result *ast.Term
+ extractionFn := func(r *ast.Term) error {
+ result = r
+ return nil
+ }
+ err := fn(BuiltinContext{}, operands, extractionFn)
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/builtins/builtins.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins/builtins.go
similarity index 90%
rename from vendor/github.com/open-policy-agent/opa/topdown/builtins/builtins.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/builtins/builtins.go
index 353f956840..7a1bdede6b 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/builtins/builtins.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins/builtins.go
@@ -7,25 +7,26 @@ package builtins
import (
"encoding/json"
+ "errors"
"fmt"
"math/big"
"strings"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/util"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/util"
)
// Cache defines the built-in cache used by the top-down evaluation. The keys
// must be comparable and should not be of type string.
-type Cache map[interface{}]interface{}
+type Cache map[any]any
// Put updates the cache for the named built-in.
-func (c Cache) Put(k, v interface{}) {
+func (c Cache) Put(k, v any) {
c[k] = v
}
// Get returns the cached value for k.
-func (c Cache) Get(k interface{}) (interface{}, bool) {
+func (c Cache) Get(k any) (any, bool) {
v, ok := c[k]
return v, ok
}
@@ -38,7 +39,7 @@ type NDBCache map[string]ast.Object
func (c NDBCache) AsValue() ast.Value {
out := ast.NewObject()
for bname, obj := range c {
- out.Insert(ast.StringTerm(bname), ast.NewTerm(obj))
+ out.Insert(ast.InternedTerm(bname), ast.NewTerm(obj))
}
return out
}
@@ -75,7 +76,7 @@ func (c NDBCache) MarshalJSON() ([]byte, error) {
func (c *NDBCache) UnmarshalJSON(data []byte) error {
out := map[string]ast.Object{}
- var incoming interface{}
+ var incoming any
// Note: We use util.Unmarshal instead of json.Unmarshal to get
// correct deserialization of number types.
@@ -97,7 +98,7 @@ func (c *NDBCache) UnmarshalJSON(data []byte) error {
out[string(k.Value.(ast.String))] = obj
return nil
}
- return fmt.Errorf("expected Object, got other Value type in conversion")
+ return errors.New("expected Object, got other Value type in conversion")
})
if err != nil {
return err
@@ -119,7 +120,7 @@ func (err ErrOperand) Error() string {
}
// NewOperandErr returns a generic operand error.
-func NewOperandErr(pos int, f string, a ...interface{}) error {
+func NewOperandErr(pos int, f string, a ...any) error {
f = fmt.Sprintf("operand %v ", pos) + f
return ErrOperand(fmt.Sprintf(f, a...))
}
@@ -128,23 +129,23 @@ func NewOperandErr(pos int, f string, a ...interface{}) error {
func NewOperandTypeErr(pos int, got ast.Value, expected ...string) error {
if len(expected) == 1 {
- return NewOperandErr(pos, "must be %v but got %v", expected[0], ast.TypeName(got))
+ return NewOperandErr(pos, "must be %v but got %v", expected[0], ast.ValueName(got))
}
- return NewOperandErr(pos, "must be one of {%v} but got %v", strings.Join(expected, ", "), ast.TypeName(got))
+ return NewOperandErr(pos, "must be one of {%v} but got %v", strings.Join(expected, ", "), ast.ValueName(got))
}
// NewOperandElementErr returns an operand error indicating an element in the
// composite operand was wrong.
func NewOperandElementErr(pos int, composite ast.Value, got ast.Value, expected ...string) error {
- tpe := ast.TypeName(composite)
+ tpe := ast.ValueName(composite)
if len(expected) == 1 {
- return NewOperandErr(pos, "must be %v of %vs but got %v containing %v", tpe, expected[0], tpe, ast.TypeName(got))
+ return NewOperandErr(pos, "must be %v of %vs but got %v containing %v", tpe, expected[0], tpe, ast.ValueName(got))
}
- return NewOperandErr(pos, "must be %v of (any of) {%v} but got %v containing %v", tpe, strings.Join(expected, ", "), tpe, ast.TypeName(got))
+ return NewOperandErr(pos, "must be %v of (any of) {%v} but got %v containing %v", tpe, strings.Join(expected, ", "), tpe, ast.ValueName(got))
}
// NewOperandEnumErr returns an operand error indicating a value was wrong.
@@ -233,7 +234,7 @@ func ObjectOperand(x ast.Value, pos int) (ast.Object, error) {
func ArrayOperand(x ast.Value, pos int) (*ast.Array, error) {
a, ok := x.(*ast.Array)
if !ok {
- return ast.NewArray(), NewOperandTypeErr(pos, x, "array")
+ return nil, NewOperandTypeErr(pos, x, "array")
}
return a, nil
}
@@ -262,7 +263,7 @@ func NumberToInt(n ast.Number) (*big.Int, error) {
f := NumberToFloat(n)
r, accuracy := f.Int(nil)
if accuracy != big.Exact {
- return nil, fmt.Errorf("illegal value")
+ return nil, errors.New("illegal value")
}
return r, nil
}
@@ -309,7 +310,7 @@ func RuneSliceOperand(x ast.Value, pos int) ([]rune, error) {
}
var f = make([]rune, a.Len())
- for k := 0; k < a.Len(); k++ {
+ for k := range a.Len() {
b := a.Elem(k)
c, ok := b.Value.(ast.String)
if !ok {
diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/cache.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/cache.go
new file mode 100644
index 0000000000..a6c89b4537
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/cache.go
@@ -0,0 +1,363 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "slices"
+
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+// VirtualCache defines the interface for a cache that stores the results of
+// evaluated virtual documents (rules).
+// The cache is a stack of frames, where each frame is a mapping from references
+// to values.
+type VirtualCache interface {
+ // Push pushes a new, empty frame of value mappings onto the stack.
+ Push()
+
+ // Pop pops the top frame of value mappings from the stack, removing all associated entries.
+ Pop()
+
+ // Get returns the value associated with the given reference. The second return value
+ // indicates whether the reference has a recorded 'undefined' result.
+ Get(ref ast.Ref) (*ast.Term, bool)
+
+ // Put associates the given reference with the given value. If the value is nil, the reference
+ // is marked as having an 'undefined' result.
+ Put(ref ast.Ref, value *ast.Term)
+
+ // Keys returns the set of keys that have been cached for the active frame.
+ Keys() []ast.Ref
+}
+
+// BaseCache defines the interface for a cache that stores cached base documents, i.e. data.
+type BaseCache interface {
+ Get(ast.Ref) ast.Value
+ Put(ast.Ref, ast.Value)
+}
+
+type virtualCache struct {
+ stack []*virtualCacheElem
+}
+
+type virtualCacheElem struct {
+ value *ast.Term
+ children *util.HasherMap[*ast.Term, *virtualCacheElem]
+ undefined bool
+}
+
+func NewVirtualCache() VirtualCache {
+ cache := &virtualCache{}
+ cache.Push()
+ return cache
+}
+
+func (c *virtualCache) Push() {
+ c.stack = append(c.stack, newVirtualCacheElem())
+}
+
+func (c *virtualCache) Pop() {
+ c.stack = c.stack[:len(c.stack)-1]
+}
+
+// Returns the resolved value of the AST term and a flag indicating if the value
+// should be interpretted as undefined:
+//
+// nil, true indicates the ref is undefined
+// ast.Term, false indicates the ref is defined
+// nil, false indicates the ref has not been cached
+// ast.Term, true is impossible
+func (c *virtualCache) Get(ref ast.Ref) (*ast.Term, bool) {
+ node := c.stack[len(c.stack)-1]
+ for i := range ref {
+ x, ok := node.children.Get(ref[i])
+ if !ok {
+ return nil, false
+ }
+ node = x
+ }
+ if node.undefined {
+ return nil, true
+ }
+
+ return node.value, false
+}
+
+// If value is a nil pointer, set the 'undefined' flag on the cache element to
+// indicate that the Ref has resolved to undefined.
+func (c *virtualCache) Put(ref ast.Ref, value *ast.Term) {
+ node := c.stack[len(c.stack)-1]
+ for i := range ref {
+ x, ok := node.children.Get(ref[i])
+ if ok {
+ node = x
+ } else {
+ next := newVirtualCacheElem()
+ node.children.Put(ref[i], next)
+ node = next
+ }
+ }
+ if value != nil {
+ node.value = value
+ } else {
+ node.undefined = true
+ }
+}
+
+func (c *virtualCache) Keys() []ast.Ref {
+ node := c.stack[len(c.stack)-1]
+ return keysRecursive(nil, node)
+}
+
+func keysRecursive(root ast.Ref, node *virtualCacheElem) []ast.Ref {
+ var keys []ast.Ref
+ node.children.Iter(func(k *ast.Term, v *virtualCacheElem) bool {
+ ref := root.Append(k)
+ if v.value != nil {
+ keys = append(keys, ref)
+ }
+ if v.children.Len() > 0 {
+ keys = append(keys, keysRecursive(ref, v)...)
+ }
+ return false
+ })
+ return keys
+}
+
+func newVirtualCacheElem() *virtualCacheElem {
+ return &virtualCacheElem{children: newVirtualCacheHashMap()}
+}
+
+func newVirtualCacheHashMap() *util.HasherMap[*ast.Term, *virtualCacheElem] {
+ return util.NewHasherMap[*ast.Term, *virtualCacheElem](ast.TermValueEqual)
+}
+
+// baseCache implements a trie structure to cache base documents read out of
+// storage. Values inserted into the cache may contain other values that were
+// previously inserted. In this case, the previous values are erased from the
+// structure.
+type baseCache struct {
+ root *baseCacheElem
+}
+
+func newBaseCache() *baseCache {
+ return &baseCache{
+ root: newBaseCacheElem(),
+ }
+}
+
+func (c *baseCache) Get(ref ast.Ref) ast.Value {
+ node := c.root
+ for i := range ref {
+ node = node.children[ref[i].Value]
+ if node == nil {
+ return nil
+ } else if node.value != nil {
+ if len(ref) == 1 && ast.IsScalar(node.value) {
+ // If the node is a scalar, return the value directly
+ // and avoid an allocation when calling Find.
+ return node.value
+ }
+
+ result, err := node.value.Find(ref[i+1:])
+ if err != nil {
+ return nil
+ }
+ return result
+ }
+ }
+ return nil
+}
+
+func (c *baseCache) Put(ref ast.Ref, value ast.Value) {
+ node := c.root
+ for i := range ref {
+ if child, ok := node.children[ref[i].Value]; ok {
+ node = child
+ } else {
+ child := newBaseCacheElem()
+ node.children[ref[i].Value] = child
+ node = child
+ }
+ }
+ node.set(value)
+}
+
+type baseCacheElem struct {
+ value ast.Value
+ children map[ast.Value]*baseCacheElem
+}
+
+func newBaseCacheElem() *baseCacheElem {
+ return &baseCacheElem{
+ children: map[ast.Value]*baseCacheElem{},
+ }
+}
+
+func (e *baseCacheElem) set(value ast.Value) {
+ e.value = value
+ e.children = map[ast.Value]*baseCacheElem{}
+}
+
+type refStack struct {
+ sl []refStackElem
+}
+
+type refStackElem struct {
+ refs []ast.Ref
+}
+
+func newRefStack() *refStack {
+ return &refStack{}
+}
+
+func (s *refStack) Push(refs []ast.Ref) {
+ s.sl = append(s.sl, refStackElem{refs: refs})
+}
+
+func (s *refStack) Pop() {
+ if s == nil {
+ return
+ }
+ s.sl = s.sl[:len(s.sl)-1]
+}
+
+func (s *refStack) Prefixed(ref ast.Ref) bool {
+ if s != nil {
+ for i := len(s.sl) - 1; i >= 0; i-- {
+ if slices.ContainsFunc(s.sl[i].refs, ref.HasPrefix) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+type comprehensionCache struct {
+ stack []map[*ast.Term]*comprehensionCacheElem
+}
+
+type comprehensionCacheElem struct {
+ value *ast.Term
+ children *util.HasherMap[*ast.Term, *comprehensionCacheElem]
+}
+
+func newComprehensionCache() *comprehensionCache {
+ cache := &comprehensionCache{}
+ cache.Push()
+ return cache
+}
+
+func (c *comprehensionCache) Push() {
+ c.stack = append(c.stack, map[*ast.Term]*comprehensionCacheElem{})
+}
+
+func (c *comprehensionCache) Pop() {
+ c.stack = c.stack[:len(c.stack)-1]
+}
+
+func (c *comprehensionCache) Elem(t *ast.Term) (*comprehensionCacheElem, bool) {
+ elem, ok := c.stack[len(c.stack)-1][t]
+ return elem, ok
+}
+
+func (c *comprehensionCache) Set(t *ast.Term, elem *comprehensionCacheElem) {
+ c.stack[len(c.stack)-1][t] = elem
+}
+
+func newComprehensionCacheElem() *comprehensionCacheElem {
+ return &comprehensionCacheElem{children: newComprehensionCacheHashMap()}
+}
+
+func (c *comprehensionCacheElem) Get(key []*ast.Term) *ast.Term {
+ node := c
+ for i := range key {
+ x, ok := node.children.Get(key[i])
+ if !ok {
+ return nil
+ }
+ node = x
+ }
+ return node.value
+}
+
+func (c *comprehensionCacheElem) Put(key []*ast.Term, value *ast.Term) {
+ node := c
+ for i := range key {
+ x, ok := node.children.Get(key[i])
+ if ok {
+ node = x
+ } else {
+ next := newComprehensionCacheElem()
+ node.children.Put(key[i], next)
+ node = next
+ }
+ }
+ node.value = value
+}
+
+func newComprehensionCacheHashMap() *util.HasherMap[*ast.Term, *comprehensionCacheElem] {
+ return util.NewHasherMap[*ast.Term, *comprehensionCacheElem](ast.TermValueEqual)
+}
+
+type functionMocksStack struct {
+ stack []*functionMocksElem
+}
+
+type functionMocksElem []frame
+
+type frame map[string]*ast.Term
+
+func newFunctionMocksStack() *functionMocksStack {
+ stack := &functionMocksStack{}
+ stack.Push()
+ return stack
+}
+
+func newFunctionMocksElem() *functionMocksElem {
+ return &functionMocksElem{}
+}
+
+func (s *functionMocksStack) Push() {
+ s.stack = append(s.stack, newFunctionMocksElem())
+}
+
+func (s *functionMocksStack) Pop() {
+ s.stack = s.stack[:len(s.stack)-1]
+}
+
+func (s *functionMocksStack) PopPairs() {
+ current := s.stack[len(s.stack)-1]
+ *current = (*current)[:len(*current)-1]
+}
+
+func (s *functionMocksStack) PutPairs(mocks [][2]*ast.Term) {
+ el := frame{}
+ for i := range mocks {
+ el[mocks[i][0].Value.String()] = mocks[i][1]
+ }
+ s.Put(el)
+}
+
+func (s *functionMocksStack) Put(el frame) {
+ current := s.stack[len(s.stack)-1]
+ *current = append(*current, el)
+}
+
+func (s *functionMocksStack) Get(f ast.Ref) (*ast.Term, bool) {
+ if s == nil {
+ return nil, false
+ }
+
+ current := *s.stack[len(s.stack)-1]
+ for i := len(current) - 1; i >= 0; i-- {
+ if r, ok := current[i][f.String()]; ok {
+ return r, true
+ }
+ }
+ return nil, false
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/cache/cache.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/cache/cache.go
similarity index 57%
rename from vendor/github.com/open-policy-agent/opa/topdown/cache/cache.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/cache/cache.go
index 55ed340619..d514bed787 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/cache/cache.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/cache/cache.go
@@ -13,8 +13,8 @@ import (
"sync"
"time"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/util"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/util"
)
const (
@@ -24,16 +24,87 @@ const (
defaultStaleEntryEvictionPeriodSeconds = int64(0) // never
)
+var interQueryBuiltinValueCacheDefaultConfigs = map[string]*NamedValueCacheConfig{}
+
+func getDefaultInterQueryBuiltinValueCacheConfig(name string) *NamedValueCacheConfig {
+ return interQueryBuiltinValueCacheDefaultConfigs[name]
+}
+
+// RegisterDefaultInterQueryBuiltinValueCacheConfig registers a default configuration for the inter-query value cache;
+// used when none has been explicitly configured.
+// To disable a named cache when not configured, pass a nil config.
+func RegisterDefaultInterQueryBuiltinValueCacheConfig(name string, config *NamedValueCacheConfig) {
+ interQueryBuiltinValueCacheDefaultConfigs[name] = config
+}
+
// Config represents the configuration for the inter-query builtin cache.
type Config struct {
InterQueryBuiltinCache InterQueryBuiltinCacheConfig `json:"inter_query_builtin_cache"`
InterQueryBuiltinValueCache InterQueryBuiltinValueCacheConfig `json:"inter_query_builtin_value_cache"`
}
+// Clone creates a deep copy of Config.
+func (c *Config) Clone() *Config {
+ if c == nil {
+ return nil
+ }
+
+ return &Config{
+ InterQueryBuiltinCache: *c.InterQueryBuiltinCache.Clone(),
+ InterQueryBuiltinValueCache: *c.InterQueryBuiltinValueCache.Clone(),
+ }
+}
+
+// NamedValueCacheConfig represents the configuration of a named cache that built-in functions can utilize.
+// A default configuration to be used if not explicitly configured can be registered using RegisterDefaultInterQueryBuiltinValueCacheConfig.
+type NamedValueCacheConfig struct {
+ MaxNumEntries *int `json:"max_num_entries,omitempty"`
+}
+
+// Clone creates a deep copy of NamedValueCacheConfig.
+func (n *NamedValueCacheConfig) Clone() *NamedValueCacheConfig {
+ if n == nil {
+ return nil
+ }
+
+ clone := &NamedValueCacheConfig{}
+
+ if n.MaxNumEntries != nil {
+ maxEntries := *n.MaxNumEntries
+ clone.MaxNumEntries = &maxEntries
+ }
+
+ return clone
+}
+
// InterQueryBuiltinValueCacheConfig represents the configuration of the inter-query value cache that built-in functions can utilize.
// MaxNumEntries - max number of cache entries
type InterQueryBuiltinValueCacheConfig struct {
- MaxNumEntries *int `json:"max_num_entries,omitempty"`
+ MaxNumEntries *int `json:"max_num_entries,omitempty"`
+ NamedCacheConfigs map[string]*NamedValueCacheConfig `json:"named,omitempty"`
+}
+
+// Clone creates a deep copy of InterQueryBuiltinValueCacheConfig.
+func (i *InterQueryBuiltinValueCacheConfig) Clone() *InterQueryBuiltinValueCacheConfig {
+ if i == nil {
+ return nil
+ }
+
+ clone := &InterQueryBuiltinValueCacheConfig{}
+
+ if i.MaxNumEntries != nil {
+ maxEntries := *i.MaxNumEntries
+ clone.MaxNumEntries = &maxEntries
+ }
+
+ if i.NamedCacheConfigs != nil {
+ clone.NamedCacheConfigs = make(map[string]*NamedValueCacheConfig, len(i.NamedCacheConfigs))
+ for k, v := range i.NamedCacheConfigs {
+ clone.NamedCacheConfigs[k] = v.Clone()
+ }
+ }
+
+ return clone
}
// InterQueryBuiltinCacheConfig represents the configuration of the inter-query cache that built-in functions can utilize.
@@ -46,6 +117,32 @@ type InterQueryBuiltinCacheConfig struct {
StaleEntryEvictionPeriodSeconds *int64 `json:"stale_entry_eviction_period_seconds,omitempty"`
}
+// Clone creates a deep copy of InterQueryBuiltinCacheConfig.
+func (i *InterQueryBuiltinCacheConfig) Clone() *InterQueryBuiltinCacheConfig {
+ if i == nil {
+ return nil
+ }
+
+ clone := &InterQueryBuiltinCacheConfig{}
+
+ if i.MaxSizeBytes != nil {
+ maxSize := *i.MaxSizeBytes
+ clone.MaxSizeBytes = &maxSize
+ }
+
+ if i.ForcedEvictionThresholdPercentage != nil {
+ threshold := *i.ForcedEvictionThresholdPercentage
+ clone.ForcedEvictionThresholdPercentage = &threshold
+ }
+
+ if i.StaleEntryEvictionPeriodSeconds != nil {
+ period := *i.StaleEntryEvictionPeriodSeconds
+ clone.StaleEntryEvictionPeriodSeconds = &period
+ }
+
+ return clone
+}
+
// ParseCachingConfig returns the config for the inter-query cache.
func ParseCachingConfig(raw []byte) (*Config, error) {
if raw == nil {
@@ -59,8 +156,16 @@ func ParseCachingConfig(raw []byte) (*Config, error) {
maxInterQueryBuiltinValueCacheSize := new(int)
*maxInterQueryBuiltinValueCacheSize = defaultInterQueryBuiltinValueCacheSize
- return &Config{InterQueryBuiltinCache: InterQueryBuiltinCacheConfig{MaxSizeBytes: maxSize, ForcedEvictionThresholdPercentage: threshold, StaleEntryEvictionPeriodSeconds: period},
- InterQueryBuiltinValueCache: InterQueryBuiltinValueCacheConfig{MaxNumEntries: maxInterQueryBuiltinValueCacheSize}}, nil
+ return &Config{
+ InterQueryBuiltinCache: InterQueryBuiltinCacheConfig{
+ MaxSizeBytes: maxSize,
+ ForcedEvictionThresholdPercentage: threshold,
+ StaleEntryEvictionPeriodSeconds: period,
+ },
+ InterQueryBuiltinValueCache: InterQueryBuiltinValueCacheConfig{
+ MaxNumEntries: maxInterQueryBuiltinValueCacheSize,
+ },
+ }, nil
}
var config Config
@@ -114,6 +219,13 @@ func (c *Config) validateAndInjectDefaults() error {
}
}
+ for name, namedConfig := range c.InterQueryBuiltinValueCache.NamedCacheConfigs {
+ numEntries := *namedConfig.MaxNumEntries
+ if numEntries < 0 {
+ return fmt.Errorf("invalid max_num_entries %v for named cache %v", numEntries, name)
+ }
+ }
+
return nil
}
@@ -154,11 +266,14 @@ func NewInterQueryCache(config *Config) InterQueryCache {
func NewInterQueryCacheWithContext(ctx context.Context, config *Config) InterQueryCache {
iqCache := newCache(config)
if iqCache.staleEntryEvictionTimePeriodSeconds() > 0 {
- cleanupTicker := time.NewTicker(time.Duration(iqCache.staleEntryEvictionTimePeriodSeconds()) * time.Second)
go func() {
+ cleanupTicker := time.NewTicker(time.Duration(iqCache.staleEntryEvictionTimePeriodSeconds()) * time.Second)
for {
select {
case <-cleanupTicker.C:
+ // NOTE: We stop the ticker and create a new one here to ensure that applications
+ // get _at least_ staleEntryEvictionTimePeriodSeconds with the cache unlocked;
+ // see https://github.com/open-policy-agent/opa/pull/7188/files#r1855342998
cleanupTicker.Stop()
iqCache.cleanStaleValues()
cleanupTicker = time.NewTicker(time.Duration(iqCache.staleEntryEvictionTimePeriodSeconds()) * time.Second)
@@ -287,7 +402,7 @@ func (c *cache) unsafeDelete(k ast.Value) {
c.l.Remove(cacheItem.keyElement)
}
-func (c *cache) unsafeClone(value InterQueryCacheValue) (InterQueryCacheValue, error) {
+func (*cache) unsafeClone(value InterQueryCacheValue) (InterQueryCacheValue, error) {
return value.Clone()
}
@@ -327,62 +442,59 @@ func (c *cache) cleanStaleValues() (dropped int) {
return dropped
}
-type InterQueryValueCache interface {
+type InterQueryValueCacheBucket interface {
Get(key ast.Value) (value any, found bool)
Insert(key ast.Value, value any) int
Delete(key ast.Value)
- UpdateConfig(config *Config)
}
-type interQueryValueCache struct {
- items map[string]any
- config *Config
+type interQueryValueCacheBucket struct {
+ items util.HasherMap[ast.Value, any]
+ config *NamedValueCacheConfig
mtx sync.RWMutex
}
-// Get returns the value in the cache for k.
-func (c *interQueryValueCache) Get(k ast.Value) (any, bool) {
+func newItemsMap() *util.HasherMap[ast.Value, any] {
+ return util.NewHasherMap[ast.Value, any](ast.ValueEqual)
+}
+
+func (c *interQueryValueCacheBucket) Get(k ast.Value) (any, bool) {
c.mtx.RLock()
defer c.mtx.RUnlock()
- value, ok := c.items[k.String()]
- return value, ok
+ return c.items.Get(k)
}
-// Insert inserts a key k into the cache with value v.
-func (c *interQueryValueCache) Insert(k ast.Value, v any) (dropped int) {
+func (c *interQueryValueCacheBucket) Insert(k ast.Value, v any) (dropped int) {
c.mtx.Lock()
defer c.mtx.Unlock()
maxEntries := c.maxNumEntries()
if maxEntries > 0 {
- if len(c.items) >= maxEntries {
- itemsToRemove := len(c.items) - maxEntries + 1
+ l := c.items.Len()
+ if l >= maxEntries {
+ itemsToRemove := l - maxEntries + 1
// Delete a (semi-)random key to make room for the new one.
- for k := range c.items {
- delete(c.items, k)
+ c.items.Iter(func(k ast.Value, _ any) bool {
+ c.items.Delete(k)
dropped++
- if itemsToRemove == dropped {
- break
- }
- }
+ return itemsToRemove == dropped
+ })
}
}
- c.items[k.String()] = v
+ c.items.Put(k, v)
return dropped
}
-// Delete deletes the value in the cache for k.
-func (c *interQueryValueCache) Delete(k ast.Value) {
+func (c *interQueryValueCacheBucket) Delete(k ast.Value) {
c.mtx.Lock()
defer c.mtx.Unlock()
- delete(c.items, k.String())
+ c.items.Delete(k)
}
-// UpdateConfig updates the cache config.
-func (c *interQueryValueCache) UpdateConfig(config *Config) {
+func (c *interQueryValueCacheBucket) updateConfig(config *NamedValueCacheConfig) {
if config == nil {
return
}
@@ -391,16 +503,149 @@ func (c *interQueryValueCache) UpdateConfig(config *Config) {
c.config = config
}
-func (c *interQueryValueCache) maxNumEntries() int {
+func (c *interQueryValueCacheBucket) maxNumEntries() int {
if c.config == nil {
return defaultInterQueryBuiltinValueCacheSize
}
- return *c.config.InterQueryBuiltinValueCache.MaxNumEntries
+ return *c.config.MaxNumEntries
+}
+
+type InterQueryValueCache interface {
+ InterQueryValueCacheBucket
+ GetCache(name string) InterQueryValueCacheBucket
+ UpdateConfig(config *Config)
}
func NewInterQueryValueCache(_ context.Context, config *Config) InterQueryValueCache {
- return &interQueryValueCache{
- items: map[string]any{},
- config: config,
+ var c *InterQueryBuiltinValueCacheConfig
+ var nc *NamedValueCacheConfig
+ if config != nil {
+ c = &config.InterQueryBuiltinValueCache
+ // NOTE: This is a side-effect of reusing the interQueryValueCacheBucket as the global cache.
+ // It's a hidden implementation detail that we can clean up in the future when revisiting the named caches
+ // to automatically apply them to any built-in instead of the global cache.
+ nc = &NamedValueCacheConfig{
+ MaxNumEntries: c.MaxNumEntries,
+ }
+ }
+
+ return &interQueryBuiltinValueCache{
+ globalCache: interQueryValueCacheBucket{
+ items: *newItemsMap(),
+ config: nc,
+ },
+ namedCaches: map[string]*interQueryValueCacheBucket{},
+ config: c,
+ }
+}
+
+type interQueryBuiltinValueCache struct {
+ globalCache interQueryValueCacheBucket
+ namedCachesLock sync.RWMutex
+ namedCaches map[string]*interQueryValueCacheBucket
+ config *InterQueryBuiltinValueCacheConfig
+}
+
+func (c *interQueryBuiltinValueCache) Get(k ast.Value) (any, bool) {
+ if c == nil {
+ return nil, false
+ }
+
+ return c.globalCache.Get(k)
+}
+
+func (c *interQueryBuiltinValueCache) Insert(k ast.Value, v any) int {
+ if c == nil {
+ return 0
+ }
+
+ return c.globalCache.Insert(k, v)
+}
+
+func (c *interQueryBuiltinValueCache) Delete(k ast.Value) {
+ if c == nil {
+ return
+ }
+
+ c.globalCache.Delete(k)
+}
+
+func (c *interQueryBuiltinValueCache) GetCache(name string) InterQueryValueCacheBucket {
+ if c == nil {
+ return nil
+ }
+
+ if c.namedCaches == nil {
+ return nil
+ }
+
+ c.namedCachesLock.RLock()
+ nc, ok := c.namedCaches[name]
+ c.namedCachesLock.RUnlock()
+
+ if !ok {
+ c.namedCachesLock.Lock()
+ defer c.namedCachesLock.Unlock()
+
+ if nc, ok := c.namedCaches[name]; ok {
+ // Some other goroutine has created the cache while we were waiting for the lock.
+ return nc
+ }
+
+ var config *NamedValueCacheConfig
+ if c.config != nil {
+ config = c.config.NamedCacheConfigs[name]
+ if config == nil {
+ config = getDefaultInterQueryBuiltinValueCacheConfig(name)
+ }
+ }
+
+ if config == nil {
+ // No config, cache disabled.
+ return nil
+ }
+
+ nc = &interQueryValueCacheBucket{
+ items: *newItemsMap(),
+ config: config,
+ }
+
+ c.namedCaches[name] = nc
+ }
+
+ return nc
+}
+
+func (c *interQueryBuiltinValueCache) UpdateConfig(config *Config) {
+ if c == nil {
+ return
+ }
+
+ if config == nil {
+ c.globalCache.updateConfig(nil)
+ } else {
+
+ c.globalCache.updateConfig(&NamedValueCacheConfig{
+ MaxNumEntries: config.InterQueryBuiltinValueCache.MaxNumEntries,
+ })
+ }
+
+ c.namedCachesLock.Lock()
+ defer c.namedCachesLock.Unlock()
+
+ c.config = &config.InterQueryBuiltinValueCache
+
+ for name, nc := range c.namedCaches {
+ // For each named cache: if it has a config, update it; if no config, remove it.
+ namedConfig := c.config.NamedCacheConfigs[name]
+ if namedConfig == nil {
+ namedConfig = getDefaultInterQueryBuiltinValueCacheConfig(name)
+ }
+
+ if namedConfig == nil {
+ delete(c.namedCaches, name)
+ } else {
+ nc.updateConfig(namedConfig)
+ }
}
}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/cancel.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/cancel.go
new file mode 100644
index 0000000000..534e0799a1
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/cancel.go
@@ -0,0 +1,33 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "sync/atomic"
+)
+
+// Cancel defines the interface for cancelling topdown queries. Cancel
+// operations are thread-safe and idempotent.
+type Cancel interface {
+ Cancel()
+ Cancelled() bool
+}
+
+type cancel struct {
+ flag int32
+}
+
+// NewCancel returns a new Cancel object.
+func NewCancel() Cancel {
+ return &cancel{}
+}
+
+func (c *cancel) Cancel() {
+ atomic.StoreInt32(&c.flag, 1)
+}
+
+func (c *cancel) Cancelled() bool {
+ return atomic.LoadInt32(&c.flag) != 0
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/casts.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/casts.go
similarity index 76%
rename from vendor/github.com/open-policy-agent/opa/topdown/casts.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/casts.go
index 2eb8f97fc9..85e1a9c015 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/casts.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/casts.go
@@ -6,24 +6,38 @@ package topdown
import (
"strconv"
+ "strings"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
func builtinToNumber(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
switch a := operands[0].Value.(type) {
case ast.Null:
- return iter(ast.NumberTerm("0"))
+ return iter(ast.InternedTerm(0))
case ast.Boolean:
if a {
- return iter(ast.NumberTerm("1"))
+ return iter(ast.InternedTerm(1))
}
- return iter(ast.NumberTerm("0"))
+ return iter(ast.InternedTerm(0))
case ast.Number:
- return iter(ast.NewTerm(a))
+ return iter(operands[0])
case ast.String:
- _, err := strconv.ParseFloat(string(a), 64)
+ strValue := string(a)
+
+ if it := ast.InternedIntNumberTermFromString(strValue); it != nil {
+ return iter(it)
+ }
+
+ trimmedVal := strings.TrimLeft(strValue, "+-")
+ lowerCaseVal := strings.ToLower(trimmedVal)
+
+ if lowerCaseVal == "inf" || lowerCaseVal == "infinity" || lowerCaseVal == "nan" {
+ return builtins.NewOperandTypeErr(1, operands[0].Value, "valid number string")
+ }
+
+ _, err := strconv.ParseFloat(strValue, 64)
if err != nil {
return err
}
@@ -32,7 +46,7 @@ func builtinToNumber(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term
return builtins.NewOperandTypeErr(1, operands[0].Value, "null", "boolean", "number", "string")
}
-// Deprecated in v0.13.0.
+// Deprecated: deprecated in v0.13.0.
func builtinToArray(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
switch val := operands[0].Value.(type) {
case *ast.Array:
@@ -50,7 +64,7 @@ func builtinToArray(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term)
}
}
-// Deprecated in v0.13.0.
+// Deprecated: deprecated in v0.13.0.
func builtinToSet(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
switch val := operands[0].Value.(type) {
case *ast.Array:
@@ -66,7 +80,7 @@ func builtinToSet(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) e
}
}
-// Deprecated in v0.13.0.
+// Deprecated: deprecated in v0.13.0.
func builtinToString(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
switch val := operands[0].Value.(type) {
case ast.String:
@@ -76,7 +90,7 @@ func builtinToString(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term
}
}
-// Deprecated in v0.13.0.
+// Deprecated: deprecated in v0.13.0.
func builtinToBoolean(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
switch val := operands[0].Value.(type) {
case ast.Boolean:
@@ -86,7 +100,7 @@ func builtinToBoolean(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter
}
}
-// Deprecated in v0.13.0.
+// Deprecated: deprecated in v0.13.0.
func builtinToNull(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
switch val := operands[0].Value.(type) {
case ast.Null:
@@ -96,7 +110,7 @@ func builtinToNull(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term)
}
}
-// Deprecated in v0.13.0.
+// Deprecated: deprecated in v0.13.0.
func builtinToObject(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
switch val := operands[0].Value.(type) {
case ast.Object:
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/cidr.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/cidr.go
similarity index 94%
rename from vendor/github.com/open-policy-agent/opa/topdown/cidr.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/cidr.go
index 5b011bd161..12a4414963 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/cidr.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/cidr.go
@@ -6,11 +6,12 @@ import (
"fmt"
"math/big"
"net"
+ "slices"
"sort"
- "github.com/open-policy-agent/opa/ast"
cidrMerge "github.com/open-policy-agent/opa/internal/cidr/merge"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
func getNetFromOperand(v ast.Value) (*net.IPNet, error) {
@@ -31,7 +32,7 @@ func getLastIP(cidr *net.IPNet) (net.IP, error) {
prefixLen, bits := cidr.Mask.Size()
if prefixLen == 0 && bits == 0 {
// non-standard mask, see https://golang.org/pkg/net/#IPMask.Size
- return nil, fmt.Errorf("CIDR mask is in non-standard format")
+ return nil, errors.New("CIDR mask is in non-standard format")
}
var lastIP []byte
if prefixLen == bits {
@@ -75,7 +76,7 @@ func builtinNetCIDRIntersects(_ BuiltinContext, operands []*ast.Term, iter func(
// If either net contains the others starting IP they are overlapping
cidrsOverlap := cidrnetA.Contains(cidrnetB.IP) || cidrnetB.Contains(cidrnetA.IP)
- return iter(ast.BooleanTerm(cidrsOverlap))
+ return iter(ast.InternedTerm(cidrsOverlap))
}
func builtinNetCIDRContains(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -92,7 +93,7 @@ func builtinNetCIDRContains(_ BuiltinContext, operands []*ast.Term, iter func(*a
ip := net.ParseIP(string(bStr))
if ip != nil {
- return iter(ast.BooleanTerm(cidrnetA.Contains(ip)))
+ return iter(ast.InternedTerm(cidrnetA.Contains(ip)))
}
// It wasn't an IP, try and parse it as a CIDR
@@ -113,7 +114,7 @@ func builtinNetCIDRContains(_ BuiltinContext, operands []*ast.Term, iter func(*a
cidrContained = cidrnetA.Contains(lastIP)
}
- return iter(ast.BooleanTerm(cidrContained))
+ return iter(ast.InternedTerm(cidrContained))
}
var errNetCIDRContainsMatchElementType = errors.New("element must be string or non-empty array")
@@ -137,12 +138,12 @@ func evalNetCIDRContainsMatchesOperand(operand int, a *ast.Term, iter func(cidr,
case ast.String:
return iter(a, a)
case *ast.Array:
- for i := 0; i < v.Len(); i++ {
+ for i := range v.Len() {
cidr, err := getCIDRMatchTerm(v.Elem(i))
if err != nil {
return fmt.Errorf("operand %v: %v", operand, err)
}
- if err := iter(cidr, ast.IntNumberTerm(i)); err != nil {
+ if err := iter(cidr, ast.InternedTerm(i)); err != nil {
return err
}
}
@@ -219,13 +220,13 @@ func builtinNetCIDRExpand(bctx BuiltinContext, operands []*ast.Term, iter func(*
func builtinNetCIDRIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
cidr, err := builtins.StringOperand(operands[0].Value, 1)
if err != nil {
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
if _, _, err := net.ParseCIDR(string(cidr)); err != nil {
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
- return iter(ast.BooleanTerm(true))
+ return iter(ast.InternedTerm(true))
}
type cidrBlockRange struct {
@@ -255,7 +256,7 @@ func (c cidrBlockRanges) Less(i, j int) bool {
}
// Then compare first IP.
- cmp = bytes.Compare(*c[i].First, *c[i].First)
+ cmp = bytes.Compare(*c[i].First, *c[j].First)
if cmp < 0 {
return true
} else if cmp > 0 {
@@ -274,7 +275,7 @@ func builtinNetCIDRMerge(_ BuiltinContext, operands []*ast.Term, iter func(*ast.
switch v := operands[0].Value.(type) {
case *ast.Array:
- for i := 0; i < v.Len(); i++ {
+ for i := range v.Len() {
network, err := generateIPNet(v.Elem(i))
if err != nil {
return err
@@ -392,7 +393,7 @@ func mergeCIDRs(ranges cidrBlockRanges) cidrBlockRanges {
ranges[i-1] = &cidrBlockRange{First: &firstIPRange, Last: &lastIPRange, Network: nil}
// Delete ranges[i] since merged with the previous.
- ranges = append(ranges[:i], ranges[i+1:]...)
+ ranges = slices.Delete(ranges, i, i+1)
}
}
return ranges
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/comparison.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/comparison.go
similarity index 91%
rename from vendor/github.com/open-policy-agent/opa/topdown/comparison.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/comparison.go
index 0d033d2c32..6c10129faa 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/comparison.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/comparison.go
@@ -4,7 +4,7 @@
package topdown
-import "github.com/open-policy-agent/opa/ast"
+import "github.com/open-policy-agent/opa/v1/ast"
type compareFunc func(a, b ast.Value) bool
@@ -34,7 +34,7 @@ func compareEq(a, b ast.Value) bool {
func builtinCompare(cmp compareFunc) BuiltinFunc {
return func(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
- return iter(ast.BooleanTerm(cmp(operands[0].Value, operands[1].Value)))
+ return iter(ast.InternedTerm(cmp(operands[0].Value, operands[1].Value)))
}
}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/copypropagation.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/copypropagation.go
similarity index 95%
rename from vendor/github.com/open-policy-agent/opa/topdown/copypropagation/copypropagation.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/copypropagation.go
index 8824d19bd2..7767e7ff52 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/copypropagation.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/copypropagation.go
@@ -8,7 +8,7 @@ import (
"fmt"
"sort"
- "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/v1/ast"
)
// CopyPropagator implements a simple copy propagation optimization to remove
@@ -163,7 +163,8 @@ func (p *CopyPropagator) Apply(query ast.Body) ast.Body {
// to the current result.
// Invariant: Live vars are bound (above) and reserved vars are implicitly ground.
- safe := ast.ReservedVars.Copy()
+ safe := ast.NewVarSetOfSize(len(p.livevars) + len(ast.ReservedVars) + 6)
+ safe.Update(ast.ReservedVars)
safe.Update(p.livevars)
safe.Update(ast.OutputVarsFromBody(p.compiler, result, safe))
unsafe := result.Vars(ast.SafetyCheckVisitorParams).Diff(safe)
@@ -173,9 +174,8 @@ func (p *CopyPropagator) Apply(query ast.Body) ast.Body {
providesSafety := false
outputVars := ast.OutputVarsFromExpr(p.compiler, removedEq, safe)
- diff := unsafe.Diff(outputVars)
- if len(diff) < len(unsafe) {
- unsafe = diff
+ if unsafe.DiffCount(outputVars) < len(unsafe) {
+ unsafe = unsafe.Diff(outputVars)
providesSafety = true
}
@@ -209,7 +209,7 @@ func (p *CopyPropagator) Apply(query ast.Body) ast.Body {
// plugBindings applies the binding list and union-find to x. This process
// removes as many variables as possible.
-func (p *CopyPropagator) plugBindings(pctx *plugContext, expr *ast.Expr) *ast.Expr {
+func (*CopyPropagator) plugBindings(pctx *plugContext, expr *ast.Expr) *ast.Expr {
xform := bindingPlugTransform{
pctx: pctx,
@@ -233,7 +233,7 @@ type bindingPlugTransform struct {
pctx *plugContext
}
-func (t bindingPlugTransform) Transform(x interface{}) (interface{}, error) {
+func (t bindingPlugTransform) Transform(x any) (any, error) {
switch x := x.(type) {
case ast.Var:
return t.plugBindingsVar(t.pctx, x), nil
@@ -244,7 +244,7 @@ func (t bindingPlugTransform) Transform(x interface{}) (interface{}, error) {
}
}
-func (t bindingPlugTransform) plugBindingsVar(pctx *plugContext, v ast.Var) ast.Value {
+func (bindingPlugTransform) plugBindingsVar(pctx *plugContext, v ast.Var) ast.Value {
var result ast.Value = v
@@ -274,7 +274,7 @@ func (t bindingPlugTransform) plugBindingsVar(pctx *plugContext, v ast.Var) ast.
return b
}
-func (t bindingPlugTransform) plugBindingsRef(pctx *plugContext, v ast.Ref) ast.Ref {
+func (bindingPlugTransform) plugBindingsRef(pctx *plugContext, v ast.Ref) ast.Ref {
// Apply union-find to remove redundant variables from input.
if root, ok := pctx.uf.Find(v[0].Value); ok {
@@ -385,11 +385,11 @@ type binding struct {
k, v ast.Value
}
-func containedIn(value ast.Value, x interface{}) bool {
+func containedIn(value ast.Value, x any) bool {
var stop bool
var vis *ast.GenericVisitor
- vis = ast.NewGenericVisitor(func(x interface{}) bool {
+ vis = ast.NewGenericVisitor(func(x any) bool {
switch x := x.(type) {
case *ast.Every: // skip body
vis.Walk(x.Key)
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/unionfind.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/unionfind.go
similarity index 81%
rename from vendor/github.com/open-policy-agent/opa/topdown/copypropagation/unionfind.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/unionfind.go
index 38ec56f315..cac2a3009f 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/unionfind.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/unionfind.go
@@ -7,25 +7,21 @@ package copypropagation
import (
"fmt"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/util"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/util"
)
type rankFunc func(*unionFindRoot, *unionFindRoot) (*unionFindRoot, *unionFindRoot)
type unionFind struct {
- roots *util.HashMap
+ roots *util.HasherMap[ast.Value, *unionFindRoot]
parents *ast.ValueMap
rank rankFunc
}
func newUnionFind(rank rankFunc) *unionFind {
return &unionFind{
- roots: util.NewHashMap(func(a util.T, b util.T) bool {
- return a.(ast.Value).Compare(b.(ast.Value)) == 0
- }, func(v util.T) int {
- return v.(ast.Value).Hash()
- }),
+ roots: util.NewHasherMap[ast.Value, *unionFindRoot](ast.ValueEqual),
parents: ast.NewValueMap(),
rank: rank,
}
@@ -53,7 +49,7 @@ func (uf *unionFind) Find(v ast.Value) (*unionFindRoot, bool) {
if parent.Compare(v) == 0 {
r, ok := uf.roots.Get(v)
- return r.(*unionFindRoot), ok
+ return r, ok
}
return uf.Find(parent)
@@ -86,20 +82,20 @@ func (uf *unionFind) Merge(a, b ast.Value) (*unionFindRoot, bool) {
func (uf *unionFind) String() string {
o := struct {
- Roots map[string]interface{}
+ Roots map[string]any
Parents map[string]ast.Value
}{
- map[string]interface{}{},
+ map[string]any{},
map[string]ast.Value{},
}
- uf.roots.Iter(func(k util.T, v util.T) bool {
- o.Roots[k.(ast.Value).String()] = struct {
+ uf.roots.Iter(func(k ast.Value, v *unionFindRoot) bool {
+ o.Roots[k.String()] = struct {
Constant *ast.Term
Key ast.Value
}{
- v.(*unionFindRoot).constant,
- v.(*unionFindRoot).key,
+ v.constant,
+ v.key,
}
return true
})
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/crypto.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/crypto.go
similarity index 88%
rename from vendor/github.com/open-policy-agent/opa/topdown/crypto.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/crypto.go
index f24432a264..144c01ee95 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/crypto.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/crypto.go
@@ -15,19 +15,21 @@ import (
"crypto/tls"
"crypto/x509"
"encoding/base64"
+ "encoding/hex"
"encoding/json"
"encoding/pem"
+ "errors"
"fmt"
"hash"
"os"
"strings"
"time"
- "github.com/open-policy-agent/opa/internal/jwx/jwk"
+ "github.com/lestrrat-go/jwx/v3/jwk"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
- "github.com/open-policy-agent/opa/util"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/util"
)
const (
@@ -95,19 +97,14 @@ func builtinCryptoX509ParseAndVerifyCertificates(_ BuiltinContext, operands []*a
return err
}
- invalid := ast.ArrayTerm(
- ast.BooleanTerm(false),
- ast.NewTerm(ast.NewArray()),
- )
-
certs, err := getX509CertsFromString(string(input))
if err != nil {
- return iter(invalid)
+ return iter(ast.ArrayTerm(ast.InternedTerm(false), ast.InternedEmptyArray))
}
verified, err := verifyX509CertificateChain(certs, x509.VerifyOptions{})
if err != nil {
- return iter(invalid)
+ return iter(ast.ArrayTerm(ast.InternedTerm(false), ast.InternedEmptyArray))
}
value, err := ast.InterfaceToValue(extendCertificates(verified))
@@ -115,10 +112,7 @@ func builtinCryptoX509ParseAndVerifyCertificates(_ BuiltinContext, operands []*a
return err
}
- valid := ast.ArrayTerm(
- ast.BooleanTerm(true),
- ast.NewTerm(value),
- )
+ valid := ast.ArrayTerm(ast.InternedTerm(true), ast.NewTerm(value))
return iter(valid)
}
@@ -152,14 +146,9 @@ func builtinCryptoX509ParseAndVerifyCertificatesWithOptions(_ BuiltinContext, op
return err
}
- invalid := ast.ArrayTerm(
- ast.BooleanTerm(false),
- ast.NewTerm(ast.NewArray()),
- )
-
certs, err := getX509CertsFromString(string(input))
if err != nil {
- return iter(invalid)
+ return iter(ast.ArrayTerm(ast.InternedTerm(false), ast.InternedEmptyArray))
}
// Collect the cert verification options
@@ -170,7 +159,7 @@ func builtinCryptoX509ParseAndVerifyCertificatesWithOptions(_ BuiltinContext, op
verified, err := verifyX509CertificateChain(certs, verifyOpt)
if err != nil {
- return iter(invalid)
+ return iter(ast.ArrayTerm(ast.InternedTerm(false), ast.InternedEmptyArray))
}
value, err := ast.InterfaceToValue(verified)
@@ -178,12 +167,7 @@ func builtinCryptoX509ParseAndVerifyCertificatesWithOptions(_ BuiltinContext, op
return err
}
- valid := ast.ArrayTerm(
- ast.BooleanTerm(true),
- ast.NewTerm(value),
- )
-
- return iter(valid)
+ return iter(ast.ArrayTerm(ast.InternedTerm(true), ast.NewTerm(value)))
}
func extractVerifyOpts(options ast.Object) (verifyOpt x509.VerifyOptions, err error) {
@@ -204,7 +188,7 @@ func extractVerifyOpts(options ast.Object) (verifyOpt x509.VerifyOptions, err er
if ok {
verifyOpt.DNSName = strings.Trim(string(dns), "\"")
} else {
- return verifyOpt, fmt.Errorf("'DNSName' should be a string")
+ return verifyOpt, errors.New("'DNSName' should be a string")
}
case "CurrentTime":
c, ok := options.Get(key).Value.(ast.Number)
@@ -213,10 +197,10 @@ func extractVerifyOpts(options ast.Object) (verifyOpt x509.VerifyOptions, err er
if ok {
verifyOpt.CurrentTime = time.Unix(0, nanosecs)
} else {
- return verifyOpt, fmt.Errorf("'CurrentTime' should be a valid int64 number")
+ return verifyOpt, errors.New("'CurrentTime' should be a valid int64 number")
}
} else {
- return verifyOpt, fmt.Errorf("'CurrentTime' should be a number")
+ return verifyOpt, errors.New("'CurrentTime' should be a number")
}
case "MaxConstraintComparisons":
c, ok := options.Get(key).Value.(ast.Number)
@@ -225,23 +209,23 @@ func extractVerifyOpts(options ast.Object) (verifyOpt x509.VerifyOptions, err er
if ok {
verifyOpt.MaxConstraintComparisions = maxComparisons
} else {
- return verifyOpt, fmt.Errorf("'MaxConstraintComparisons' should be a valid number")
+ return verifyOpt, errors.New("'MaxConstraintComparisons' should be a valid number")
}
} else {
- return verifyOpt, fmt.Errorf("'MaxConstraintComparisons' should be a number")
+ return verifyOpt, errors.New("'MaxConstraintComparisons' should be a number")
}
case "KeyUsages":
type forEach interface {
Foreach(func(*ast.Term))
}
var ks forEach
- switch options.Get(key).Value.(type) {
+ switch v := options.Get(key).Value.(type) {
case *ast.Array:
- ks = options.Get(key).Value.(*ast.Array)
+ ks = v
case ast.Set:
- ks = options.Get(key).Value.(ast.Set)
+ ks = v
default:
- return verifyOpt, fmt.Errorf("'KeyUsages' should be an Array or Set")
+ return verifyOpt, errors.New("'KeyUsages' should be an Array or Set")
}
// Collect the x509.ExtKeyUsage values by looking up the
@@ -262,7 +246,7 @@ func extractVerifyOpts(options ast.Object) (verifyOpt x509.VerifyOptions, err er
return x509.VerifyOptions{}, fmt.Errorf("invalid entries for 'KeyUsages' found: %s", invalidKUsgs)
}
default:
- return verifyOpt, fmt.Errorf("invalid key option")
+ return verifyOpt, errors.New("invalid key option")
}
}
@@ -312,7 +296,7 @@ func builtinCryptoX509ParseCertificateRequest(_ BuiltinContext, operands []*ast.
p, _ := pem.Decode(bytes)
if p != nil && p.Type != blockTypeCertificateRequest {
- return fmt.Errorf("invalid PEM-encoded certificate signing request")
+ return errors.New("invalid PEM-encoded certificate signing request")
}
if p != nil {
bytes = p.Bytes
@@ -328,7 +312,7 @@ func builtinCryptoX509ParseCertificateRequest(_ BuiltinContext, operands []*ast.
return err
}
- var x interface{}
+ var x any
if err := util.UnmarshalJSON(bs, &x); err != nil {
return err
}
@@ -342,7 +326,7 @@ func builtinCryptoX509ParseCertificateRequest(_ BuiltinContext, operands []*ast.
}
func builtinCryptoJWKFromPrivateKey(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
- var x interface{}
+ var x any
a := operands[0].Value
input, err := builtins.StringOperand(a, 1)
@@ -354,7 +338,7 @@ func builtinCryptoJWKFromPrivateKey(_ BuiltinContext, operands []*ast.Term, iter
pemDataString := string(input)
if pemDataString == "" {
- return fmt.Errorf("input PEM data was empty")
+ return errors.New("input PEM data was empty")
}
// This built in must be supplied a valid PEM or base64 encoded string.
@@ -374,10 +358,10 @@ func builtinCryptoJWKFromPrivateKey(_ BuiltinContext, operands []*ast.Term, iter
}
if len(rawKeys) == 0 {
- return iter(ast.NullTerm())
+ return iter(ast.InternedNullTerm)
}
- key, err := jwk.New(rawKeys[0])
+ key, err := jwk.Import(rawKeys[0])
if err != nil {
return err
}
@@ -408,7 +392,7 @@ func builtinCryptoParsePrivateKeys(_ BuiltinContext, operands []*ast.Term, iter
}
if string(input) == "" {
- return iter(ast.NullTerm())
+ return iter(ast.InternedNullTerm)
}
// get the raw private key
@@ -418,7 +402,7 @@ func builtinCryptoParsePrivateKeys(_ BuiltinContext, operands []*ast.Term, iter
}
if len(rawKeys) == 0 {
- return iter(ast.NewTerm(ast.NewArray()))
+ return iter(ast.InternedEmptyArray)
}
bs, err := json.Marshal(rawKeys)
@@ -426,7 +410,7 @@ func builtinCryptoParsePrivateKeys(_ BuiltinContext, operands []*ast.Term, iter
return err
}
- var x interface{}
+ var x any
if err := util.UnmarshalJSON(bs, &x); err != nil {
return err
}
@@ -439,36 +423,43 @@ func builtinCryptoParsePrivateKeys(_ BuiltinContext, operands []*ast.Term, iter
return iter(ast.NewTerm(value))
}
-func hashHelper(a ast.Value, h func(ast.String) string) (ast.Value, error) {
- s, err := builtins.StringOperand(a, 1)
- if err != nil {
- return nil, err
- }
- return ast.String(h(s)), nil
+func toHexEncodedString(src []byte) string {
+ dst := make([]byte, hex.EncodedLen(len(src)))
+ hex.Encode(dst, src)
+ return util.ByteSliceToString(dst)
}
func builtinCryptoMd5(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
- res, err := hashHelper(operands[0].Value, func(s ast.String) string { return fmt.Sprintf("%x", md5.Sum([]byte(s))) })
+ s, err := builtins.StringOperand(operands[0].Value, 1)
if err != nil {
return err
}
- return iter(ast.NewTerm(res))
+
+ md5sum := md5.Sum([]byte(s))
+
+ return iter(ast.StringTerm(toHexEncodedString(md5sum[:])))
}
func builtinCryptoSha1(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
- res, err := hashHelper(operands[0].Value, func(s ast.String) string { return fmt.Sprintf("%x", sha1.Sum([]byte(s))) })
+ s, err := builtins.StringOperand(operands[0].Value, 1)
if err != nil {
return err
}
- return iter(ast.NewTerm(res))
+
+ sha1sum := sha1.Sum([]byte(s))
+
+ return iter(ast.StringTerm(toHexEncodedString(sha1sum[:])))
}
func builtinCryptoSha256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
- res, err := hashHelper(operands[0].Value, func(s ast.String) string { return fmt.Sprintf("%x", sha256.Sum256([]byte(s))) })
+ s, err := builtins.StringOperand(operands[0].Value, 1)
if err != nil {
return err
}
- return iter(ast.NewTerm(res))
+
+ sha256sum := sha256.Sum256([]byte(s))
+
+ return iter(ast.StringTerm(toHexEncodedString(sha256sum[:])))
}
func hmacHelper(operands []*ast.Term, iter func(*ast.Term) error, h func() hash.Hash) error {
@@ -488,7 +479,7 @@ func hmacHelper(operands []*ast.Term, iter func(*ast.Term) error, h func() hash.
mac.Write([]byte(message))
messageDigest := mac.Sum(nil)
- return iter(ast.StringTerm(fmt.Sprintf("%x", messageDigest)))
+ return iter(ast.StringTerm(hex.EncodeToString(messageDigest)))
}
func builtinCryptoHmacMd5(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -522,7 +513,7 @@ func builtinCryptoHmacEqual(_ BuiltinContext, operands []*ast.Term, iter func(*a
res := hmac.Equal([]byte(mac1), []byte(mac2))
- return iter(ast.BooleanTerm(res))
+ return iter(ast.InternedTerm(res))
}
func init() {
@@ -697,7 +688,7 @@ func addCACertsFromBytes(pool *x509.CertPool, pemBytes []byte) (*x509.CertPool,
}
if ok := pool.AppendCertsFromPEM(pemBytes); !ok {
- return nil, fmt.Errorf("could not append certificates")
+ return nil, errors.New("could not append certificates")
}
return pool, nil
@@ -725,9 +716,11 @@ func readCertFromFile(localCertFile string) ([]byte, error) {
return certPEM, nil
}
+var beginPrefix = []byte("-----BEGIN ")
+
func getTLSx509KeyPairFromString(certPemBlock []byte, keyPemBlock []byte) (*tls.Certificate, error) {
- if !strings.HasPrefix(string(certPemBlock), "-----BEGIN") {
+ if !bytes.HasPrefix(certPemBlock, beginPrefix) {
s, err := base64.StdEncoding.DecodeString(string(certPemBlock))
if err != nil {
return nil, err
@@ -735,7 +728,7 @@ func getTLSx509KeyPairFromString(certPemBlock []byte, keyPemBlock []byte) (*tls.
certPemBlock = s
}
- if !strings.HasPrefix(string(keyPemBlock), "-----BEGIN") {
+ if !bytes.HasPrefix(keyPemBlock, beginPrefix) {
s, err := base64.StdEncoding.DecodeString(string(keyPemBlock))
if err != nil {
return nil, err
@@ -744,7 +737,7 @@ func getTLSx509KeyPairFromString(certPemBlock []byte, keyPemBlock []byte) (*tls.
}
// we assume it a DER certificate and try to convert it to a PEM.
- if !bytes.HasPrefix(certPemBlock, []byte("-----BEGIN")) {
+ if !bytes.HasPrefix(certPemBlock, beginPrefix) {
pemBlock := &pem.Block{
Type: "CERTIFICATE",
diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/doc.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/doc.go
new file mode 100644
index 0000000000..9aa7aa45c5
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/doc.go
@@ -0,0 +1,10 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package topdown provides low-level query evaluation support.
+//
+// The topdown implementation is a modified version of the standard top-down
+// evaluation algorithm used in Datalog. References and comprehensions are
+// evaluated eagerly while all other terms are evaluated lazily.
+package topdown
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/encoding.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/encoding.go
similarity index 93%
rename from vendor/github.com/open-policy-agent/opa/topdown/encoding.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/encoding.go
index f3475a60d0..541b50d0a9 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/encoding.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/encoding.go
@@ -15,9 +15,9 @@ import (
"sigs.k8s.io/yaml"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
- "github.com/open-policy-agent/opa/util"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/util"
)
func builtinJSONMarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -128,7 +128,7 @@ func builtinJSONUnmarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast
return err
}
- var x interface{}
+ var x any
if err := util.UnmarshalJSON([]byte(str), &x); err != nil {
return err
@@ -144,10 +144,10 @@ func builtinJSONIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.T
str, err := builtins.StringOperand(operands[0].Value, 1)
if err != nil {
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
- return iter(ast.BooleanTerm(json.Valid([]byte(str))))
+ return iter(ast.InternedTerm(json.Valid([]byte(str))))
}
func builtinBase64Encode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -169,17 +169,17 @@ func builtinBase64Decode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.
if err != nil {
return err
}
- return iter(ast.NewTerm(ast.String(result)))
+ return iter(ast.InternedTerm(string(result)))
}
func builtinBase64IsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
str, err := builtins.StringOperand(operands[0].Value, 1)
if err != nil {
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
_, err = base64.StdEncoding.DecodeString(string(str))
- return iter(ast.BooleanTerm(err == nil))
+ return iter(ast.InternedTerm(err == nil))
}
func builtinBase64UrlEncode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -224,7 +224,7 @@ func builtinBase64UrlDecode(_ BuiltinContext, operands []*ast.Term, iter func(*a
if err != nil {
return err
}
- return iter(ast.NewTerm(ast.String(result)))
+ return iter(ast.InternedTerm(string(result)))
}
func builtinURLQueryEncode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -255,7 +255,7 @@ func builtinURLQueryEncodeObject(_ BuiltinContext, operands []*ast.Term, iter fu
return err
}
- inputs, ok := asJSON.(map[string]interface{})
+ inputs, ok := asJSON.(map[string]any)
if !ok {
return builtins.NewOperandTypeErr(1, operands[0].Value, "object")
}
@@ -266,7 +266,7 @@ func builtinURLQueryEncodeObject(_ BuiltinContext, operands []*ast.Term, iter fu
switch vv := v.(type) {
case string:
query.Set(k, vv)
- case []interface{}:
+ case []any:
for _, val := range vv {
strVal, ok := val.(string)
if !ok {
@@ -340,7 +340,7 @@ func builtinYAMLUnmarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast
buf := bytes.NewBuffer(bs)
decoder := util.NewJSONDecoder(buf)
- var val interface{}
+ var val any
err = decoder.Decode(&val)
if err != nil {
return err
@@ -355,12 +355,12 @@ func builtinYAMLUnmarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast
func builtinYAMLIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
str, err := builtins.StringOperand(operands[0].Value, 1)
if err != nil {
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
- var x interface{}
+ var x any
err = yaml.Unmarshal([]byte(str), &x)
- return iter(ast.BooleanTerm(err == nil))
+ return iter(ast.InternedTerm(err == nil))
}
func builtinHexEncode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -380,7 +380,7 @@ func builtinHexDecode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter
if err != nil {
return err
}
- return iter(ast.NewTerm(ast.String(val)))
+ return iter(ast.StringTerm(string(val)))
}
func init() {
diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/errors.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/errors.go
new file mode 100644
index 0000000000..cadd163198
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/errors.go
@@ -0,0 +1,149 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/open-policy-agent/opa/v1/ast"
+)
+
+// Halt is a special error type that built-in function implementations return to indicate
+// that policy evaluation should stop immediately.
+type Halt struct {
+ Err error
+}
+
+func (h Halt) Error() string {
+ return h.Err.Error()
+}
+
+func (h Halt) Unwrap() error { return h.Err }
+
+// Error is the error type returned by the Eval and Query functions when
+// an evaluation error occurs.
+type Error struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+ Location *ast.Location `json:"location,omitempty"`
+ err error `json:"-"`
+}
+
+const (
+
+ // InternalErr represents an unknown evaluation error.
+ InternalErr string = "eval_internal_error"
+
+ // CancelErr indicates the evaluation process was cancelled.
+ CancelErr string = "eval_cancel_error"
+
+ // ConflictErr indicates a conflict was encountered during evaluation. For
+ // instance, a conflict occurs if a rule produces multiple, differing values
+ // for the same key in an object. Conflict errors indicate the policy does
+ // not account for the data loaded into the policy engine.
+ ConflictErr string = "eval_conflict_error"
+
+ // TypeErr indicates evaluation stopped because an expression was applied to
+ // a value of an inappropriate type.
+ TypeErr string = "eval_type_error"
+
+ // BuiltinErr indicates a built-in function received a semantically invalid
+ // input or encountered some kind of runtime error, e.g., connection
+ // timeout, connection refused, etc.
+ BuiltinErr string = "eval_builtin_error"
+
+ // WithMergeErr indicates that the real and replacement data could not be merged.
+ WithMergeErr string = "eval_with_merge_error"
+)
+
+// IsError returns true if the err is an Error.
+func IsError(err error) bool {
+ var e *Error
+ return errors.As(err, &e)
+}
+
+// IsCancel returns true if err was caused by cancellation.
+func IsCancel(err error) bool {
+ return errors.Is(err, &Error{Code: CancelErr})
+}
+
+// Is allows matching topdown errors using errors.Is (see IsCancel).
+func (e *Error) Is(target error) bool {
+ var t *Error
+ if errors.As(target, &t) {
+ return (t.Code == "" || e.Code == t.Code) &&
+ (t.Message == "" || e.Message == t.Message) &&
+ (t.Location == nil || t.Location.Compare(e.Location) == 0)
+ }
+ return false
+}
+
+func (e *Error) Error() string {
+ msg := fmt.Sprintf("%v: %v", e.Code, e.Message)
+
+ if e.Location != nil {
+ msg = e.Location.String() + ": " + msg
+ }
+
+ return msg
+}
+
+func (e *Error) Wrap(err error) *Error {
+ e.err = err
+ return e
+}
+
+func (e *Error) Unwrap() error {
+ return e.err
+}
+
+func functionConflictErr(loc *ast.Location) error {
+ return &Error{
+ Code: ConflictErr,
+ Location: loc,
+ Message: "functions must not produce multiple outputs for same inputs",
+ }
+}
+
+func completeDocConflictErr(loc *ast.Location) error {
+ return &Error{
+ Code: ConflictErr,
+ Location: loc,
+ Message: "complete rules must not produce multiple outputs",
+ }
+}
+
+func objectDocKeyConflictErr(loc *ast.Location) error {
+ return &Error{
+ Code: ConflictErr,
+ Location: loc,
+ Message: "object keys must be unique",
+ }
+}
+
+func unsupportedBuiltinErr(loc *ast.Location) error {
+ return &Error{
+ Code: InternalErr,
+ Location: loc,
+ Message: "unsupported built-in",
+ }
+}
+
+func mergeConflictErr(loc *ast.Location) error {
+ return &Error{
+ Code: WithMergeErr,
+ Location: loc,
+ Message: "real and replacement data could not be merged",
+ }
+}
+
+func internalErr(loc *ast.Location, msg string) error {
+ return &Error{
+ Code: InternalErr,
+ Location: loc,
+ Message: msg,
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/eval.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/eval.go
similarity index 81%
rename from vendor/github.com/open-policy-agent/opa/topdown/eval.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/eval.go
index 7884ac01e0..f05fd9d94a 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/eval.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/eval.go
@@ -5,19 +5,21 @@ import (
"errors"
"fmt"
"io"
- "sort"
+ "slices"
"strconv"
"strings"
-
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/metrics"
- "github.com/open-policy-agent/opa/storage"
- "github.com/open-policy-agent/opa/topdown/builtins"
- "github.com/open-policy-agent/opa/topdown/cache"
- "github.com/open-policy-agent/opa/topdown/copypropagation"
- "github.com/open-policy-agent/opa/topdown/print"
- "github.com/open-policy-agent/opa/tracing"
- "github.com/open-policy-agent/opa/types"
+ "sync"
+
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/metrics"
+ "github.com/open-policy-agent/opa/v1/storage"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/topdown/cache"
+ "github.com/open-policy-agent/opa/v1/topdown/copypropagation"
+ "github.com/open-policy-agent/opa/v1/topdown/print"
+ "github.com/open-policy-agent/opa/v1/tracing"
+ "github.com/open-policy-agent/opa/v1/types"
+ "github.com/open-policy-agent/opa/v1/util"
)
type evalIterator func(*eval) error
@@ -57,60 +59,92 @@ func (ee deferredEarlyExitError) Error() string {
return fmt.Sprintf("%v: deferred early exit", ee.e.query)
}
+// Note(æ): this struct is formatted for optimal alignment as it is big, internal and instantiated
+// *very* frequently during evaluation. If you need to add fields here, please consider the alignment
+// of the struct, and use something like betteralign (https://github.com/dkorunic/betteralign) if you
+// need help with that.
type eval struct {
ctx context.Context
metrics metrics.Metrics
seed io.Reader
+ cancel Cancel
+ queryCompiler ast.QueryCompiler
+ store storage.Store
+ txn storage.Transaction
+ virtualCache VirtualCache
+ baseCache BaseCache
+ interQueryBuiltinCache cache.InterQueryCache
+ interQueryBuiltinValueCache cache.InterQueryValueCache
+ printHook print.Hook
time *ast.Term
- queryID uint64
queryIDFact *queryIDFactory
parent *eval
caller *eval
- cancel Cancel
- query ast.Body
- queryCompiler ast.QueryCompiler
- index int
- indexing bool
- earlyExit bool
bindings *bindings
- store storage.Store
- baseCache *baseCache
- txn storage.Transaction
compiler *ast.Compiler
input *ast.Term
data *ast.Term
external *resolverTrie
targetStack *refStack
- tracers []QueryTracer
- traceEnabled bool
traceLastLocation *ast.Location // Last location of a trace event.
- plugTraceVars bool
instr *Instrumentation
builtins map[string]*Builtin
builtinCache builtins.Cache
ndBuiltinCache builtins.NDBCache
functionMocks *functionMocksStack
- virtualCache VirtualCache
comprehensionCache *comprehensionCache
- interQueryBuiltinCache cache.InterQueryCache
- interQueryBuiltinValueCache cache.InterQueryValueCache
saveSet *saveSet
saveStack *saveStack
saveSupport *saveSupport
saveNamespace *ast.Term
- skipSaveNamespace bool
inliningControl *inliningControl
- genvarprefix string
- genvarid int
runtime *ast.Term
builtinErrors *builtinErrors
- printHook print.Hook
+ roundTripper CustomizeRoundTripper
+ genvarprefix string
+ query ast.Body
+ tracers []QueryTracer
tracingOpts tracing.Options
+ queryID uint64
+ timeStart int64
+ index int
+ genvarid int
+ indexing bool
+ earlyExit bool
+ traceEnabled bool
+ plugTraceVars bool
+ skipSaveNamespace bool
findOne bool
strictObjects bool
+ defined bool
+}
+
+type evp struct {
+ pool sync.Pool
+}
+
+func (ep *evp) Put(e *eval) {
+ ep.pool.Put(e)
+}
+
+func (ep *evp) Get() *eval {
+ return ep.pool.Get().(*eval)
+}
+
+var evalPool = evp{
+ pool: sync.Pool{
+ New: func() any {
+ return &eval{}
+ },
+ },
}
func (e *eval) Run(iter evalIterator) error {
+ if !e.traceEnabled {
+ // avoid function literal escaping to heap if we don't need the trace
+ return e.eval(iter)
+ }
+
e.traceEnter(e.query)
return e.eval(func(e *eval) error {
e.traceExit(e.query)
@@ -129,47 +163,46 @@ func (e *eval) String() string {
func (e *eval) string(s *strings.Builder) {
fmt.Fprintf(s, "')
+ s.WriteByte('>')
}
func (e *eval) builtinFunc(name string) (*ast.Builtin, BuiltinFunc, bool) {
decl, ok := ast.BuiltinMap[name]
if ok {
- f, ok := builtinFunctions[name]
- if ok {
+ if f, ok := builtinFunctions[name]; ok {
return decl, f, true
}
- } else {
- bi, ok := e.builtins[name]
- if ok {
- return bi.Decl, bi.Func, true
+ if bi, ok := e.builtins[name]; ok {
+ return decl, bi.Func, true
}
}
+ if bi, ok := e.builtins[name]; ok {
+ return bi.Decl, bi.Func, true
+ }
+
return nil, nil, false
}
-func (e *eval) closure(query ast.Body) *eval {
- cpy := *e
+func (e *eval) closure(query ast.Body, cpy *eval) {
+ *cpy = *e
cpy.index = 0
cpy.query = query
cpy.queryID = cpy.queryIDFact.Next()
cpy.parent = e
cpy.findOne = false
- return &cpy
}
-func (e *eval) child(query ast.Body) *eval {
- cpy := *e
+func (e *eval) child(query ast.Body, cpy *eval) {
+ *cpy = *e
cpy.index = 0
cpy.query = query
cpy.queryID = cpy.queryIDFact.Next()
cpy.bindings = newBindings(cpy.queryID, e.instr)
cpy.parent = e
cpy.findOne = false
- return &cpy
}
func (e *eval) next(iter evalIterator) error {
@@ -183,7 +216,7 @@ func (e *eval) partial() bool {
return e.saveSet != nil
}
-func (e *eval) unknown(x interface{}, b *bindings) bool {
+func (e *eval) unknown(x any, b *bindings) bool {
if !e.partial() {
return false
}
@@ -198,6 +231,11 @@ func (e *eval) unknown(x interface{}, b *bindings) bool {
return saveRequired(e.compiler, e.inliningControl, true, e.saveSet, b, x, false)
}
+// exactly like `unknown` above` but without the cost of `any` boxing when arg is known to be a ref
+func (e *eval) unknownRef(ref ast.Ref, b *bindings) bool {
+ return e.partial() && saveRequired(e.compiler, e.inliningControl, true, e.saveSet, b, ast.NewTerm(ref), false)
+}
+
func (e *eval) traceEnter(x ast.Node) {
e.traceEvent(EnterOp, x, "", nil)
}
@@ -335,6 +373,13 @@ func (e *eval) evalExpr(iter evalIterator) error {
}
if e.cancel != nil && e.cancel.Cancelled() {
+ if e.ctx != nil && e.ctx.Err() != nil {
+ return &Error{
+ Code: CancelErr,
+ Message: e.ctx.Err().Error(),
+ err: e.ctx.Err(),
+ }
+ }
return &Error{
Code: CancelErr,
Message: "caller cancelled query execution",
@@ -342,13 +387,9 @@ func (e *eval) evalExpr(iter evalIterator) error {
}
if e.index >= len(e.query) {
- err := iter(e)
-
- if err != nil {
+ if err := iter(e); err != nil {
switch err := err.(type) {
- case *deferredEarlyExitError:
- return wrapErr(err)
- case *earlyExitError:
+ case *deferredEarlyExitError, *earlyExitError:
return wrapErr(err)
default:
return err
@@ -374,46 +415,119 @@ func (e *eval) evalExpr(iter evalIterator) error {
}
func (e *eval) evalStep(iter evalIterator) error {
-
expr := e.query[e.index]
if expr.Negated {
return e.evalNot(iter)
}
- var defined bool
var err error
+
+ // NOTE(æ): the reason why there's one branch for the tracing case and one almost
+ // identical branch below for when tracing is disabled is that the tracing case
+ // allocates wildly. These allocations are cause by the "defined" boolean variable
+ // escaping to the heap as its value is set from inside of closures. There may very
+ // well be more elegant solutions to this problem, but this is one that works, and
+ // saves several *million* allocations for some workloads. So feel free to refactor
+ // this, but do make sure that the common non-tracing case doesn't pay in allocations
+ // for something that is only needed when tracing is enabled.
+ if e.traceEnabled {
+ var defined bool
+ switch terms := expr.Terms.(type) {
+ case []*ast.Term:
+ switch {
+ case expr.IsEquality():
+ err = e.unify(terms[1], terms[2], func() error {
+ defined = true
+ err := iter(e)
+ e.traceRedo(expr)
+ return err
+ })
+ default:
+ err = e.evalCall(terms, func() error {
+ defined = true
+ err := iter(e)
+ e.traceRedo(expr)
+ return err
+ })
+ }
+ case *ast.Term:
+ // generateVar inlined here to avoid extra allocations in hot path
+ rterm := ast.VarTerm(e.fmtVarTerm())
+
+ if e.partial() {
+ e.inliningControl.PushDisable(rterm.Value, true)
+ }
+
+ err = e.unify(terms, rterm, func() error {
+ if e.saveSet.Contains(rterm, e.bindings) {
+ return e.saveExpr(ast.NewExpr(rterm), e.bindings, func() error {
+ return iter(e)
+ })
+ }
+ if !e.bindings.Plug(rterm).Equal(ast.InternedTerm(false)) {
+ defined = true
+ err := iter(e)
+ e.traceRedo(expr)
+ return err
+ }
+ return nil
+ })
+
+ if e.partial() {
+ e.inliningControl.PopDisable()
+ }
+ case *ast.Every:
+ eval := evalEvery{
+ Every: terms,
+ e: e,
+ expr: expr,
+ }
+ err = eval.eval(func() error {
+ defined = true
+ err := iter(e)
+ e.traceRedo(expr)
+ return err
+ })
+
+ default: // guard-rail for adding extra (Expr).Terms types
+ return fmt.Errorf("got %T terms: %[1]v", terms)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ if !defined {
+ e.traceFail(expr)
+ }
+
+ return nil
+ }
+
switch terms := expr.Terms.(type) {
case []*ast.Term:
switch {
case expr.IsEquality():
err = e.unify(terms[1], terms[2], func() error {
- defined = true
- err := iter(e)
- e.traceRedo(expr)
- return err
+ return iter(e)
})
default:
err = e.evalCall(terms, func() error {
- defined = true
- err := iter(e)
- e.traceRedo(expr)
- return err
+ return iter(e)
})
}
case *ast.Term:
- rterm := e.generateVar(fmt.Sprintf("term_%d_%d", e.queryID, e.index))
+ // generateVar inlined here to avoid extra allocations in hot path
+ rterm := ast.VarTerm(e.fmtVarTerm())
err = e.unify(terms, rterm, func() error {
if e.saveSet.Contains(rterm, e.bindings) {
return e.saveExpr(ast.NewExpr(rterm), e.bindings, func() error {
return iter(e)
})
}
- if !e.bindings.Plug(rterm).Equal(ast.BooleanTerm(false)) {
- defined = true
- err := iter(e)
- e.traceRedo(expr)
- return err
+ if !e.bindings.Plug(rterm).Equal(ast.InternedTerm(false)) {
+ return iter(e)
}
return nil
})
@@ -424,56 +538,65 @@ func (e *eval) evalStep(iter evalIterator) error {
expr: expr,
}
err = eval.eval(func() error {
- defined = true
- err := iter(e)
- e.traceRedo(expr)
- return err
+ return iter(e)
})
default: // guard-rail for adding extra (Expr).Terms types
return fmt.Errorf("got %T terms: %[1]v", terms)
}
- if err != nil {
- return err
- }
+ return err
+}
- if !defined {
- e.traceFail(expr)
- }
+// Single-purpose fmt.Sprintf replacement for generating variable names with only
+// one allocation performed instead of 4, and in 1/3 the time.
+func (e *eval) fmtVarTerm() string {
+ buf := make([]byte, 0, len(e.genvarprefix)+util.NumDigitsUint(e.queryID)+util.NumDigitsInt(e.index)+7)
- return nil
+ buf = append(buf, e.genvarprefix...)
+ buf = append(buf, "_term_"...)
+ buf = strconv.AppendUint(buf, e.queryID, 10)
+ buf = append(buf, '_')
+ buf = strconv.AppendInt(buf, int64(e.index), 10)
+
+ return util.ByteSliceToString(buf)
}
func (e *eval) evalNot(iter evalIterator) error {
-
expr := e.query[e.index]
if e.unknown(expr, e.bindings) {
return e.evalNotPartial(iter)
}
- negation := ast.NewBody(expr.Complement().NoWith())
- child := e.closure(negation)
+ negation := ast.NewBody(expr.ComplementNoWith())
+ child := evalPool.Get()
+ defer evalPool.Put(child)
- var defined bool
- child.traceEnter(negation)
+ e.closure(negation, child)
- err := child.eval(func(*eval) error {
- child.traceExit(negation)
- defined = true
- child.traceRedo(negation)
- return nil
- })
+ if e.traceEnabled {
+ child.traceEnter(negation)
+ }
- if err != nil {
+ if err := child.eval(func(*eval) error {
+ if e.traceEnabled {
+ child.traceExit(negation)
+ child.traceRedo(negation)
+ }
+ child.defined = true
+
+ return nil
+ }); err != nil {
return err
}
- if !defined {
+ if !child.defined {
return iter(e)
}
+ child.defined = false
+
e.traceFail(expr)
return nil
}
@@ -482,16 +605,18 @@ func (e *eval) evalWith(iter evalIterator) error {
expr := e.query[e.index]
- // Disable inlining on all references in the expression so the result of
- // partial evaluation has the same semantics w/ the with statements
- // preserved.
var disable []ast.Ref
- disableRef := func(x ast.Ref) bool {
- disable = append(disable, x.GroundPrefix())
- return false
- }
if e.partial() {
+ // Avoid the `disable` var to escape to heap unless partial evaluation is enabled.
+ var disablePartial []ast.Ref
+ // Disable inlining on all references in the expression so the result of
+ // partial evaluation has the same semantics w/ the with statements
+ // preserved.
+ disableRef := func(x ast.Ref) bool {
+ disablePartial = append(disablePartial, x.GroundPrefix())
+ return false
+ }
// If the value is unknown the with statement cannot be evaluated and so
// the entire expression should be saved to be safe. In the future this
@@ -516,12 +641,15 @@ func (e *eval) evalWith(iter evalIterator) error {
}
ast.WalkRefs(expr.NoWith(), disableRef)
+
+ disable = disablePartial
}
pairsInput := [][2]*ast.Term{}
pairsData := [][2]*ast.Term{}
- functionMocks := [][2]*ast.Term{}
- targets := []ast.Ref{}
+ targets := make([]ast.Ref, 0, len(expr.With))
+
+ var functionMocks [][2]*ast.Term
for i := range expr.With {
target := expr.With[i].Target
@@ -593,16 +721,31 @@ func (e *eval) evalWithPush(input, data *ast.Term, functionMocks [][2]*ast.Term,
e.data = data
}
+ if e.comprehensionCache == nil {
+ e.comprehensionCache = newComprehensionCache()
+ }
+
e.comprehensionCache.Push()
e.virtualCache.Push()
+
+ if e.targetStack == nil {
+ e.targetStack = newRefStack()
+ }
+
e.targetStack.Push(targets)
e.inliningControl.PushDisable(disable, true)
+
+ if e.functionMocks == nil {
+ e.functionMocks = newFunctionMocksStack()
+ }
+
e.functionMocks.PutPairs(functionMocks)
return oldInput, oldData
}
func (e *eval) evalWithPop(input, data *ast.Term) {
+ // NOTE(ae) no nil checks here as we assume evalWithPush always called first
e.inliningControl.PopDisable()
e.targetStack.Pop()
e.virtualCache.Pop()
@@ -613,11 +756,14 @@ func (e *eval) evalWithPop(input, data *ast.Term) {
}
func (e *eval) evalNotPartial(iter evalIterator) error {
-
// Prepare query normally.
expr := e.query[e.index]
- negation := expr.Complement().NoWith()
- child := e.closure(ast.NewBody(negation))
+ negation := expr.ComplementNoWith()
+
+ child := evalPool.Get()
+ defer evalPool.Put(child)
+
+ e.closure(ast.NewBody(negation), child)
// Unknowns is the set of variables that are marked as unknown. The variables
// are namespaced with the query ID that they originate in. This ensures that
@@ -710,9 +856,7 @@ func (e *eval) evalNotPartialSupport(negationID uint64, expr *ast.Expr, unknowns
args = append(args, ast.NewTerm(v))
}
- sort.Slice(args, func(i, j int) bool {
- return args[i].Value.Compare(args[j].Value) < 0
- })
+ slices.SortFunc(args, ast.TermValueCompare)
if len(args) > 0 {
head.Args = args
@@ -747,7 +891,6 @@ func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error {
ref := terms[0].Value.(ast.Ref)
- var mocked bool
mock, mocked := e.functionMocks.Get(ref)
if mocked {
if m, ok := mock.Value.(ast.Ref); ok && isFunction(e.compiler.TypeEnv, m) { // builtin or data function
@@ -770,7 +913,7 @@ func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error {
if ref[0].Equal(ast.DefaultRootDocument) {
if mocked {
f := e.compiler.TypeEnv.Get(ref).(*types.Function)
- return e.evalCallValue(len(f.FuncArgs().Args), terms, mock, iter)
+ return e.evalCallValue(f.Arity(), terms, mock, iter)
}
var ir *ast.IndexResult
@@ -780,13 +923,13 @@ func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error {
} else {
ir, err = e.getRules(ref, terms[1:])
}
+ defer ast.IndexResultPool.Put(ir)
if err != nil {
return err
}
eval := evalFunc{
e: e,
- ref: ref,
terms: terms,
ir: ir,
}
@@ -800,42 +943,52 @@ func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error {
}
if mocked { // value replacement of built-in call
- return e.evalCallValue(len(bi.Decl.Args()), terms, mock, iter)
+ return e.evalCallValue(bi.Decl.Arity(), terms, mock, iter)
}
if e.unknown(e.query[e.index], e.bindings) {
- return e.saveCall(len(bi.Decl.Args()), terms, iter)
+ return e.saveCall(bi.Decl.Arity(), terms, iter)
}
- var parentID uint64
- if e.parent != nil {
- parentID = e.parent.queryID
- }
+ var bctx *BuiltinContext
- var capabilities *ast.Capabilities
- if e.compiler != nil {
- capabilities = e.compiler.Capabilities()
- }
-
- bctx := BuiltinContext{
- Context: e.ctx,
- Metrics: e.metrics,
- Seed: e.seed,
- Time: e.time,
- Cancel: e.cancel,
- Runtime: e.runtime,
- Cache: e.builtinCache,
- InterQueryBuiltinCache: e.interQueryBuiltinCache,
- InterQueryBuiltinValueCache: e.interQueryBuiltinValueCache,
- NDBuiltinCache: e.ndBuiltinCache,
- Location: e.query[e.index].Location,
- QueryTracers: e.tracers,
- TraceEnabled: e.traceEnabled,
- QueryID: e.queryID,
- ParentID: parentID,
- PrintHook: e.printHook,
- DistributedTracingOpts: e.tracingOpts,
- Capabilities: capabilities,
+ // Creating a BuiltinContext is expensive, so only do it if the builtin depends on it.
+ if !bi.CanSkipBctx {
+ var parentID uint64
+ if e.parent != nil {
+ parentID = e.parent.queryID
+ }
+
+ var capabilities *ast.Capabilities
+ if e.compiler != nil {
+ capabilities = e.compiler.Capabilities()
+ }
+
+ if e.time == nil {
+ e.time = ast.NumberTerm(int64ToJSONNumber(e.timeStart))
+ }
+
+ bctx = &BuiltinContext{
+ Context: e.ctx,
+ Metrics: e.metrics,
+ Seed: e.seed,
+ Time: e.time,
+ Cancel: e.cancel,
+ Runtime: e.runtime,
+ Cache: e.builtinCache,
+ InterQueryBuiltinCache: e.interQueryBuiltinCache,
+ InterQueryBuiltinValueCache: e.interQueryBuiltinValueCache,
+ NDBuiltinCache: e.ndBuiltinCache,
+ Location: e.query[e.index].Location,
+ QueryTracers: e.tracers,
+ TraceEnabled: e.traceEnabled,
+ QueryID: e.queryID,
+ ParentID: parentID,
+ PrintHook: e.printHook,
+ DistributedTracingOpts: e.tracingOpts,
+ Capabilities: capabilities,
+ RoundTripper: e.roundTripper,
+ }
}
eval := evalBuiltin{
@@ -855,7 +1008,7 @@ func (e *eval) evalCallValue(arity int, terms []*ast.Term, mock *ast.Term, iter
return e.unify(terms[len(terms)-1], mock, iter)
case len(terms) == arity+1:
- if mock.Value.Compare(ast.Boolean(false)) != 0 {
+ if !ast.Boolean(false).Equal(mock.Value) {
return iter()
}
return nil
@@ -932,6 +1085,22 @@ func (e *eval) biunifyArraysRec(a, b *ast.Array, b1, b2 *bindings, iter unifyIte
})
}
+func (e *eval) biunifyTerms(a, b []*ast.Term, b1, b2 *bindings, iter unifyIterator) error {
+ if len(a) != len(b) {
+ return nil
+ }
+ return e.biunifyTermsRec(a, b, b1, b2, iter, 0)
+}
+
+func (e *eval) biunifyTermsRec(a, b []*ast.Term, b1, b2 *bindings, iter unifyIterator, idx int) error {
+ if idx == len(a) {
+ return iter()
+ }
+ return e.biunify(a[idx], b[idx], b1, b2, func() error {
+ return e.biunifyTermsRec(a, b, b1, b2, iter, idx+1)
+ })
+}
+
func (e *eval) biunifyObjects(a, b ast.Object, b1, b2 *bindings, iter unifyIterator) error {
if a.Len() != b.Len() {
return nil
@@ -1057,7 +1226,7 @@ func (e *eval) biunifyRef(a, b *ast.Term, b1, b2 *bindings, iter unifyIterator)
e: e,
ref: ref,
pos: 1,
- plugged: ref.Copy(),
+ plugged: ref.CopyNonGround(),
bindings: b1,
rterm: b,
rbindings: b2,
@@ -1133,6 +1302,10 @@ func (e *eval) buildComprehensionCache(a *ast.Term) (*ast.Term, error) {
return nil, nil
}
+ if e.comprehensionCache == nil {
+ e.comprehensionCache = newComprehensionCache()
+ }
+
cache, ok := e.comprehensionCache.Elem(a)
if !ok {
var err error
@@ -1165,7 +1338,10 @@ func (e *eval) buildComprehensionCache(a *ast.Term) (*ast.Term, error) {
}
func (e *eval) buildComprehensionCacheArray(x *ast.ArrayComprehension, keys []*ast.Term) (*comprehensionCacheElem, error) {
- child := e.child(x.Body)
+ child := evalPool.Get()
+ defer evalPool.Put(child)
+
+ e.child(x.Body, child)
node := newComprehensionCacheElem()
return node, child.Run(func(child *eval) error {
values := make([]*ast.Term, len(keys))
@@ -1184,7 +1360,10 @@ func (e *eval) buildComprehensionCacheArray(x *ast.ArrayComprehension, keys []*a
}
func (e *eval) buildComprehensionCacheSet(x *ast.SetComprehension, keys []*ast.Term) (*comprehensionCacheElem, error) {
- child := e.child(x.Body)
+ child := evalPool.Get()
+ defer evalPool.Put(child)
+
+ e.child(x.Body, child)
node := newComprehensionCacheElem()
return node, child.Run(func(child *eval) error {
values := make([]*ast.Term, len(keys))
@@ -1204,7 +1383,10 @@ func (e *eval) buildComprehensionCacheSet(x *ast.SetComprehension, keys []*ast.T
}
func (e *eval) buildComprehensionCacheObject(x *ast.ObjectComprehension, keys []*ast.Term) (*comprehensionCacheElem, error) {
- child := e.child(x.Body)
+ child := evalPool.Get()
+ defer evalPool.Put(child)
+
+ e.child(x.Body, child)
node := newComprehensionCacheElem()
return node, child.Run(func(child *eval) error {
values := make([]*ast.Term, len(keys))
@@ -1285,7 +1467,11 @@ func (e *eval) amendComprehension(a *ast.Term, b1 *bindings) (*ast.Term, error)
func (e *eval) biunifyComprehensionArray(x *ast.ArrayComprehension, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error {
result := ast.NewArray()
- child := e.closure(x.Body)
+ child := evalPool.Get()
+
+ e.closure(x.Body, child)
+ defer evalPool.Put(child)
+
err := child.Run(func(child *eval) error {
result = result.Append(child.bindings.Plug(x.Term))
return nil
@@ -1298,7 +1484,11 @@ func (e *eval) biunifyComprehensionArray(x *ast.ArrayComprehension, b *ast.Term,
func (e *eval) biunifyComprehensionSet(x *ast.SetComprehension, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error {
result := ast.NewSet()
- child := e.closure(x.Body)
+ child := evalPool.Get()
+
+ e.closure(x.Body, child)
+ defer evalPool.Put(child)
+
err := child.Run(func(child *eval) error {
result.Add(child.bindings.Plug(x.Term))
return nil
@@ -1310,8 +1500,13 @@ func (e *eval) biunifyComprehensionSet(x *ast.SetComprehension, b *ast.Term, b1,
}
func (e *eval) biunifyComprehensionObject(x *ast.ObjectComprehension, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error {
+ child := evalPool.Get()
+ defer evalPool.Put(child)
+
+ e.closure(x.Body, child)
+
result := ast.NewObject()
- child := e.closure(x.Body)
+
err := child.Run(func(child *eval) error {
key := child.bindings.Plug(x.Key)
value := child.bindings.Plug(x.Value)
@@ -1354,7 +1549,7 @@ func (e *eval) saveExprMarkUnknowns(expr *ast.Expr, b *bindings, iter unifyItera
e.traceSave(expr)
err = iter()
e.saveStack.Pop()
- for i := 0; i < pops; i++ {
+ for range pops {
e.saveSet.Pop()
}
return err
@@ -1384,7 +1579,7 @@ func (e *eval) saveUnify(a, b *ast.Term, b1, b2 *bindings, iter unifyIterator) e
err := iter()
e.saveStack.Pop()
- for i := 0; i < pops; i++ {
+ for range pops {
e.saveSet.Pop()
}
@@ -1411,7 +1606,7 @@ func (e *eval) saveCall(declArgsLen int, terms []*ast.Term, iter unifyIterator)
err := iter()
e.saveStack.Pop()
- for i := 0; i < pops; i++ {
+ for range pops {
e.saveSet.Pop()
}
return err
@@ -1433,7 +1628,7 @@ func (e *eval) saveInlinedNegatedExprs(exprs []*ast.Expr, iter unifyIterator) er
e.traceSave(expr)
}
err := iter()
- for i := 0; i < len(exprs); i++ {
+ for range exprs {
e.saveStack.Pop()
}
return err
@@ -1448,12 +1643,21 @@ func (e *eval) getRules(ref ast.Ref, args []*ast.Term) (*ast.IndexResult, error)
return nil, nil
}
+ resolver := resolverPool.Get().(*evalResolver)
+ defer func() {
+ resolver.e = nil
+ resolver.args = nil
+ resolverPool.Put(resolver)
+ }()
+
var result *ast.IndexResult
var err error
+ resolver.e = e
if e.indexing {
- result, err = index.Lookup(&evalResolver{e: e, args: args})
+ resolver.args = args
+ result, err = index.Lookup(resolver)
} else {
- result, err = index.AllRules(&evalResolver{e: e})
+ result, err = index.AllRules(resolver)
}
if err != nil {
return nil, err
@@ -1461,20 +1665,27 @@ func (e *eval) getRules(ref ast.Ref, args []*ast.Term) (*ast.IndexResult, error)
result.EarlyExit = result.EarlyExit && e.earlyExit
- var msg strings.Builder
- if len(result.Rules) == 1 {
- msg.WriteString("(matched 1 rule")
- } else {
- msg.Grow(len("(matched NNNN rules)"))
- msg.WriteString("(matched ")
- msg.WriteString(strconv.Itoa(len(result.Rules)))
- msg.WriteString(" rules")
- }
- if result.EarlyExit {
- msg.WriteString(", early exit")
+ if e.traceEnabled {
+ var msg strings.Builder
+ if len(result.Rules) == 1 {
+ msg.WriteString("(matched 1 rule")
+ } else {
+ msg.Grow(len("(matched NNNN rules)"))
+ msg.WriteString("(matched ")
+ msg.WriteString(strconv.Itoa(len(result.Rules)))
+ msg.WriteString(" rules")
+ }
+ if result.EarlyExit {
+ msg.WriteString(", early exit")
+ }
+ msg.WriteRune(')')
+
+ // Copy ref here as ref otherwise always escapes to the heap,
+ // whether tracing is enabled or not.
+ r := ref.Copy()
+ e.traceIndex(e.query[e.index], msg.String(), &r)
}
- msg.WriteRune(')')
- e.traceIndex(e.query[e.index], msg.String(), &ref)
+
return result, err
}
@@ -1487,10 +1698,20 @@ type evalResolver struct {
args []*ast.Term
}
+var (
+ resolverPool = sync.Pool{
+ New: func() any {
+ return &evalResolver{}
+ },
+ }
+)
+
func (e *evalResolver) Resolve(ref ast.Ref) (ast.Value, error) {
e.e.instr.startTimer(evalOpResolve)
- if e.e.inliningControl.Disabled(ref, true) || e.e.saveSet.Contains(ast.NewTerm(ref), nil) {
+ // NOTE(ae): nil check on saveSet to avoid ast.NewTerm allocation when not needed
+ if e.e.inliningControl.Disabled(ref, true) || (e.e.saveSet != nil &&
+ e.e.saveSet.Contains(ast.NewTerm(ref), nil)) {
e.e.instr.stopTimer(evalOpResolve)
return nil, ast.UnknownValueErr{}
}
@@ -1568,7 +1789,7 @@ func (e *evalResolver) Resolve(ref ast.Ref) (ast.Value, error) {
return merged, err
}
e.e.instr.stopTimer(evalOpResolve)
- return nil, fmt.Errorf("illegal ref")
+ return nil, errors.New("illegal ref")
}
func (e *eval) resolveReadFromStorage(ref ast.Ref, a ast.Value) (ast.Value, error) {
@@ -1599,9 +1820,9 @@ func (e *eval) resolveReadFromStorage(ref ast.Ref, a ast.Value) (ast.Value, erro
if len(path) == 0 {
switch obj := blob.(type) {
- case map[string]interface{}:
+ case map[string]any:
if len(obj) > 0 {
- cpy := make(map[string]interface{}, len(obj)-1)
+ cpy := make(map[string]any, len(obj)-1)
for k, v := range obj {
if string(ast.SystemDocumentKey) != k {
cpy[k] = v
@@ -1611,16 +1832,7 @@ func (e *eval) resolveReadFromStorage(ref ast.Ref, a ast.Value) (ast.Value, erro
}
case ast.Object:
if obj.Len() > 0 {
- cpy := ast.NewObject()
- if err := obj.Iter(func(k *ast.Term, v *ast.Term) error {
- if !ast.SystemDocumentKey.Equal(k.Value) {
- cpy.Insert(k, v)
- }
- return nil
- }); err != nil {
- return nil, err
- }
- blob = cpy
+ blob, _ = obj.Map(systemDocumentKeyRemoveMapper)
}
}
}
@@ -1629,7 +1841,7 @@ func (e *eval) resolveReadFromStorage(ref ast.Ref, a ast.Value) (ast.Value, erro
case ast.Value:
v = blob
default:
- if blob, ok := blob.(map[string]interface{}); ok && !e.strictObjects {
+ if blob, ok := blob.(map[string]any); ok && !e.strictObjects {
v = ast.LazyObject(blob)
break
}
@@ -1653,8 +1865,21 @@ func (e *eval) resolveReadFromStorage(ref ast.Ref, a ast.Value) (ast.Value, erro
return merged, nil
}
+func systemDocumentKeyRemoveMapper(k, v *ast.Term) (*ast.Term, *ast.Term, error) {
+ if ast.SystemDocumentKey.Equal(k.Value) {
+ return nil, nil, nil
+ }
+ return k, v, nil
+}
+
func (e *eval) generateVar(suffix string) *ast.Term {
- return ast.VarTerm(fmt.Sprintf("%v_%v", e.genvarprefix, suffix))
+ buf := make([]byte, 0, len(e.genvarprefix)+len(suffix)+1)
+
+ buf = append(buf, e.genvarprefix...)
+ buf = append(buf, '_')
+ buf = append(buf, suffix...)
+
+ return ast.VarTerm(util.ByteSliceToString(buf))
}
func (e *eval) rewrittenVar(v ast.Var) (ast.Var, bool) {
@@ -1681,10 +1906,11 @@ func (e *eval) getDeclArgsLen(x *ast.Expr) (int, error) {
bi, _, ok := e.builtinFunc(operator.String())
if ok {
- return len(bi.Decl.Args()), nil
+ return bi.Decl.Arity(), nil
}
ir, err := e.getRules(operator, nil)
+ defer ast.IndexResultPool.Put(ir)
if err != nil {
return -1, err
} else if ir == nil || ir.Empty() {
@@ -1706,17 +1932,17 @@ func (e *eval) updateFromQuery(expr *ast.Expr) {
type evalBuiltin struct {
e *eval
bi *ast.Builtin
- bctx BuiltinContext
+ bctx *BuiltinContext
f BuiltinFunc
terms []*ast.Term
}
// Is this builtin non-deterministic, and did the caller provide an NDBCache?
func (e *evalBuiltin) canUseNDBCache(bi *ast.Builtin) bool {
- return bi.Nondeterministic && e.bctx.NDBuiltinCache != nil
+ return bi.Nondeterministic && e.bctx != nil && e.bctx.NDBuiltinCache != nil
}
-func (e evalBuiltin) eval(iter unifyIterator) error {
+func (e *evalBuiltin) eval(iter unifyIterator) error {
operands := make([]*ast.Term, len(e.terms))
@@ -1724,10 +1950,9 @@ func (e evalBuiltin) eval(iter unifyIterator) error {
operands[i] = e.e.bindings.Plug(e.terms[i])
}
- numDeclArgs := len(e.bi.Decl.FuncArgs().Args)
+ numDeclArgs := e.bi.Decl.Arity()
e.e.instr.startTimer(evalOpBuiltinCall)
- var err error
// NOTE(philipc): We sometimes have to drop the very last term off
// the args list for cases where a builtin's result is used/assigned,
@@ -1749,7 +1974,7 @@ func (e evalBuiltin) eval(iter unifyIterator) error {
case e.bi.Decl.Result() == nil:
return iter()
case len(operands) == numDeclArgs:
- if v.Compare(ast.Boolean(false)) == 0 {
+ if ast.Boolean(false).Equal(v) {
return nil // nothing to do
}
return iter()
@@ -1762,8 +1987,18 @@ func (e evalBuiltin) eval(iter unifyIterator) error {
e.e.instr.startTimer(evalOpBuiltinCall)
}
+ var bctx BuiltinContext
+ if e.bctx == nil {
+ bctx = BuiltinContext{
+ // Location potentially needed for error reporting.
+ Location: e.e.query[e.e.index].Location,
+ }
+ } else {
+ bctx = *e.bctx
+ }
+
// Normal unification flow for builtins:
- err = e.f(e.bctx, operands, func(output *ast.Term) error {
+ err := e.f(bctx, operands, func(output *ast.Term) error {
e.e.instr.stopTimer(evalOpBuiltinCall)
@@ -1773,7 +2008,7 @@ func (e evalBuiltin) eval(iter unifyIterator) error {
case e.bi.Decl.Result() == nil:
err = iter()
case len(operands) == numDeclArgs:
- if output.Value.Compare(ast.Boolean(false)) != 0 {
+ if !ast.Boolean(false).Equal(output.Value) {
err = iter()
} // else: nothing to do, don't iter()
default:
@@ -1813,9 +2048,8 @@ func (e evalBuiltin) eval(iter unifyIterator) error {
type evalFunc struct {
e *eval
- ref ast.Ref
- terms []*ast.Term
ir *ast.IndexResult
+ terms []*ast.Term
}
func (e evalFunc) eval(iter unifyIterator) error {
@@ -1837,15 +2071,38 @@ func (e evalFunc) eval(iter unifyIterator) error {
return e.e.saveCall(argCount, e.terms, iter)
}
- if e.e.partial() && (e.e.inliningControl.shallow || e.e.inliningControl.Disabled(e.ref, false)) {
- // check if the function definitions, or any of the arguments
- // contain something unknown
- unknown := e.e.unknown(e.ref, e.e.bindings)
- for i := 1; !unknown && i <= argCount; i++ {
- unknown = e.e.unknown(e.terms[i], e.e.bindings)
+ if e.e.partial() {
+ var mustGenerateSupport bool
+
+ if defRule := e.ir.Default; defRule != nil {
+ // The presence of a default func might force us to generate support
+ if len(defRule.Head.Args) == len(e.terms)-1 {
+ // The function is called without collecting the result in an output term,
+ // therefore any successful evaluation of the function is of interest, including the default value ...
+ if ret := defRule.Head.Value; ret == nil || !ret.Equal(ast.InternedTerm(false)) {
+ // ... unless the default value is false,
+ mustGenerateSupport = true
+ }
+ } else {
+ // The function is called with an output term, therefore any successful evaluation of the function is of interest.
+ // NOTE: Because of how the compiler rewrites function calls, we can't know if the result value is compared
+ // to a constant value, so we can't be as clever as we are for rules.
+ mustGenerateSupport = true
+ }
}
- if unknown {
- return e.partialEvalSupport(argCount, iter)
+
+ ref := e.terms[0].Value.(ast.Ref)
+
+ if mustGenerateSupport || e.e.inliningControl.shallow || e.e.inliningControl.Disabled(ref, false) {
+ // check if the function definitions, or any of the arguments
+ // contain something unknown
+ unknown := e.e.unknownRef(ref, e.e.bindings)
+ for i := 1; !unknown && i <= argCount; i++ {
+ unknown = e.e.unknown(e.terms[i], e.e.bindings)
+ }
+ if unknown {
+ return e.partialEvalSupport(argCount, iter)
+ }
}
}
@@ -1854,9 +2111,9 @@ func (e evalFunc) eval(iter unifyIterator) error {
func (e evalFunc) evalValue(iter unifyIterator, argCount int, findOne bool) error {
var cacheKey ast.Ref
- var hit bool
- var err error
if !e.e.partial() {
+ var hit bool
+ var err error
cacheKey, hit, err = e.evalCache(argCount, iter)
if err != nil {
return err
@@ -1865,12 +2122,23 @@ func (e evalFunc) evalValue(iter unifyIterator, argCount int, findOne bool) erro
}
}
+ // NOTE(anders): While it makes the code a bit more complex, reusing the
+ // args slice across each function increment saves a lot of resources
+ // compared to creating a new one inside each call to evalOneRule... so
+ // think twice before simplifying this :)
+ args := make([]*ast.Term, len(e.terms)-1)
+
var prev *ast.Term
return withSuppressEarlyExit(func() error {
var outerEe *deferredEarlyExitError
for _, rule := range e.ir.Rules {
- next, err := e.evalOneRule(iter, rule, cacheKey, prev, findOne)
+ copy(args, rule.Head.Args)
+ if len(args) == len(rule.Head.Args)+1 {
+ args[len(args)-1] = rule.Head.Value
+ }
+
+ next, err := e.evalOneRule(iter, rule, args, cacheKey, prev, findOne)
if err != nil {
if oee, ok := err.(*deferredEarlyExitError); ok {
if outerEe == nil {
@@ -1882,7 +2150,12 @@ func (e evalFunc) evalValue(iter unifyIterator, argCount int, findOne bool) erro
}
if next == nil {
for _, erule := range e.ir.Else[rule] {
- next, err = e.evalOneRule(iter, erule, cacheKey, prev, findOne)
+ copy(args, erule.Head.Args)
+ if len(args) == len(erule.Head.Args)+1 {
+ args[len(args)-1] = erule.Head.Value
+ }
+
+ next, err = e.evalOneRule(iter, erule, args, cacheKey, prev, findOne)
if err != nil {
if oee, ok := err.(*deferredEarlyExitError); ok {
if outerEe == nil {
@@ -1903,7 +2176,13 @@ func (e evalFunc) evalValue(iter unifyIterator, argCount int, findOne bool) erro
}
if e.ir.Default != nil && prev == nil {
- _, err := e.evalOneRule(iter, e.ir.Default, cacheKey, prev, findOne)
+ copy(args, e.ir.Default.Head.Args)
+ if len(args) == len(e.ir.Default.Head.Args)+1 {
+ args[len(args)-1] = e.ir.Default.Head.Value
+ }
+
+ _, err := e.evalOneRule(iter, e.ir.Default, args, cacheKey, prev, findOne)
+
return err
}
@@ -1916,15 +2195,19 @@ func (e evalFunc) evalValue(iter unifyIterator, argCount int, findOne bool) erro
}
func (e evalFunc) evalCache(argCount int, iter unifyIterator) (ast.Ref, bool, error) {
- var plen int
- if len(e.terms) == argCount+2 { // func name + output = 2
- plen = len(e.terms) - 1
- } else {
- plen = len(e.terms)
+ plen := len(e.terms)
+ if plen == argCount+2 { // func name + output = 2
+ plen -= 1
}
+
cacheKey := make([]*ast.Term, plen)
- for i := 0; i < plen; i++ {
- cacheKey[i] = e.e.bindings.Plug(e.terms[i])
+ for i := range plen {
+ if e.terms[i].IsGround() {
+ // Avoid expensive copying of ref if it is ground.
+ cacheKey[i] = e.terms[i]
+ } else {
+ cacheKey[i] = e.e.bindings.Plug(e.terms[i])
+ }
}
cached, _ := e.e.virtualCache.Get(cacheKey)
@@ -1943,23 +2226,18 @@ func (e evalFunc) evalCache(argCount int, iter unifyIterator) (ast.Ref, bool, er
return cacheKey, false, nil
}
-func (e evalFunc) evalOneRule(iter unifyIterator, rule *ast.Rule, cacheKey ast.Ref, prev *ast.Term, findOne bool) (*ast.Term, error) {
+func (e evalFunc) evalOneRule(iter unifyIterator, rule *ast.Rule, args []*ast.Term, cacheKey ast.Ref, prev *ast.Term, findOne bool) (*ast.Term, error) {
+ child := evalPool.Get()
+ defer evalPool.Put(child)
- child := e.e.child(rule.Body)
+ e.e.child(rule.Body, child)
child.findOne = findOne
- args := make([]*ast.Term, len(e.terms)-1)
- copy(args, rule.Head.Args)
-
- if len(args) == len(rule.Head.Args)+1 {
- args[len(args)-1] = rule.Head.Value
- }
-
var result *ast.Term
child.traceEnter(rule)
- err := child.biunifyArrays(ast.NewArray(e.terms[1:]...), ast.NewArray(args...), e.e.bindings, child.bindings, func() error {
+ err := child.biunifyTerms(e.terms[1:], args, e.e.bindings, child.bindings, func() error {
return child.eval(func(child *eval) error {
child.traceExit(rule)
@@ -1976,28 +2254,24 @@ func (e evalFunc) evalOneRule(iter unifyIterator, rule *ast.Rule, cacheKey ast.R
e.e.virtualCache.Put(cacheKey, result) // the redos confirm this, or the evaluation is aborted
}
- if len(rule.Head.Args) == len(e.terms)-1 {
- if result.Value.Compare(ast.Boolean(false)) == 0 {
- if prev != nil && ast.Compare(prev, result) != 0 {
- return functionConflictErr(rule.Location)
- }
- prev = result
- return nil
+ if len(rule.Head.Args) == len(e.terms)-1 && ast.Boolean(false).Equal(result.Value) {
+ if prev != nil && !prev.Equal(result) {
+ return functionConflictErr(rule.Location)
}
+ prev = result
+ return nil
}
// Partial evaluation should explore all rules and may not produce
// a ground result so we do not perform conflict detection or
// deduplication. See "ignore conflicts: functions" test case for
// an example.
- if !e.e.partial() {
- if prev != nil {
- if ast.Compare(prev, result) != 0 {
- return functionConflictErr(rule.Location)
- }
- child.traceRedo(rule)
- return nil
+ if !e.e.partial() && prev != nil {
+ if !prev.Equal(result) {
+ return functionConflictErr(rule.Location)
}
+ child.traceRedo(rule)
+ return nil
}
prev = result
@@ -2015,9 +2289,7 @@ func (e evalFunc) evalOneRule(iter unifyIterator, rule *ast.Rule, cacheKey ast.R
}
func (e evalFunc) partialEvalSupport(declArgsLen int, iter unifyIterator) error {
-
- path := e.e.namespaceRef(e.ref)
- term := ast.NewTerm(path)
+ path := e.e.namespaceRef(e.terms[0].Value.(ast.Ref))
if !e.e.saveSupport.Exists(path) {
for _, rule := range e.ir.Rules {
@@ -2026,18 +2298,29 @@ func (e evalFunc) partialEvalSupport(declArgsLen int, iter unifyIterator) error
return err
}
}
+
+ if e.ir.Default != nil {
+ err := e.partialEvalSupportRule(e.ir.Default, path)
+ if err != nil {
+ return err
+ }
+ }
}
if !e.e.saveSupport.Exists(path) { // we haven't saved anything, nothing to call
return nil
}
+ term := ast.NewTerm(path)
+
return e.e.saveCall(declArgsLen, append([]*ast.Term{term}, e.terms[1:]...), iter)
}
func (e evalFunc) partialEvalSupportRule(rule *ast.Rule, path ast.Ref) error {
+ child := evalPool.Get()
+ defer evalPool.Put(child)
- child := e.e.child(rule.Body)
+ e.e.child(rule.Body, child)
child.traceEnter(rule)
e.e.saveStack.PushQuery(nil)
@@ -2070,8 +2353,9 @@ func (e evalFunc) partialEvalSupportRule(rule *ast.Rule, path ast.Ref) error {
}
e.e.saveSupport.Insert(path, &ast.Rule{
- Head: head,
- Body: plugged,
+ Head: head,
+ Body: plugged,
+ Default: rule.Default,
})
}
child.traceRedo(rule)
@@ -2084,15 +2368,48 @@ func (e evalFunc) partialEvalSupportRule(rule *ast.Rule, path ast.Ref) error {
return err
}
+type deferredEarlyExitContainer struct {
+ deferred *deferredEarlyExitError
+}
+
+func (dc *deferredEarlyExitContainer) handleErr(err error) error {
+ if err == nil {
+ return nil
+ }
+
+ if dc.deferred == nil && errors.As(err, &dc.deferred) && dc.deferred != nil {
+ return nil
+ }
+
+ return err
+}
+
+// copyError returns a copy of the deferred early exit error if one is present.
+// This exists only to allow the container to be reused.
+func (dc *deferredEarlyExitContainer) copyError() *deferredEarlyExitError {
+ if dc.deferred == nil {
+ return nil
+ }
+
+ cpy := *dc.deferred
+ return &cpy
+}
+
+var deecPool = sync.Pool{
+ New: func() any {
+ return &deferredEarlyExitContainer{}
+ },
+}
+
type evalTree struct {
e *eval
- ref ast.Ref
- plugged ast.Ref
- pos int
bindings *bindings
rterm *ast.Term
rbindings *bindings
node *ast.TreeNode
+ ref ast.Ref
+ plugged ast.Ref
+ pos int
}
func (e evalTree) eval(iter unifyIterator) error {
@@ -2115,9 +2432,7 @@ func (e evalTree) finish(iter unifyIterator) error {
// In some cases, it may not be possible to PE the ref. If the path refers
// to virtual docs that PE does not support or base documents where inlining
// has been disabled, then we have to save.
- save := e.e.unknown(e.plugged, e.e.bindings)
-
- if save {
+ if e.e.partial() && e.e.unknownRef(e.plugged, e.e.bindings) {
return e.e.saveUnify(ast.NewTerm(e.plugged), e.rterm, e.bindings, e.rbindings, iter)
}
@@ -2171,28 +2486,20 @@ func (e evalTree) enumerate(iter unifyIterator) error {
return err
}
- var deferredEe *deferredEarlyExitError
- handleErr := func(err error) error {
- var dee *deferredEarlyExitError
- if errors.As(err, &dee) {
- if deferredEe == nil {
- deferredEe = dee
- }
- return nil
- }
- return err
- }
+ dc := deecPool.Get().(*deferredEarlyExitContainer)
+ dc.deferred = nil
+ defer deecPool.Put(dc)
if doc != nil {
switch doc := doc.(type) {
case *ast.Array:
- for i := 0; i < doc.Len(); i++ {
- k := ast.IntNumberTerm(i)
+ for i := range doc.Len() {
+ k := ast.InternedTerm(i)
err := e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, func() error {
return e.next(iter, k)
})
- if err := handleErr(err); err != nil {
+ if err := dc.handleErr(err); err != nil {
return err
}
}
@@ -2202,7 +2509,7 @@ func (e evalTree) enumerate(iter unifyIterator) error {
err := e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, func() error {
return e.next(iter, k)
})
- if err := handleErr(err); err != nil {
+ if err := dc.handleErr(err); err != nil {
return err
}
}
@@ -2211,15 +2518,15 @@ func (e evalTree) enumerate(iter unifyIterator) error {
err := e.e.biunify(elem, e.ref[e.pos], e.bindings, e.bindings, func() error {
return e.next(iter, elem)
})
- return handleErr(err)
+ return dc.handleErr(err)
}); err != nil {
return err
}
}
}
- if deferredEe != nil {
- return deferredEe
+ if dc.deferred != nil {
+ return dc.copyError()
}
if e.node == nil {
@@ -2317,24 +2624,25 @@ func (e evalTree) leaves(plugged ast.Ref, node *ast.TreeNode) (ast.Object, error
type evalVirtual struct {
e *eval
- ref ast.Ref
- plugged ast.Ref
- pos int
bindings *bindings
rterm *ast.Term
rbindings *bindings
+ ref ast.Ref
+ plugged ast.Ref
+ pos int
}
func (e evalVirtual) eval(iter unifyIterator) error {
ir, err := e.e.getRules(e.plugged[:e.pos+1], nil)
+ defer ast.IndexResultPool.Put(ir)
if err != nil {
return err
}
// Partial evaluation of ordered rules is not supported currently. Save the
// expression and continue. This could be revisited in the future.
- if len(ir.Else) > 0 && e.e.unknown(e.ref, e.bindings) {
+ if len(ir.Else) > 0 && e.e.unknownRef(e.ref, e.bindings) {
return e.e.saveUnify(ast.NewTerm(e.ref), e.rterm, e.bindings, e.rbindings, iter)
}
@@ -2393,14 +2701,14 @@ func (e evalVirtual) eval(iter unifyIterator) error {
type evalVirtualPartial struct {
e *eval
- ref ast.Ref
- plugged ast.Ref
- pos int
ir *ast.IndexResult
bindings *bindings
rterm *ast.Term
rbindings *bindings
empty *ast.Term
+ ref ast.Ref
+ plugged ast.Ref
+ pos int
}
type evalVirtualPartialCacheHint struct {
@@ -2442,7 +2750,7 @@ func maxRefLength(rules []*ast.Rule, ceil int) int {
for _, r := range rules {
rl := len(r.Ref())
if r.Head.RuleKind() == ast.MultiValue {
- rl = rl + 1
+ rl++
}
if rl >= ceil {
return ceil
@@ -2459,14 +2767,16 @@ func (e evalVirtualPartial) evalEachRule(iter unifyIterator, unknown bool) error
return nil
}
- m := maxRefLength(e.ir.Rules, len(e.ref))
- if e.e.unknown(e.ref[e.pos+1:m], e.bindings) {
- for _, rule := range e.ir.Rules {
- if err := e.evalOneRulePostUnify(iter, rule); err != nil {
- return err
+ if e.e.partial() {
+ m := maxRefLength(e.ir.Rules, len(e.ref))
+ if e.e.unknown(e.ref[e.pos+1:m], e.bindings) {
+ for _, rule := range e.ir.Rules {
+ if err := e.evalOneRulePostUnify(iter, rule); err != nil {
+ return err
+ }
}
+ return nil
}
- return nil
}
hint, err := e.evalCache(iter)
@@ -2536,8 +2846,11 @@ func (e evalVirtualPartial) evalAllRulesNoCache(rules []*ast.Rule) (*ast.Term, e
var visitedRefs []ast.Ref
+ child := evalPool.Get()
+ defer evalPool.Put(child)
+
for _, rule := range rules {
- child := e.e.child(rule.Body)
+ e.e.child(rule.Body, child)
child.traceEnter(rule)
err := child.eval(func(*eval) error {
child.traceExit(rule)
@@ -2570,8 +2883,10 @@ func wrapInObjects(leaf *ast.Term, ref ast.Ref) *ast.Term {
}
func (e evalVirtualPartial) evalOneRulePreUnify(iter unifyIterator, rule *ast.Rule, result *ast.Term, unknown bool, visitedRefs *[]ast.Ref) (*ast.Term, error) {
+ child := evalPool.Get()
+ defer evalPool.Put(child)
- child := e.e.child(rule.Body)
+ e.e.child(rule.Body, child)
child.traceEnter(rule)
var defined bool
@@ -2663,7 +2978,10 @@ func (e *eval) biunifyDynamicRef(pos int, a, b ast.Ref, b1, b2 *bindings, iter u
}
func (e evalVirtualPartial) evalOneRulePostUnify(iter unifyIterator, rule *ast.Rule) error {
- child := e.e.child(rule.Body)
+ child := evalPool.Get()
+ defer evalPool.Put(child)
+
+ e.e.child(rule.Body, child)
child.traceEnter(rule)
var defined bool
@@ -2747,8 +3065,10 @@ func (e evalVirtualPartial) partialEvalSupport(iter unifyIterator) error {
}
func (e evalVirtualPartial) partialEvalSupportRule(rule *ast.Rule, _ ast.Ref) (bool, error) {
+ child := evalPool.Get()
+ defer evalPool.Put(child)
- child := e.e.child(rule.Body)
+ e.e.child(rule.Body, child)
child.traceEnter(rule)
e.e.saveStack.PushQuery(nil)
@@ -2977,7 +3297,7 @@ func (q vcKeyScope) Hash() int {
return hash
}
-func (q vcKeyScope) IsGround() bool {
+func (vcKeyScope) IsGround() bool {
return false
}
@@ -3111,13 +3431,13 @@ func (e evalVirtualPartial) reduce(rule *ast.Rule, b *bindings, result *ast.Term
type evalVirtualComplete struct {
e *eval
- ref ast.Ref
- plugged ast.Ref
- pos int
ir *ast.IndexResult
bindings *bindings
rterm *ast.Term
rbindings *bindings
+ ref ast.Ref
+ plugged ast.Ref
+ pos int
}
func (e evalVirtualComplete) eval(iter unifyIterator) error {
@@ -3132,20 +3452,24 @@ func (e evalVirtualComplete) eval(iter unifyIterator) error {
return nil
}
- if !e.e.unknown(e.ref, e.bindings) {
+ if !e.e.unknownRef(e.ref, e.bindings) {
return e.evalValue(iter, e.ir.EarlyExit)
}
var generateSupport bool
if e.ir.Default != nil {
- // If the other term is not constant OR it's equal to the default value, then
- // a support rule must be produced as the default value _may_ be required. On
- // the other hand, if the other term is constant (i.e., it does not require
- // evaluation) and it differs from the default value then the default value is
- // _not_ required, so partially evaluate the rule normally.
- rterm := e.rbindings.Plug(e.rterm)
- generateSupport = !ast.IsConstant(rterm.Value) || e.ir.Default.Head.Value.Equal(rterm)
+ // If inlining has been disabled for the rterm, and the default rule has a 'false' result value,
+ // the default value is inconsequential, and support does not need to be generated.
+ if !(e.ir.Default.Head.Value.Equal(ast.InternedTerm(false)) && e.e.inliningControl.Disabled(e.rterm.Value, false)) {
+ // If the other term is not constant OR it's equal to the default value, then
+ // a support rule must be produced as the default value _may_ be required. On
+ // the other hand, if the other term is constant (i.e., it does not require
+ // evaluation) and it differs from the default value then the default value is
+ // _not_ required, so partially evaluate the rule normally.
+ rterm := e.rbindings.Plug(e.rterm)
+ generateSupport = !ast.IsConstant(rterm.Value) || e.ir.Default.Head.Value.Equal(rterm)
+ }
}
if generateSupport || e.e.inliningControl.shallow || e.e.inliningControl.Disabled(e.plugged[:e.pos+1], false) {
@@ -3226,8 +3550,10 @@ func (e evalVirtualComplete) evalValue(iter unifyIterator, findOne bool) error {
}
func (e evalVirtualComplete) evalValueRule(iter unifyIterator, rule *ast.Rule, prev *ast.Term, findOne bool) (*ast.Term, error) {
+ child := evalPool.Get()
+ defer evalPool.Put(child)
- child := e.e.child(rule.Body)
+ e.e.child(rule.Body, child)
child.findOne = findOne
child.traceEnter(rule)
var result *ast.Term
@@ -3262,9 +3588,11 @@ func (e evalVirtualComplete) evalValueRule(iter unifyIterator, rule *ast.Rule, p
}
func (e evalVirtualComplete) partialEval(iter unifyIterator) error {
+ child := evalPool.Get()
+ defer evalPool.Put(child)
for _, rule := range e.ir.Rules {
- child := e.e.child(rule.Body)
+ e.e.child(rule.Body, child)
child.traceEnter(rule)
err := child.eval(func(child *eval) error {
@@ -3327,8 +3655,10 @@ func (e evalVirtualComplete) partialEvalSupport(iter unifyIterator) error {
}
func (e evalVirtualComplete) partialEvalSupportRule(rule *ast.Rule, path ast.Ref) (bool, error) {
+ child := evalPool.Get()
+ defer evalPool.Put(child)
- child := e.e.child(rule.Body)
+ e.e.child(rule.Body, child)
child.traceEnter(rule)
e.e.saveStack.PushQuery(nil)
@@ -3383,13 +3713,13 @@ func (e evalVirtualComplete) evalTerm(iter unifyIterator, term *ast.Term, termbi
type evalTerm struct {
e *eval
- ref ast.Ref
- pos int
bindings *bindings
term *ast.Term
termbindings *bindings
rterm *ast.Term
rbindings *bindings
+ ref ast.Ref
+ pos int
}
func (e evalTerm) eval(iter unifyIterator) error {
@@ -3440,33 +3770,56 @@ func (e evalTerm) enumerate(iter unifyIterator) error {
switch v := e.term.Value.(type) {
case *ast.Array:
- for i := 0; i < v.Len(); i++ {
- k := ast.IntNumberTerm(i)
- err := e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, func() error {
- return e.next(iter, k)
- })
+ // Note(anders):
+ // For this case (e.g. input.foo[_]), we can avoid the (quite expensive) overhead of a callback
+ // function literal escaping to the heap in each iteration by inlining the biunification logic,
+ // meaning a 10x reduction in both the number of allocations made as well as the memory consumed.
+ // It is possible that such inlining could be done for the set/object cases as well, and that's
+ // worth looking into later, as I imagine set iteration in particular would be an even greater
+ // win across most policies. Those cases are however much more complex, as we need to deal with
+ // any type on either side, not just int/var as is the case here.
+ for i := range v.Len() {
+ a := ast.InternedTerm(i)
+ b := e.ref[e.pos]
+
+ if _, ok := b.Value.(ast.Var); ok {
+ if e.e.traceEnabled {
+ e.e.traceUnify(a, b)
+ }
+ var undo undo
+ b, e.bindings = e.bindings.apply(b)
+ e.bindings.bind(b, a, e.bindings, &undo)
- if err := handleErr(err); err != nil {
- return err
+ err := e.next(iter, a)
+ undo.Undo()
+ if err != nil {
+ if err := handleErr(err); err != nil {
+ return err
+ }
+ }
}
}
case ast.Object:
- if err := v.Iter(func(k, _ *ast.Term) error {
+ for _, k := range v.Keys() {
err := e.e.biunify(k, e.ref[e.pos], e.termbindings, e.bindings, func() error {
return e.next(iter, e.termbindings.Plug(k))
})
- return handleErr(err)
- }); err != nil {
- return err
+ if err != nil {
+ if err := handleErr(err); err != nil {
+ return err
+ }
+ }
}
case ast.Set:
- if err := v.Iter(func(elem *ast.Term) error {
+ for _, elem := range v.Slice() {
err := e.e.biunify(elem, e.ref[e.pos], e.termbindings, e.bindings, func() error {
return e.next(iter, e.termbindings.Plug(elem))
})
- return handleErr(err)
- }); err != nil {
- return err
+ if err != nil {
+ if err := handleErr(err); err != nil {
+ return err
+ }
+ }
}
}
@@ -3569,7 +3922,11 @@ func (e evalEvery) eval(iter unifyIterator) error {
).SetLocation(e.Domain.Location),
)
- domain := e.e.closure(generator)
+ domain := evalPool.Get()
+ defer evalPool.Put(domain)
+
+ e.e.closure(generator, domain)
+
all := true // all generator evaluations yield one successful body evaluation
domain.traceEnter(e.expr)
@@ -3580,7 +3937,11 @@ func (e evalEvery) eval(iter unifyIterator) error {
// This would do extra work, like iterating needlessly if domain was a large array.
return nil
}
- body := child.closure(e.Body)
+
+ body := evalPool.Get()
+ defer evalPool.Put(body)
+
+ child.closure(e.Body, body)
body.findOne = true
body.traceEnter(e.Body)
done := false
@@ -3707,10 +4068,12 @@ func applyCopyPropagation(p *copypropagation.CopyPropagator, instr *Instrumentat
return result
}
+func nonGroundKey(k, _ *ast.Term) bool {
+ return !k.IsGround()
+}
+
func nonGroundKeys(a ast.Object) bool {
- return a.Until(func(k, _ *ast.Term) bool {
- return !k.IsGround()
- })
+ return a.Until(nonGroundKey)
}
func plugKeys(a ast.Object, b *bindings) ast.Object {
@@ -3747,8 +4110,7 @@ func canInlineNegation(safe ast.VarSet, queries []ast.Body) bool {
SkipClosures: true,
})
vis.Walk(expr)
- unsafe := vis.Vars().Diff(safe).Diff(ast.ReservedVars)
- if len(unsafe) > 0 {
+ if vis.Vars().Diff(safe).DiffCount(ast.ReservedVars) > 0 {
return false
}
}
@@ -3772,7 +4134,7 @@ func newNestedCheckVisitor() *nestedCheckVisitor {
return v
}
-func (v *nestedCheckVisitor) visit(x interface{}) bool {
+func (v *nestedCheckVisitor) visit(x any) bool {
switch x.(type) {
case ast.Ref, ast.Call:
v.found = true
@@ -3863,7 +4225,7 @@ func isOtherRef(term *ast.Term) bool {
return !ref.HasPrefix(ast.DefaultRootRef) && !ref.HasPrefix(ast.InputRootRef)
}
-func isFunction(env *ast.TypeEnv, ref interface{}) bool {
+func isFunction(env *ast.TypeEnv, ref any) bool {
var r ast.Ref
switch v := ref.(type) {
case ast.Ref:
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/glob.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/glob.go
similarity index 85%
rename from vendor/github.com/open-policy-agent/opa/topdown/glob.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/glob.go
index baf092ab6d..4e80c519ba 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/glob.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/glob.go
@@ -6,15 +6,17 @@ import (
"github.com/gobwas/glob"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
const globCacheMaxSize = 100
const globInterQueryValueCacheHits = "rego_builtin_glob_interquery_value_cache_hits"
-var globCacheLock = sync.Mutex{}
-var globCache map[string]glob.Glob
+var noDelimiters = []rune{}
+var dotDelimiters = []rune{'.'}
+var globCacheLock = sync.RWMutex{}
+var globCache = map[string]glob.Glob{}
func builtinGlobMatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
pattern, err := builtins.StringOperand(operands[0].Value, 1)
@@ -25,14 +27,14 @@ func builtinGlobMatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.
var delimiters []rune
switch operands[1].Value.(type) {
case ast.Null:
- delimiters = []rune{}
+ delimiters = noDelimiters
case *ast.Array:
delimiters, err = builtins.RuneSliceOperand(operands[1].Value, 2)
if err != nil {
return err
}
if len(delimiters) == 0 {
- delimiters = []rune{'.'}
+ delimiters = dotDelimiters
}
default:
return builtins.NewOperandTypeErr(2, operands[1].Value, "array", "null")
@@ -55,12 +57,13 @@ func builtinGlobMatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.
if err != nil {
return err
}
- return iter(ast.BooleanTerm(m))
+ return iter(ast.InternedTerm(m))
}
func globCompileAndMatch(bctx BuiltinContext, id, pattern, match string, delimiters []rune) (bool, error) {
if bctx.InterQueryBuiltinValueCache != nil {
+ // TODO: Use named cache
val, ok := bctx.InterQueryBuiltinValueCache.Get(ast.String(id))
if ok {
pat, valid := val.(glob.Glob)
@@ -86,14 +89,15 @@ func globCompileAndMatch(bctx BuiltinContext, id, pattern, match string, delimit
return res.Match(match), nil
}
- globCacheLock.Lock()
- defer globCacheLock.Unlock()
+ globCacheLock.RLock()
p, ok := globCache[id]
+ globCacheLock.RUnlock()
if !ok {
var err error
if p, err = glob.Compile(pattern, delimiters...); err != nil {
return false, err
}
+ globCacheLock.Lock()
if len(globCache) >= globCacheMaxSize {
// Delete a (semi-)random key to make room for the new one.
for k := range globCache {
@@ -102,9 +106,10 @@ func globCompileAndMatch(bctx BuiltinContext, id, pattern, match string, delimit
}
}
globCache[id] = p
+ globCacheLock.Unlock()
}
- out := p.Match(match)
- return out, nil
+
+ return p.Match(match), nil
}
func builtinGlobQuoteMeta(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -117,7 +122,6 @@ func builtinGlobQuoteMeta(_ BuiltinContext, operands []*ast.Term, iter func(*ast
}
func init() {
- globCache = map[string]glob.Glob{}
RegisterBuiltinFunc(ast.GlobMatch.Name, builtinGlobMatch)
RegisterBuiltinFunc(ast.GlobQuoteMeta.Name, builtinGlobQuoteMeta)
}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/graphql.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/graphql.go
new file mode 100644
index 0000000000..f3bdf8e414
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/graphql.go
@@ -0,0 +1,690 @@
+// Copyright 2022 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+
+ gqlast "github.com/vektah/gqlparser/v2/ast"
+ gqlparser "github.com/vektah/gqlparser/v2/parser"
+ gqlvalidator "github.com/vektah/gqlparser/v2/validator"
+
+ // Side-effecting import. Triggers GraphQL library's validation rule init() functions.
+ _ "github.com/vektah/gqlparser/v2/validator/rules"
+
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/topdown/cache"
+)
+
+// Parses a GraphQL schema, and returns the GraphQL AST for the schema.
+func parseSchema(schema string) (*gqlast.SchemaDocument, error) {
+ // NOTE(philipc): We don't include the "built-in schema defs" from the
+ // underlying graphql parsing library here, because those definitions
+ // generate enormous AST blobs. In the future, if there is demand for
+ // a "full-spec" version of schema ASTs, we may need to provide a
+ // version of this function that includes the built-in schema
+ // definitions.
+ schemaAST, err := gqlparser.ParseSchema(&gqlast.Source{Input: schema})
+ if err != nil {
+ return nil, formatGqlParserError(err)
+ }
+ return schemaAST, nil
+}
+
+// Parses a GraphQL query, and returns the GraphQL AST for the query.
+func parseQuery(query string) (*gqlast.QueryDocument, error) {
+ queryAST, err := gqlparser.ParseQuery(&gqlast.Source{Input: query})
+ if err != nil {
+ return nil, formatGqlParserError(err)
+ }
+ return queryAST, nil
+}
+
+// Validates a GraphQL query against a schema, and returns an error.
+// In this case, we get a wrappered error list type, and pluck out
+// just the first error message in the list.
+func validateQuery(schema *gqlast.Schema, query *gqlast.QueryDocument) error {
+ // Validate the query against the schema, erroring if there's an issue.
+ err := gqlvalidator.Validate(schema, query)
+ if err != nil {
+ return formatGqlParserError(err)
+ }
+ return nil
+}
+
+func getBuiltinSchema() *gqlast.SchemaDocument {
+ schema, err := gqlparser.ParseSchema(gqlvalidator.Prelude)
+ if err != nil {
+ panic(fmt.Errorf("Error in gqlparser Prelude (should be impossible): %w", err))
+ }
+ return schema
+}
+
+// NOTE(philipc): This function expects *validated* schema documents, and will break
+// if it is fed arbitrary structures.
+func mergeSchemaDocuments(docA *gqlast.SchemaDocument, docB *gqlast.SchemaDocument) *gqlast.SchemaDocument {
+ ast := &gqlast.SchemaDocument{}
+ ast.Merge(docA)
+ ast.Merge(docB)
+ return ast
+}
+
+// Converts a SchemaDocument into a gqlast.Schema object that can be used for validation.
+// It merges in the builtin schema typedefs exactly as gqltop.LoadSchema did internally.
+func convertSchema(schemaDoc *gqlast.SchemaDocument) (*gqlast.Schema, error) {
+ // Merge builtin schema + schema we were provided.
+ builtinsSchemaDoc := getBuiltinSchema()
+ mergedSchemaDoc := mergeSchemaDocuments(builtinsSchemaDoc, schemaDoc)
+ schema, err := gqlvalidator.ValidateSchemaDocument(mergedSchemaDoc)
+ if err != nil {
+ return nil, fmt.Errorf("Error in gqlparser SchemaDocument to Schema conversion: %w", err)
+ }
+ return schema, nil
+}
+
+// Converts an ast.Object into a gqlast.QueryDocument object.
+func objectToQueryDocument(value ast.Object) (*gqlast.QueryDocument, error) {
+ // Convert ast.Term to any for JSON encoding below.
+ asJSON, err := ast.JSON(value)
+ if err != nil {
+ return nil, err
+ }
+ // Marshal to JSON.
+ bs, err := json.Marshal(asJSON)
+ if err != nil {
+ return nil, err
+ }
+ // Unmarshal from JSON -> gqlast.QueryDocument.
+ var result gqlast.QueryDocument
+ err = json.Unmarshal(bs, &result)
+ if err != nil {
+ return nil, err
+ }
+ return &result, nil
+}
+
+// Converts an ast.Object into a gqlast.SchemaDocument object.
+func objectToSchemaDocument(value ast.Object) (*gqlast.SchemaDocument, error) {
+ // Convert ast.Term to any for JSON encoding below.
+ asJSON, err := ast.JSON(value)
+ if err != nil {
+ return nil, err
+ }
+ // Marshal to JSON.
+ bs, err := json.Marshal(asJSON)
+ if err != nil {
+ return nil, err
+ }
+ // Unmarshal from JSON -> gqlast.SchemaDocument.
+ var result gqlast.SchemaDocument
+ err = json.Unmarshal(bs, &result)
+ if err != nil {
+ return nil, err
+ }
+ return &result, nil
+}
+
+// Recursively traverses an AST that has been run through InterfaceToValue,
+// and prunes away the fields with null or empty values, and all `Position`
+// structs.
+// NOTE(philipc): We currently prune away null values to reduce the level
+// of clutter in the returned AST objects. In the future, if there is demand
+// for ASTs that have a more regular/fixed structure, we may need to provide
+// a "raw" version of the AST, where we still prune away the `Position`
+// structs, but leave in the null fields.
+func pruneIrrelevantGraphQLASTNodes(value ast.Value) ast.Value {
+ // We iterate over the Value we've been provided, and recurse down
+ // in the case of complex types, such as Arrays/Objects.
+ // We are guaranteed to only have to deal with standard JSON types,
+ // so this is much less ugly than what we'd need for supporting every
+ // extant ast type!
+ switch x := value.(type) {
+ case *ast.Array:
+ result := ast.NewArray()
+ // Iterate over the array's elements, and do the following:
+ // - Drop any Nulls
+ // - Drop any any empty object/array value (after running the pruner)
+ for i := range x.Len() {
+ vTerm := x.Elem(i)
+ switch v := vTerm.Value.(type) {
+ case ast.Null:
+ continue
+ case *ast.Array:
+ // Safe, because we knew the type before going to prune it.
+ va := pruneIrrelevantGraphQLASTNodes(v).(*ast.Array)
+ if va.Len() > 0 {
+ result = result.Append(ast.NewTerm(va))
+ }
+ case ast.Object:
+ // Safe, because we knew the type before going to prune it.
+ vo := pruneIrrelevantGraphQLASTNodes(v).(ast.Object)
+ if vo.Len() > 0 {
+ result = result.Append(ast.NewTerm(vo))
+ }
+ default:
+ result = result.Append(vTerm)
+ }
+ }
+ return result
+ case ast.Object:
+ result := ast.NewObject()
+ // Iterate over our object's keys, and do the following:
+ // - Drop "Position".
+ // - Drop any key with a Null value.
+ // - Drop any key with an empty object/array value (after running the pruner)
+ keys := x.Keys()
+ for _, k := range keys {
+ // We drop the "Position" objects because we don't need the
+ // source-backref/location info they provide for policy rules.
+ // Note that keys are ast.Strings.
+ if ast.String("Position").Equal(k.Value) {
+ continue
+ }
+ vTerm := x.Get(k)
+ switch v := vTerm.Value.(type) {
+ case ast.Null:
+ continue
+ case *ast.Array:
+ // Safe, because we knew the type before going to prune it.
+ va := pruneIrrelevantGraphQLASTNodes(v).(*ast.Array)
+ if va.Len() > 0 {
+ result.Insert(k, ast.NewTerm(va))
+ }
+ case ast.Object:
+ // Safe, because we knew the type before going to prune it.
+ vo := pruneIrrelevantGraphQLASTNodes(v).(ast.Object)
+ if vo.Len() > 0 {
+ result.Insert(k, ast.NewTerm(vo))
+ }
+ default:
+ result.Insert(k, vTerm)
+ }
+ }
+ return result
+ default:
+ return x
+ }
+}
+
+func formatGqlParserError(err error) error {
+ // We use strings.TrimSuffix to remove the '.' characters that the library
+ // authors include on most of their validation errors. This should be safe,
+ // since variable names in their error messages are usually quoted, and
+ // this affects only the last character(s) in the string.
+ // NOTE(philipc): We know the error location will be in the query string,
+ // because schema validation always happens before this function is called.
+ // NOTE(rm): gqlparser does not _always_ return the error location
+ // so only populate location if it is available
+ if err == nil {
+ return nil
+ }
+ // If the error contains location information, format it nicely
+ errorParts := strings.SplitN(err.Error(), ":", 4)
+ if len(errorParts) >= 4 {
+ row, err := strconv.ParseUint(errorParts[1], 10, 64)
+ if err == nil {
+ col, err := strconv.ParseUint(errorParts[2], 10, 64)
+ if err == nil {
+ msg := strings.TrimSuffix(strings.TrimLeft(errorParts[len(errorParts)-1], " "), ".\n")
+ return fmt.Errorf("%s in GraphQL string at location %d:%d", msg, row, col)
+ }
+ }
+ }
+ // Wrap and return the full error if location information is not available
+ return fmt.Errorf("GraphQL parse error: %w", err)
+}
+
+// Reports errors from parsing/validation.
+func builtinGraphQLParse(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ var queryDoc *gqlast.QueryDocument
+ var schemaDoc *gqlast.SchemaDocument
+ var schemaASTValue ast.Value
+ var querySchema ast.Value
+ var err error
+
+ // Parse/translate query if it's a string/object.
+ switch x := operands[0].Value.(type) {
+ case ast.String:
+ queryDoc, err = parseQuery(string(x))
+ case ast.Object:
+ queryDoc, err = objectToQueryDocument(x)
+ default:
+ // Error if wrong type.
+ return builtins.NewOperandTypeErr(0, x, "string", "object")
+ }
+ if err != nil {
+ return err
+ }
+
+ schemaCacheKey, schema := cacheGetSchema(bctx, operands[1])
+ schemaASTCacheKey, querySchema := cacheGetSchemaAST(bctx, operands[1])
+ if schema == nil || querySchema == nil {
+ // Parse/translate schema if it's a string/object.
+ switch x := operands[1].Value.(type) {
+ case ast.String:
+ schemaDoc, err = parseSchema(string(x))
+ case ast.Object:
+ schemaDoc, err = objectToSchemaDocument(x)
+ default:
+ // Error if wrong type.
+ return builtins.NewOperandTypeErr(1, x, "string", "object")
+ }
+ if err != nil {
+ return err
+ }
+
+ // Convert SchemaDoc to Object before validating and converting it to a Schema
+ // This precludes inclusion of extra definitions from the default GraphQL schema
+ if querySchema == nil {
+ schemaASTValue, err = ast.InterfaceToValue(schemaDoc)
+ if err != nil {
+ return err
+ }
+ querySchema = pruneIrrelevantGraphQLASTNodes(schemaASTValue.(ast.Object))
+ cacheInsertSchemaAST(bctx, schemaASTCacheKey, querySchema)
+ }
+
+ // Validate the query against the schema, erroring if there's an issue.
+ if schema == nil {
+ schema, err = convertSchema(schemaDoc)
+ if err != nil {
+ return err
+ }
+ cacheInsertSchema(bctx, schemaCacheKey, schema)
+ }
+
+ }
+ // Transform the ASTs into Objects.
+ queryASTValue, err := ast.InterfaceToValue(queryDoc)
+ if err != nil {
+ return err
+ }
+
+ if err := validateQuery(schema, queryDoc); err != nil {
+ return err
+ }
+
+ // Recursively remove irrelevant AST structures.
+ queryResult := pruneIrrelevantGraphQLASTNodes(queryASTValue.(ast.Object))
+
+ // Construct return value.
+ verified := ast.ArrayTerm(
+ ast.NewTerm(queryResult),
+ ast.NewTerm(querySchema),
+ )
+
+ return iter(verified)
+}
+
+// Returns default value when errors occur.
+func builtinGraphQLParseAndVerify(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ var queryDoc *gqlast.QueryDocument
+ var schemaDoc *gqlast.SchemaDocument
+ var schemaASTValue ast.Value
+ var querySchema ast.Value
+ var err error
+
+ unverified := ast.ArrayTerm(
+ ast.InternedTerm(false),
+ ast.NewTerm(ast.NewObject()),
+ ast.NewTerm(ast.NewObject()),
+ )
+
+ // Parse/translate query if it's a string/object.
+ switch x := operands[0].Value.(type) {
+ case ast.String:
+ queryDoc, err = parseQuery(string(x))
+ case ast.Object:
+ queryDoc, err = objectToQueryDocument(x)
+ default:
+ // Error if wrong type.
+ return iter(unverified)
+ }
+ if err != nil {
+ return iter(unverified)
+ }
+
+ // Transform the ASTs into Objects.
+ queryASTValue, err := ast.InterfaceToValue(queryDoc)
+ if err != nil {
+ return iter(unverified)
+ }
+
+ schemaCacheKey, schema := cacheGetSchema(bctx, operands[1])
+ schemaASTCacheKey, querySchema := cacheGetSchemaAST(bctx, operands[1])
+ if schema == nil || querySchema == nil {
+ // Parse/translate schema if it's a string/object.
+ switch x := operands[1].Value.(type) {
+ case ast.String:
+ schemaDoc, err = parseSchema(string(x))
+ case ast.Object:
+ schemaDoc, err = objectToSchemaDocument(x)
+ default:
+ // Error if wrong type.
+ return iter(unverified)
+ }
+ if err != nil {
+ return iter(unverified)
+ }
+
+ // Convert SchemaDoc to Object before validating and converting it to a Schema
+ // This precludes inclusion of extra definitions from the default GraphQL schema
+ if querySchema == nil {
+ schemaASTValue, err = ast.InterfaceToValue(schemaDoc)
+ if err != nil {
+ return iter(unverified)
+ }
+ querySchema = pruneIrrelevantGraphQLASTNodes(schemaASTValue.(ast.Object))
+ cacheInsertSchemaAST(bctx, schemaASTCacheKey, querySchema)
+ }
+
+ if schema == nil {
+ schema, err = convertSchema(schemaDoc)
+ if err != nil {
+ return iter(unverified)
+ }
+ cacheInsertSchema(bctx, schemaCacheKey, schema)
+ }
+
+ }
+
+ // Validate the query against the schema, erroring if there's an issue.
+ if err := validateQuery(schema, queryDoc); err != nil {
+ return iter(unverified)
+ }
+
+ // Recursively remove irrelevant AST structures.
+ queryResult := pruneIrrelevantGraphQLASTNodes(queryASTValue.(ast.Object))
+
+ // Construct return value.
+ verified := ast.ArrayTerm(
+ ast.InternedTerm(true),
+ ast.NewTerm(queryResult),
+ ast.NewTerm(querySchema),
+ )
+
+ return iter(verified)
+}
+
+func builtinGraphQLParseQuery(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ raw, err := builtins.StringOperand(operands[0].Value, 1)
+ if err != nil {
+ return err
+ }
+
+ // Get the highly-nested AST struct, along with any errors generated.
+ query, err := parseQuery(string(raw))
+ if err != nil {
+ return err
+ }
+
+ // Transform the AST into an Object.
+ value, err := ast.InterfaceToValue(query)
+ if err != nil {
+ return err
+ }
+
+ // Recursively remove irrelevant AST structures.
+ result := pruneIrrelevantGraphQLASTNodes(value.(ast.Object))
+
+ return iter(ast.NewTerm(result))
+}
+
+func builtinGraphQLParseSchema(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ schemaDocCacheKey, schemaDoc := cacheGetSchemaDoc(bctx, operands[0])
+ if schemaDoc == nil {
+ raw, err := builtins.StringOperand(operands[0].Value, 1)
+ if err != nil {
+ return err
+ }
+
+ // Get the highly-nested AST struct, along with any errors generated.
+ schemaDoc, err = parseSchema(string(raw))
+ if err != nil {
+ return err
+ }
+ // Note SchemaDoc is not validated
+ cacheInsertSchemaDoc(bctx, schemaDocCacheKey, schemaDoc)
+ }
+
+ schemaASTCacheKey, schemaAST := cacheGetSchemaAST(bctx, operands[0])
+ if schemaAST == nil {
+
+ // Transform the AST into an Object.
+ value, err := ast.InterfaceToValue(schemaDoc)
+ if err != nil {
+ return err
+ }
+
+ // Recursively remove irrelevant AST structures.
+ schemaAST = pruneIrrelevantGraphQLASTNodes(value.(ast.Object))
+ cacheInsertSchemaAST(bctx, schemaASTCacheKey, schemaAST)
+ }
+ return iter(ast.NewTerm(schemaAST))
+}
+
+func builtinGraphQLIsValid(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ var queryDoc *gqlast.QueryDocument
+ var schemaDoc *gqlast.SchemaDocument
+ var schema *gqlast.Schema
+ var err error
+
+ switch x := operands[0].Value.(type) {
+ case ast.String:
+ queryDoc, err = parseQuery(string(x))
+ case ast.Object:
+ queryDoc, err = objectToQueryDocument(x)
+ default:
+ // Error if wrong type.
+ return iter(ast.InternedTerm(false))
+ }
+ if err != nil {
+ return iter(ast.InternedTerm(false))
+ }
+
+ schemaCacheKey, schema := cacheGetSchema(bctx, operands[1])
+ if schema == nil {
+ switch x := operands[1].Value.(type) {
+ case ast.String:
+ schemaDoc, err = parseSchema(string(x))
+ case ast.Object:
+ schemaDoc, err = objectToSchemaDocument(x)
+ default:
+ // Error if wrong type.
+ return iter(ast.InternedTerm(false))
+ }
+ if err != nil {
+ return iter(ast.InternedTerm(false))
+ }
+
+ // Validate the query against the schema, erroring if there's an issue.
+ schema, err = convertSchema(schemaDoc)
+ if err != nil {
+ return iter(ast.InternedTerm(false))
+ }
+ cacheInsertSchema(bctx, schemaCacheKey, schema)
+ }
+
+ if err := validateQuery(schema, queryDoc); err != nil {
+ return iter(ast.InternedTerm(false))
+ }
+
+ // If we got this far, the GraphQL query passed validation.
+ return iter(ast.InternedTerm(true))
+}
+
+func builtinGraphQLSchemaIsValid(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ var err error
+
+ // Schemas are only cached if they are valid
+ schemaCacheKey, schema := cacheGetSchema(bctx, operands[0])
+ if schema == nil {
+ var schemaDoc *gqlast.SchemaDocument
+ var validatedSchema *gqlast.Schema
+
+ switch x := operands[0].Value.(type) {
+ case ast.String:
+ schemaDoc, err = parseSchema(string(x))
+ case ast.Object:
+ schemaDoc, err = objectToSchemaDocument(x)
+ default:
+ // Error if wrong type.
+ return iter(ast.InternedTerm(false))
+ }
+ if err != nil {
+ return iter(ast.InternedTerm(false))
+ }
+ // Validate the schema, this determines the result
+ // and whether there is a schema to cache
+ validatedSchema, err = convertSchema(schemaDoc)
+ if err == nil {
+ cacheInsertSchema(bctx, schemaCacheKey, validatedSchema)
+ }
+ }
+
+ return iter(ast.InternedTerm(err == nil))
+}
+
+// Insert Schema into cache
+func cacheInsertSchema(bctx BuiltinContext, key string, schema *gqlast.Schema) {
+ if bctx.InterQueryBuiltinValueCache == nil || key == "" {
+ return
+ }
+ cacheKey := ast.String(key)
+ c := bctx.InterQueryBuiltinValueCache.GetCache(gqlCacheName)
+ if c == nil {
+ return
+ }
+ c.Insert(cacheKey, schema)
+}
+
+// Insert SchemaAST into cache
+func cacheInsertSchemaAST(bctx BuiltinContext, key string, schemaAST ast.Value) {
+ if bctx.InterQueryBuiltinValueCache == nil || key == "" {
+ return
+ }
+ cacheKeyAST := ast.String(key)
+ c := bctx.InterQueryBuiltinValueCache.GetCache(gqlCacheName)
+ if c == nil {
+ return
+ }
+ c.Insert(cacheKeyAST, schemaAST)
+}
+
+// Insert SchemaDocument into cache
+func cacheInsertSchemaDoc(bctx BuiltinContext, key string, schemaDoc *gqlast.SchemaDocument) {
+ if bctx.InterQueryBuiltinValueCache == nil || key == "" {
+ return
+ }
+ cacheKey := ast.String(key)
+ c := bctx.InterQueryBuiltinValueCache.GetCache(gqlCacheName)
+ if c == nil {
+ return
+ }
+ c.Insert(cacheKey, schemaDoc)
+}
+
+// Returns the cache key and a Schema if this key already exists in the cache
+func cacheGetSchema(bctx BuiltinContext, t *ast.Term) (string, *gqlast.Schema) {
+ if bctx.InterQueryBuiltinValueCache != nil {
+ if c := bctx.InterQueryBuiltinValueCache.GetCache(gqlCacheName); c != nil {
+ if key, keyOk := cacheKeyWithPrefix(bctx, t, "gql_schema-"); keyOk {
+ if val, ok := c.Get(ast.String(key)); ok {
+ if schema, isSchema := val.(*gqlast.Schema); isSchema {
+ return key, schema
+ }
+ }
+ return key, nil
+ }
+ }
+ }
+ return "", nil
+}
+
+// Returns the cache key and a SchemaDocument if this key already exists in the cache
+// Note: the SchemaDocument is not a validated Schema
+func cacheGetSchemaDoc(bctx BuiltinContext, t *ast.Term) (string, *gqlast.SchemaDocument) {
+ if bctx.InterQueryBuiltinValueCache != nil {
+ if c := bctx.InterQueryBuiltinValueCache.GetCache(gqlCacheName); c != nil {
+ if key, keyOk := cacheKeyWithPrefix(bctx, t, "gql_schema_doc-"); keyOk {
+ if val, ok := c.Get(ast.String(key)); ok {
+ if schemaDoc, isSchemaDoc := val.(*gqlast.SchemaDocument); isSchemaDoc {
+ return key, schemaDoc
+ }
+ }
+ return key, nil
+ }
+ }
+ }
+ return "", nil
+}
+
+// Returns the cache key and a SchemaDocument if this key already exists in the cache
+// Note: the AST should be pruned
+func cacheGetSchemaAST(bctx BuiltinContext, t *ast.Term) (string, ast.Value) {
+ if bctx.InterQueryBuiltinValueCache != nil {
+ if c := bctx.InterQueryBuiltinValueCache.GetCache(gqlCacheName); c != nil {
+ if key, keyOk := cacheKeyWithPrefix(bctx, t, "gql_schema_ast-"); keyOk {
+ if val, ok := c.Get(ast.String(key)); ok {
+ if schemaAST, isSchemaAST := val.(ast.Value); isSchemaAST {
+ return key, schemaAST
+ }
+ }
+ return key, nil
+ }
+ }
+ }
+ return "", nil
+}
+
+// Compute a constant size key for use with the cache
+func cacheKeyWithPrefix(bctx BuiltinContext, t *ast.Term, prefix string) (string, bool) {
+ var cacheKey ast.String
+ var ok = false
+
+ if bctx.InterQueryBuiltinValueCache != nil {
+ switch t.Value.(type) {
+ case ast.String:
+ err := builtinCryptoSha256(bctx, []*ast.Term{t}, func(term *ast.Term) error {
+ cacheKey = term.Value.(ast.String)
+ return nil
+ })
+ ok = (len(cacheKey) > 0) && (err == nil)
+ case ast.Object:
+ objTerm := ast.StringTerm(t.String())
+ err := builtinCryptoSha256(bctx, []*ast.Term{objTerm}, func(term *ast.Term) error {
+ cacheKey = term.Value.(ast.String)
+ return nil
+ })
+ ok = (len(cacheKey) > 0) && (err == nil)
+ default:
+ ok = false
+ }
+ }
+
+ return prefix + string(cacheKey), ok
+}
+
+const gqlCacheName = "graphql"
+
+func init() {
+
+ var defaultCacheEntries = 10
+ var graphqlCacheConfig = cache.NamedValueCacheConfig{
+ MaxNumEntries: &defaultCacheEntries,
+ }
+ cache.RegisterDefaultInterQueryBuiltinValueCacheConfig(gqlCacheName, &graphqlCacheConfig)
+
+ RegisterBuiltinFunc(ast.GraphQLParse.Name, builtinGraphQLParse)
+ RegisterBuiltinFunc(ast.GraphQLParseAndVerify.Name, builtinGraphQLParseAndVerify)
+ RegisterBuiltinFunc(ast.GraphQLParseQuery.Name, builtinGraphQLParseQuery)
+ RegisterBuiltinFunc(ast.GraphQLParseSchema.Name, builtinGraphQLParseSchema)
+ RegisterBuiltinFunc(ast.GraphQLIsValid.Name, builtinGraphQLIsValid)
+ RegisterBuiltinFunc(ast.GraphQLSchemaIsValid.Name, builtinGraphQLSchemaIsValid)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/http.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/http.go
new file mode 100644
index 0000000000..36c622e5a4
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/http.go
@@ -0,0 +1,1648 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "mime"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "runtime"
+ "slices"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/open-policy-agent/opa/internal/version"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/topdown/cache"
+ "github.com/open-policy-agent/opa/v1/tracing"
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+type cachingMode string
+
+const (
+ defaultHTTPRequestTimeoutEnv = "HTTP_SEND_TIMEOUT"
+ defaultCachingMode cachingMode = "serialized"
+ cachingModeDeserialized cachingMode = "deserialized"
+)
+
+var defaultHTTPRequestTimeout = time.Second * 5
+
+var allowedKeyNames = [...]string{
+ "method",
+ "url",
+ "body",
+ "enable_redirect",
+ "force_json_decode",
+ "force_yaml_decode",
+ "headers",
+ "raw_body",
+ "tls_use_system_certs",
+ "tls_ca_cert",
+ "tls_ca_cert_file",
+ "tls_ca_cert_env_variable",
+ "tls_client_cert",
+ "tls_client_cert_file",
+ "tls_client_cert_env_variable",
+ "tls_client_key",
+ "tls_client_key_file",
+ "tls_client_key_env_variable",
+ "tls_insecure_skip_verify",
+ "tls_server_name",
+ "timeout",
+ "cache",
+ "force_cache",
+ "force_cache_duration_seconds",
+ "raise_error",
+ "caching_mode",
+ "max_retry_attempts",
+ "cache_ignored_headers",
+}
+
+// ref: https://www.rfc-editor.org/rfc/rfc7231#section-6.1
+var cacheableHTTPStatusCodes = [...]int{
+ http.StatusOK,
+ http.StatusNonAuthoritativeInfo,
+ http.StatusNoContent,
+ http.StatusPartialContent,
+ http.StatusMultipleChoices,
+ http.StatusMovedPermanently,
+ http.StatusNotFound,
+ http.StatusMethodNotAllowed,
+ http.StatusGone,
+ http.StatusRequestURITooLong,
+ http.StatusNotImplemented,
+}
+
+var (
+ httpSendNetworkErrTerm = ast.StringTerm(HTTPSendNetworkErr)
+ httpSendInternalErrTerm = ast.StringTerm(HTTPSendInternalErr)
+
+ allowedKeys = ast.NewSet()
+ keyCache = make(map[string]*ast.Term, len(allowedKeyNames))
+ cacheableCodes = ast.NewSet()
+ requiredKeys = ast.NewSet(ast.InternedTerm("method"), ast.InternedTerm("url"))
+ httpSendLatencyMetricKey = "rego_builtin_http_send"
+ httpSendInterQueryCacheHits = httpSendLatencyMetricKey + "_interquery_cache_hits"
+ httpSendNetworkRequests = httpSendLatencyMetricKey + "_network_requests"
+)
+
+type httpSendKey string
+
+// CustomizeRoundTripper allows customizing an existing http.Transport,
+// to the returned value, which could be the same Transport or a new one.
+type CustomizeRoundTripper func(*http.Transport) http.RoundTripper
+
+const (
+ // httpSendBuiltinCacheKey is the key in the builtin context cache that
+ // points to the http.send() specific cache resides at.
+ httpSendBuiltinCacheKey httpSendKey = "HTTP_SEND_CACHE_KEY"
+
+ // HTTPSendInternalErr represents a runtime evaluation error.
+ HTTPSendInternalErr string = "eval_http_send_internal_error"
+
+ // HTTPSendNetworkErr represents a network error.
+ HTTPSendNetworkErr string = "eval_http_send_network_error"
+
+ // minRetryDelay is amount of time to backoff after the first failure.
+ minRetryDelay = time.Millisecond * 100
+
+ // maxRetryDelay is the upper bound of backoff delay.
+ maxRetryDelay = time.Second * 60
+)
+
+func builtinHTTPSend(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+
+ obj, err := builtins.ObjectOperand(operands[0].Value, 1)
+ if err != nil {
+ return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err)
+ }
+
+ raiseError, err := getRaiseErrorValue(obj)
+ if err != nil {
+ return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err)
+ }
+
+ req, err := validateHTTPRequestOperand(operands[0], 1)
+ if err != nil {
+ if raiseError {
+ return handleHTTPSendErr(bctx, err)
+ }
+
+ return iter(generateRaiseErrorResult(handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err)))
+ }
+
+ result, err := getHTTPResponse(bctx, req)
+ if err != nil {
+ if raiseError {
+ return handleHTTPSendErr(bctx, err)
+ }
+
+ result = generateRaiseErrorResult(err)
+ }
+ return iter(result)
+}
+
+func generateRaiseErrorResult(err error) *ast.Term {
+ var errObj ast.Object
+ switch err.(type) {
+ case *url.Error:
+ errObj = ast.NewObject(
+ ast.Item(ast.InternedTerm("code"), httpSendNetworkErrTerm),
+ ast.Item(ast.InternedTerm("message"), ast.StringTerm(err.Error())),
+ )
+ default:
+ errObj = ast.NewObject(
+ ast.Item(ast.InternedTerm("code"), httpSendInternalErrTerm),
+ ast.Item(ast.InternedTerm("message"), ast.StringTerm(err.Error())),
+ )
+ }
+
+ return ast.ObjectTerm(
+ ast.Item(ast.InternedTerm("status_code"), ast.InternedTerm(0)),
+ ast.Item(ast.InternedTerm("error"), ast.NewTerm(errObj)),
+ )
+}
+
+func getHTTPResponse(bctx BuiltinContext, req ast.Object) (*ast.Term, error) {
+
+ bctx.Metrics.Timer(httpSendLatencyMetricKey).Start()
+ defer bctx.Metrics.Timer(httpSendLatencyMetricKey).Stop()
+
+ key, err := getKeyFromRequest(req)
+ if err != nil {
+ return nil, err
+ }
+
+ reqExecutor, err := newHTTPRequestExecutor(bctx, req, key)
+ if err != nil {
+ return nil, err
+ }
+ // Check if cache already has a response for this query
+ // set headers to exclude cache_ignored_headers
+ resp, err := reqExecutor.CheckCache()
+ if err != nil {
+ return nil, err
+ }
+
+ if resp == nil {
+ httpResp, err := reqExecutor.ExecuteHTTPRequest()
+ if err != nil {
+ reqExecutor.InsertErrorIntoCache(err)
+ return nil, err
+ }
+ defer util.Close(httpResp)
+ // Add result to intra/inter-query cache.
+ resp, err = reqExecutor.InsertIntoCache(httpResp)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return ast.NewTerm(resp), nil
+}
+
+// getKeyFromRequest returns a key to be used for caching HTTP responses
+// deletes headers from request object mentioned in cache_ignored_headers
+func getKeyFromRequest(req ast.Object) (ast.Object, error) {
+ // deep copy so changes to key do not reflect in the request object
+ key := req.Copy()
+ cacheIgnoredHeadersTerm := req.Get(keyCache["cache_ignored_headers"])
+ allHeadersTerm := req.Get(ast.StringTerm("headers"))
+ // skip because no headers to delete
+ if cacheIgnoredHeadersTerm == nil || allHeadersTerm == nil {
+ // need to explicitly set cache_ignored_headers to null
+ // equivalent requests might have different sets of exclusion lists
+ key.Insert(ast.StringTerm("cache_ignored_headers"), ast.InternedNullTerm)
+ return key, nil
+ }
+ var cacheIgnoredHeaders []string
+ err := ast.As(cacheIgnoredHeadersTerm.Value, &cacheIgnoredHeaders)
+ if err != nil {
+ return nil, err
+ }
+ var allHeaders map[string]any
+ err = ast.As(allHeadersTerm.Value, &allHeaders)
+ if err != nil {
+ return nil, err
+ }
+ for _, header := range cacheIgnoredHeaders {
+ delete(allHeaders, header)
+ }
+ val, err := ast.InterfaceToValue(allHeaders)
+ if err != nil {
+ return nil, err
+ }
+ key.Insert(keyCache["headers"], ast.NewTerm(val))
+ // remove cache_ignored_headers key
+ key.Insert(keyCache["cache_ignored_headers"], ast.InternedNullTerm)
+ return key, nil
+}
+
+func init() {
+ createKeys()
+ createCacheableHTTPStatusCodes()
+ initDefaults()
+ RegisterBuiltinFunc(ast.HTTPSend.Name, builtinHTTPSend)
+}
+
+func handleHTTPSendErr(bctx BuiltinContext, err error) error {
+ // Return HTTP client timeout errors in a generic error message to avoid confusion about what happened.
+ // Do not do this if the builtin context was cancelled and is what caused the request to stop.
+ if urlErr, ok := err.(*url.Error); ok && urlErr.Timeout() && bctx.Context.Err() == nil {
+ err = fmt.Errorf("%s %s: request timed out", urlErr.Op, urlErr.URL)
+ }
+ if err := bctx.Context.Err(); err != nil {
+ return Halt{
+ Err: &Error{
+ Code: CancelErr,
+ Message: fmt.Sprintf("http.send: timed out (%s)", err.Error()),
+ },
+ }
+ }
+ return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err)
+}
+
+func initDefaults() {
+ timeoutDuration := os.Getenv(defaultHTTPRequestTimeoutEnv)
+ if timeoutDuration != "" {
+ var err error
+ defaultHTTPRequestTimeout, err = time.ParseDuration(timeoutDuration)
+ if err != nil {
+ // If it is set to something not valid don't let the process continue in a state
+ // that will almost definitely give unexpected results by having it set at 0
+ // which means no timeout..
+ // This environment variable isn't considered part of the public API.
+ // TODO(patrick-east): Remove the environment variable
+ panic(fmt.Sprintf("invalid value for HTTP_SEND_TIMEOUT: %s", err))
+ }
+ }
+}
+
+func validateHTTPRequestOperand(term *ast.Term, pos int) (ast.Object, error) {
+
+ obj, err := builtins.ObjectOperand(term.Value, pos)
+ if err != nil {
+ return nil, err
+ }
+
+ requestKeys := ast.NewSet(obj.Keys()...)
+
+ invalidKeys := requestKeys.Diff(allowedKeys)
+ if invalidKeys.Len() != 0 {
+ return nil, builtins.NewOperandErr(pos, "invalid request parameters(s): %v", invalidKeys)
+ }
+
+ missingKeys := requiredKeys.Diff(requestKeys)
+ if missingKeys.Len() != 0 {
+ return nil, builtins.NewOperandErr(pos, "missing required request parameters(s): %v", missingKeys)
+ }
+
+ return obj, nil
+
+}
+
+// canonicalizeHeaders returns a copy of the headers where the keys are in
+// canonical HTTP form.
+func canonicalizeHeaders(headers map[string]any) map[string]any {
+ canonicalized := map[string]any{}
+
+ for k, v := range headers {
+ canonicalized[http.CanonicalHeaderKey(k)] = v
+ }
+
+ return canonicalized
+}
+
+// useSocket examines the url for "unix://" and returns a *http.Transport with
+// a DialContext that opens a socket (specified in the http call).
+// The url is expected to contain socket=/path/to/socket (url encoded)
+// Ex. "unix://localhost/end/point?socket=%2Ftmp%2Fhttp.sock"
+func useSocket(rawURL string, tlsConfig *tls.Config) (bool, string, *http.Transport) {
+ u, err := url.Parse(rawURL)
+ if err != nil {
+ return false, "", nil
+ }
+
+ if u.Scheme != "unix" || u.RawQuery == "" {
+ return false, rawURL, nil
+ }
+
+ v, err := url.ParseQuery(u.RawQuery)
+ if err != nil {
+ return false, rawURL, nil
+ }
+
+ // Rewrite URL targeting the UNIX domain socket.
+ u.Scheme = "http"
+
+ // Extract the path to the socket.
+ // Only retrieve the first value. Subsequent values are ignored and removed
+ // to prevent HTTP parameter pollution.
+ socket := v.Get("socket")
+ v.Del("socket")
+ u.RawQuery = v.Encode()
+
+ tr := http.DefaultTransport.(*http.Transport).Clone()
+ tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) {
+ return http.DefaultTransport.(*http.Transport).DialContext(ctx, "unix", socket)
+ }
+ tr.TLSClientConfig = tlsConfig
+ tr.DisableKeepAlives = true
+
+ return true, u.String(), tr
+}
+
+func verifyHost(bctx BuiltinContext, host string) error {
+ if bctx.Capabilities == nil || bctx.Capabilities.AllowNet == nil {
+ return nil
+ }
+
+ if slices.Contains(bctx.Capabilities.AllowNet, host) {
+ return nil
+ }
+
+ return fmt.Errorf("unallowed host: %s", host)
+}
+
+func verifyURLHost(bctx BuiltinContext, unverifiedURL string) error {
+ // Eager return to avoid unnecessary URL parsing
+ if bctx.Capabilities == nil || bctx.Capabilities.AllowNet == nil {
+ return nil
+ }
+
+ parsedURL, err := url.Parse(unverifiedURL)
+ if err != nil {
+ return err
+ }
+
+ host := strings.Split(parsedURL.Host, ":")[0]
+
+ return verifyHost(bctx, host)
+}
+
+func createHTTPRequest(bctx BuiltinContext, obj ast.Object) (*http.Request, *http.Client, error) {
+ var (
+ url, method string
+ // Additional CA certificates loading options.
+ tlsCaCert []byte
+ tlsCaCertEnvVar, tlsCaCertFile string
+ // Client TLS certificate and key options. Each input source
+ // comes in a matched pair.
+ tlsClientCert, tlsClientKey []byte
+ tlsClientCertEnvVar, tlsClientKeyEnvVar string
+ tlsClientCertFile, tlsClientKeyFile, tlsServerName string
+
+ body, rawBody *bytes.Buffer
+ enableRedirect, tlsInsecureSkipVerify bool
+ tlsUseSystemCerts *bool
+ tlsConfig tls.Config
+ customHeaders map[string]any
+ )
+
+ timeout := defaultHTTPRequestTimeout
+
+ for _, val := range obj.Keys() {
+ key, err := ast.JSON(val.Value)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ key = key.(string)
+
+ var strVal string
+
+ if s, ok := obj.Get(val).Value.(ast.String); ok {
+ strVal = strings.Trim(string(s), "\"")
+ } else {
+ // Most parameters are strings, so consolidate the type checking.
+ switch key {
+ case "method",
+ "url",
+ "raw_body",
+ "tls_ca_cert",
+ "tls_ca_cert_file",
+ "tls_ca_cert_env_variable",
+ "tls_client_cert",
+ "tls_client_cert_file",
+ "tls_client_cert_env_variable",
+ "tls_client_key",
+ "tls_client_key_file",
+ "tls_client_key_env_variable",
+ "tls_server_name":
+ return nil, nil, fmt.Errorf("%q must be a string", key)
+ }
+ }
+
+ switch key {
+ case "method":
+ method = strings.ToUpper(strVal)
+ case "url":
+ err := verifyURLHost(bctx, strVal)
+ if err != nil {
+ return nil, nil, err
+ }
+ url = strVal
+ case "enable_redirect":
+ enableRedirect, err = strconv.ParseBool(obj.Get(val).String())
+ if err != nil {
+ return nil, nil, err
+ }
+ case "body":
+ bodyVal := obj.Get(val).Value
+ bodyValInterface, err := ast.JSON(bodyVal)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ bodyValBytes, err := json.Marshal(bodyValInterface)
+ if err != nil {
+ return nil, nil, err
+ }
+ body = bytes.NewBuffer(bodyValBytes)
+ case "raw_body":
+ rawBody = bytes.NewBufferString(strVal)
+ case "tls_use_system_certs":
+ tempTLSUseSystemCerts, err := strconv.ParseBool(obj.Get(val).String())
+ if err != nil {
+ return nil, nil, err
+ }
+ tlsUseSystemCerts = &tempTLSUseSystemCerts
+ case "tls_ca_cert":
+ tlsCaCert = []byte(strVal)
+ case "tls_ca_cert_file":
+ tlsCaCertFile = strVal
+ case "tls_ca_cert_env_variable":
+ tlsCaCertEnvVar = strVal
+ case "tls_client_cert":
+ tlsClientCert = []byte(strVal)
+ case "tls_client_cert_file":
+ tlsClientCertFile = strVal
+ case "tls_client_cert_env_variable":
+ tlsClientCertEnvVar = strVal
+ case "tls_client_key":
+ tlsClientKey = []byte(strVal)
+ case "tls_client_key_file":
+ tlsClientKeyFile = strVal
+ case "tls_client_key_env_variable":
+ tlsClientKeyEnvVar = strVal
+ case "tls_server_name":
+ tlsServerName = strVal
+ case "headers":
+ headersVal := obj.Get(val).Value
+ headersValInterface, err := ast.JSON(headersVal)
+ if err != nil {
+ return nil, nil, err
+ }
+ var ok bool
+ customHeaders, ok = headersValInterface.(map[string]any)
+ if !ok {
+ return nil, nil, errors.New("invalid type for headers key")
+ }
+ case "tls_insecure_skip_verify":
+ tlsInsecureSkipVerify, err = strconv.ParseBool(obj.Get(val).String())
+ if err != nil {
+ return nil, nil, err
+ }
+ case "timeout":
+ timeout, err = parseTimeout(obj.Get(val).Value)
+ if err != nil {
+ return nil, nil, err
+ }
+ case "cache", "caching_mode",
+ "force_cache", "force_cache_duration_seconds",
+ "force_json_decode", "force_yaml_decode",
+ "raise_error", "max_retry_attempts", "cache_ignored_headers": // no-op
+ default:
+ return nil, nil, fmt.Errorf("invalid parameter %q", key)
+ }
+ }
+
+ isTLS := false
+ client := &http.Client{
+ Timeout: timeout,
+ CheckRedirect: func(*http.Request, []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
+ }
+
+ if tlsInsecureSkipVerify {
+ isTLS = true
+ tlsConfig.InsecureSkipVerify = tlsInsecureSkipVerify
+ }
+
+ if len(tlsClientCert) > 0 && len(tlsClientKey) > 0 {
+ cert, err := tls.X509KeyPair(tlsClientCert, tlsClientKey)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ isTLS = true
+ tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
+ }
+
+ if tlsClientCertFile != "" && tlsClientKeyFile != "" {
+ cert, err := tls.LoadX509KeyPair(tlsClientCertFile, tlsClientKeyFile)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ isTLS = true
+ tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
+ }
+
+ if tlsClientCertEnvVar != "" && tlsClientKeyEnvVar != "" {
+ cert, err := tls.X509KeyPair(
+ []byte(os.Getenv(tlsClientCertEnvVar)),
+ []byte(os.Getenv(tlsClientKeyEnvVar)))
+ if err != nil {
+ return nil, nil, fmt.Errorf("cannot extract public/private key pair from envvars %q, %q: %w",
+ tlsClientCertEnvVar, tlsClientKeyEnvVar, err)
+ }
+
+ isTLS = true
+ tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
+ }
+
+ // Use system certs if no CA cert is provided
+ // or system certs flag is not set
+ if len(tlsCaCert) == 0 && tlsCaCertFile == "" && tlsCaCertEnvVar == "" && tlsUseSystemCerts == nil {
+ trueValue := true
+ tlsUseSystemCerts = &trueValue
+ }
+
+ // Check the system certificates config first so that we
+ // load additional certificated into the correct pool.
+ if tlsUseSystemCerts != nil && *tlsUseSystemCerts && runtime.GOOS != "windows" {
+ pool, err := x509.SystemCertPool()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ isTLS = true
+ tlsConfig.RootCAs = pool
+ }
+
+ if len(tlsCaCert) != 0 {
+ tlsCaCert = bytes.ReplaceAll(tlsCaCert, []byte("\\n"), []byte("\n"))
+ pool, err := addCACertsFromBytes(tlsConfig.RootCAs, tlsCaCert)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ isTLS = true
+ tlsConfig.RootCAs = pool
+ }
+
+ if tlsCaCertFile != "" {
+ pool, err := addCACertsFromFile(tlsConfig.RootCAs, tlsCaCertFile)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ isTLS = true
+ tlsConfig.RootCAs = pool
+ }
+
+ if tlsCaCertEnvVar != "" {
+ pool, err := addCACertsFromEnv(tlsConfig.RootCAs, tlsCaCertEnvVar)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ isTLS = true
+ tlsConfig.RootCAs = pool
+ }
+
+ var transport *http.Transport
+ if isTLS {
+ if ok, parsedURL, tr := useSocket(url, &tlsConfig); ok {
+ transport = tr
+ url = parsedURL
+ } else {
+ transport = http.DefaultTransport.(*http.Transport).Clone()
+ transport.TLSClientConfig = &tlsConfig
+ transport.DisableKeepAlives = true
+ }
+ } else {
+ if ok, parsedURL, tr := useSocket(url, nil); ok {
+ transport = tr
+ url = parsedURL
+ }
+ }
+
+ if bctx.RoundTripper != nil {
+ client.Transport = bctx.RoundTripper(transport)
+ } else if transport != nil {
+ client.Transport = transport
+ }
+
+ // check if redirects are enabled
+ if enableRedirect {
+ client.CheckRedirect = func(req *http.Request, _ []*http.Request) error {
+ return verifyURLHost(bctx, req.URL.String())
+ }
+ }
+
+ if rawBody != nil {
+ body = rawBody
+ } else if body == nil {
+ body = bytes.NewBufferString("")
+ }
+
+ // create the http request, use the builtin context's context to ensure
+ // the request is cancelled if evaluation is cancelled.
+ req, err := http.NewRequest(method, url, body)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req = req.WithContext(bctx.Context)
+
+ // Add custom headers
+ if len(customHeaders) != 0 {
+ customHeaders = canonicalizeHeaders(customHeaders)
+
+ for k, v := range customHeaders {
+ header, ok := v.(string)
+ if !ok {
+ return nil, nil, fmt.Errorf("invalid type for headers value %q", v)
+ }
+
+ req.Header.Add(k, header)
+ }
+
+ // Don't overwrite or append to one that was set in the custom headers
+ if _, hasUA := customHeaders["User-Agent"]; !hasUA {
+ req.Header.Add("User-Agent", version.UserAgent)
+ }
+
+ // If the caller specifies the Host header, use it for the HTTP
+ // request host and the TLS server name.
+ if host, hasHost := customHeaders["Host"]; hasHost {
+ host := host.(string) // We already checked that it's a string.
+ req.Host = host
+
+ // Only default the ServerName if the caller has
+ // specified the host. If we don't specify anything,
+ // Go will default to the target hostname. This name
+ // is not the same as the default that Go populates
+ // `req.Host` with, which is why we don't just set
+ // this unconditionally.
+ tlsConfig.ServerName = host
+ }
+ }
+
+ if tlsServerName != "" {
+ tlsConfig.ServerName = tlsServerName
+ }
+
+ if len(bctx.DistributedTracingOpts) > 0 {
+ client.Transport = tracing.NewTransport(client.Transport, bctx.DistributedTracingOpts)
+ }
+
+ return req, client, nil
+}
+
+func executeHTTPRequest(req *http.Request, client *http.Client, inputReqObj ast.Object) (*http.Response, error) {
+ var err error
+ var retry int
+
+ retry, err = getNumberValFromReqObj(inputReqObj, keyCache["max_retry_attempts"])
+ if err != nil {
+ return nil, err
+ }
+
+ for i := 0; true; i++ {
+
+ var resp *http.Response
+ resp, err = client.Do(req)
+ if err == nil {
+ return resp, nil
+ }
+
+ // final attempt
+ if i == retry {
+ break
+ }
+
+ if err == context.Canceled {
+ return nil, err
+ }
+
+ delay := util.DefaultBackoff(float64(minRetryDelay), float64(maxRetryDelay), i)
+ timer, timerCancel := util.TimerWithCancel(delay)
+ select {
+ case <-timer.C:
+ case <-req.Context().Done():
+ timerCancel() // explicitly cancel the timer.
+ return nil, context.Canceled
+ }
+ }
+ return nil, err
+}
+
+func isJSONType(header http.Header) bool {
+ t, _, err := mime.ParseMediaType(header.Get("Content-Type"))
+ if err != nil {
+ return false
+ }
+
+ mediaType := strings.Split(t, "/")
+ if len(mediaType) != 2 {
+ return false
+ }
+
+ if mediaType[0] == "application" {
+ if mediaType[1] == "json" || strings.HasSuffix(mediaType[1], "+json") {
+ return true
+ }
+ }
+
+ return false
+}
+
+func isContentType(header http.Header, typ ...string) bool {
+ for _, t := range typ {
+ if strings.Contains(header.Get("Content-Type"), t) {
+ return true
+ }
+ }
+ return false
+}
+
+type httpSendCacheEntry struct {
+ response *ast.Value
+ error error
+}
+
+// The httpSendCache is used for intra-query caching of http.send results.
+type httpSendCache struct {
+ entries *util.HasherMap[ast.Value, httpSendCacheEntry]
+}
+
+func newHTTPSendCache() *httpSendCache {
+ return &httpSendCache{
+ entries: util.NewHasherMap[ast.Value, httpSendCacheEntry](ast.ValueEqual),
+ }
+}
+
+func (cache *httpSendCache) get(k ast.Value) *httpSendCacheEntry {
+ if v, ok := cache.entries.Get(k); ok {
+ return &v
+ }
+ return nil
+}
+
+func (cache *httpSendCache) putResponse(k ast.Value, v *ast.Value) {
+ cache.entries.Put(k, httpSendCacheEntry{response: v})
+}
+
+func (cache *httpSendCache) putError(k ast.Value, v error) {
+ cache.entries.Put(k, httpSendCacheEntry{error: v})
+}
+
+// In the BuiltinContext cache we only store a single entry that points to
+// our ValueMap which is the "real" http.send() cache.
+func getHTTPSendCache(bctx BuiltinContext) *httpSendCache {
+ raw, ok := bctx.Cache.Get(httpSendBuiltinCacheKey)
+ if !ok {
+ // Initialize if it isn't there
+ c := newHTTPSendCache()
+ bctx.Cache.Put(httpSendBuiltinCacheKey, c)
+ return c
+ }
+
+ c, ok := raw.(*httpSendCache)
+ if !ok {
+ return nil
+ }
+ return c
+}
+
+// checkHTTPSendCache checks for the given key's value in the cache
+func checkHTTPSendCache(bctx BuiltinContext, key ast.Object) (ast.Value, error) {
+ requestCache := getHTTPSendCache(bctx)
+ if requestCache == nil {
+ return nil, nil
+ }
+
+ v := requestCache.get(key)
+ if v != nil {
+ if v.error != nil {
+ return nil, v.error
+ }
+ if v.response != nil {
+ return *v.response, nil
+ }
+ // This should never happen
+ }
+
+ return nil, nil
+}
+
+func insertIntoHTTPSendCache(bctx BuiltinContext, key ast.Object, value ast.Value) {
+ requestCache := getHTTPSendCache(bctx)
+ if requestCache == nil {
+ // Should never happen.. if it does just skip caching the value
+ // FIXME: return error instead, to prevent inconsistencies?
+ return
+ }
+ requestCache.putResponse(key, &value)
+}
+
+func insertErrorIntoHTTPSendCache(bctx BuiltinContext, key ast.Object, err error) {
+ requestCache := getHTTPSendCache(bctx)
+ if requestCache == nil {
+ // Should never happen.. if it does just skip caching the value
+ // FIXME: return error instead, to prevent inconsistencies?
+ return
+ }
+ requestCache.putError(key, err)
+}
+
+// checkHTTPSendInterQueryCache checks for the given key's value in the inter-query cache
+func (c *interQueryCache) checkHTTPSendInterQueryCache() (ast.Value, error) {
+ requestCache := c.bctx.InterQueryBuiltinCache
+
+ cachedValue, found := requestCache.Get(c.key)
+ if !found {
+ return nil, nil
+ }
+
+ value, cerr := requestCache.Clone(cachedValue)
+ if cerr != nil {
+ return nil, handleHTTPSendErr(c.bctx, cerr)
+ }
+
+ c.bctx.Metrics.Counter(httpSendInterQueryCacheHits).Incr()
+ var cachedRespData *interQueryCacheData
+
+ switch v := value.(type) {
+ case *interQueryCacheValue:
+ var err error
+ cachedRespData, err = v.copyCacheData()
+ if err != nil {
+ return nil, err
+ }
+ case *interQueryCacheData:
+ cachedRespData = v
+ default:
+ return nil, nil
+ }
+
+ if getCurrentTime(c.bctx).Before(cachedRespData.ExpiresAt) {
+ return cachedRespData.formatToAST(c.forceJSONDecode, c.forceYAMLDecode)
+ }
+
+ var err error
+ c.httpReq, c.httpClient, err = createHTTPRequest(c.bctx, c.key)
+ if err != nil {
+ return nil, handleHTTPSendErr(c.bctx, err)
+ }
+
+ headers := parseResponseHeaders(cachedRespData.Headers)
+
+ // check with the server if the stale response is still up-to-date.
+ // If server returns a new response (ie. status_code=200), update the cache with the new response
+ // If server returns an unmodified response (ie. status_code=304), update the headers for the existing response
+ result, modified, err := revalidateCachedResponse(c.httpReq, c.httpClient, c.key, headers)
+ requestCache.Delete(c.key)
+ if err != nil || result == nil {
+ return nil, err
+ }
+
+ defer result.Body.Close()
+
+ if !modified {
+ // update the headers in the cached response with their corresponding values from the 304 (Not Modified) response
+ for headerName, values := range result.Header {
+ cachedRespData.Headers.Del(headerName)
+ for _, v := range values {
+ cachedRespData.Headers.Add(headerName, v)
+ }
+ }
+
+ if forceCaching(c.forceCacheParams) {
+ createdAt := getCurrentTime(c.bctx)
+ cachedRespData.ExpiresAt = createdAt.Add(time.Second * time.Duration(c.forceCacheParams.forceCacheDurationSeconds))
+ } else {
+ expiresAt, err := expiryFromHeaders(result.Header)
+ if err != nil {
+ return nil, err
+ }
+ cachedRespData.ExpiresAt = expiresAt
+ }
+
+ cachingMode, err := getCachingMode(c.key)
+ if err != nil {
+ return nil, err
+ }
+
+ var pcv cache.InterQueryCacheValue
+
+ if cachingMode == defaultCachingMode {
+ pcv, err = cachedRespData.toCacheValue()
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ pcv = cachedRespData
+ }
+
+ c.bctx.InterQueryBuiltinCache.InsertWithExpiry(c.key, pcv, cachedRespData.ExpiresAt)
+
+ return cachedRespData.formatToAST(c.forceJSONDecode, c.forceYAMLDecode)
+ }
+
+ newValue, respBody, err := formatHTTPResponseToAST(result, c.forceJSONDecode, c.forceYAMLDecode)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := insertIntoHTTPSendInterQueryCache(c.bctx, c.key, result, respBody, c.forceCacheParams); err != nil {
+ return nil, err
+ }
+
+ return newValue, nil
+}
+
+// insertIntoHTTPSendInterQueryCache inserts given key and value in the inter-query cache
+func insertIntoHTTPSendInterQueryCache(bctx BuiltinContext, key ast.Value, resp *http.Response, respBody []byte, cacheParams *forceCacheParams) error {
+ if resp == nil || (!forceCaching(cacheParams) && !canStore(resp.Header)) || !cacheableCodes.Contains(ast.InternedTerm(resp.StatusCode)) {
+ return nil
+ }
+
+ requestCache := bctx.InterQueryBuiltinCache
+
+ obj, ok := key.(ast.Object)
+ if !ok {
+ return errors.New("interface conversion error")
+ }
+
+ cachingMode, err := getCachingMode(obj)
+ if err != nil {
+ return err
+ }
+
+ var pcv cache.InterQueryCacheValue
+ var pcvData *interQueryCacheData
+ if cachingMode == defaultCachingMode {
+ pcv, pcvData, err = newInterQueryCacheValue(bctx, resp, respBody, cacheParams)
+ } else {
+ pcvData, err = newInterQueryCacheData(bctx, resp, respBody, cacheParams)
+ pcv = pcvData
+ }
+
+ if err != nil {
+ return err
+ }
+
+ requestCache.InsertWithExpiry(key, pcv, pcvData.ExpiresAt)
+ return nil
+}
+
+func createKeys() {
+ for _, element := range allowedKeyNames {
+ term := ast.StringTerm(element)
+
+ allowedKeys.Add(term)
+ keyCache[element] = term
+ }
+}
+
+func createCacheableHTTPStatusCodes() {
+ for _, element := range cacheableHTTPStatusCodes {
+ cacheableCodes.Add(ast.InternedTerm(element))
+ }
+}
+
+func parseTimeout(timeoutVal ast.Value) (time.Duration, error) {
+ var timeout time.Duration
+ switch t := timeoutVal.(type) {
+ case ast.Number:
+ timeoutInt, ok := t.Int64()
+ if !ok {
+ return timeout, fmt.Errorf("invalid timeout number value %v, must be int64", timeoutVal)
+ }
+ return time.Duration(timeoutInt), nil
+ case ast.String:
+ // Support strings without a unit, treat them the same as just a number value (ns)
+ var err error
+ timeoutInt, err := strconv.ParseInt(string(t), 10, 64)
+ if err == nil {
+ return time.Duration(timeoutInt), nil
+ }
+
+ // Try parsing it as a duration (requires a supported units suffix)
+ timeout, err = time.ParseDuration(string(t))
+ if err != nil {
+ return timeout, fmt.Errorf("invalid timeout value %v: %s", timeoutVal, err)
+ }
+ return timeout, nil
+ default:
+ return timeout, builtins.NewOperandErr(1, "'timeout' must be one of {string, number} but got %s", ast.ValueName(t))
+ }
+}
+
+func getBoolValFromReqObj(req ast.Object, key *ast.Term) (bool, error) {
+ var b ast.Boolean
+ var ok bool
+ if v := req.Get(key); v != nil {
+ if b, ok = v.Value.(ast.Boolean); !ok {
+ return false, fmt.Errorf("invalid value for %v field", key.String())
+ }
+ }
+ return bool(b), nil
+}
+
+func getNumberValFromReqObj(req ast.Object, key *ast.Term) (int, error) {
+ term := req.Get(key)
+ if term == nil {
+ return 0, nil
+ }
+
+ if t, ok := term.Value.(ast.Number); ok {
+ num, ok := t.Int()
+ if !ok || num < 0 {
+ return 0, fmt.Errorf("invalid value %v for field %v", t.String(), key.String())
+ }
+ return num, nil
+ }
+
+ return 0, fmt.Errorf("invalid value %v for field %v", term.String(), key.String())
+}
+
+func getCachingMode(req ast.Object) (cachingMode, error) {
+ key := keyCache["caching_mode"]
+ var s ast.String
+ var ok bool
+ if v := req.Get(key); v != nil {
+ if s, ok = v.Value.(ast.String); !ok {
+ return "", fmt.Errorf("invalid value for %v field", key.String())
+ }
+
+ switch cachingMode(s) {
+ case defaultCachingMode, cachingModeDeserialized:
+ return cachingMode(s), nil
+ default:
+ return "", fmt.Errorf("invalid value specified for %v field: %v", key.String(), string(s))
+ }
+ }
+ return defaultCachingMode, nil
+}
+
+type interQueryCacheValue struct {
+ Data []byte
+}
+
+func newInterQueryCacheValue(bctx BuiltinContext, resp *http.Response, respBody []byte, cacheParams *forceCacheParams) (*interQueryCacheValue, *interQueryCacheData, error) {
+ data, err := newInterQueryCacheData(bctx, resp, respBody, cacheParams)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ b, err := json.Marshal(data)
+ if err != nil {
+ return nil, nil, err
+ }
+ return &interQueryCacheValue{Data: b}, data, nil
+}
+
+func (cb interQueryCacheValue) Clone() (cache.InterQueryCacheValue, error) {
+ dup := make([]byte, len(cb.Data))
+ copy(dup, cb.Data)
+ return &interQueryCacheValue{Data: dup}, nil
+}
+
+func (cb interQueryCacheValue) SizeInBytes() int64 {
+ return int64(len(cb.Data))
+}
+
+func (cb *interQueryCacheValue) copyCacheData() (*interQueryCacheData, error) {
+ var res interQueryCacheData
+ err := util.UnmarshalJSON(cb.Data, &res)
+ if err != nil {
+ return nil, err
+ }
+ return &res, nil
+}
+
+type interQueryCacheData struct {
+ RespBody []byte
+ Status string
+ StatusCode int
+ Headers http.Header
+ ExpiresAt time.Time
+}
+
+func forceCaching(cacheParams *forceCacheParams) bool {
+ return cacheParams != nil && cacheParams.forceCacheDurationSeconds > 0
+}
+
+func expiryFromHeaders(headers http.Header) (time.Time, error) {
+ var expiresAt time.Time
+ maxAge, err := parseMaxAgeCacheDirective(parseCacheControlHeader(headers))
+ if err != nil {
+ return time.Time{}, err
+ }
+ if maxAge != -1 {
+ createdAt, err := getResponseHeaderDate(headers)
+ if err != nil {
+ return time.Time{}, err
+ }
+ expiresAt = createdAt.Add(time.Second * time.Duration(maxAge))
+ } else {
+ expiresAt = getResponseHeaderExpires(headers)
+ }
+ return expiresAt, nil
+}
+
+func newInterQueryCacheData(bctx BuiltinContext, resp *http.Response, respBody []byte, cacheParams *forceCacheParams) (*interQueryCacheData, error) {
+ var expiresAt time.Time
+
+ if forceCaching(cacheParams) {
+ createdAt := getCurrentTime(bctx)
+ expiresAt = createdAt.Add(time.Second * time.Duration(cacheParams.forceCacheDurationSeconds))
+ } else {
+ var err error
+ expiresAt, err = expiryFromHeaders(resp.Header)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ cv := interQueryCacheData{
+ ExpiresAt: expiresAt,
+ RespBody: respBody,
+ Status: resp.Status,
+ StatusCode: resp.StatusCode,
+ Headers: resp.Header}
+
+ return &cv, nil
+}
+
+func (c *interQueryCacheData) formatToAST(forceJSONDecode, forceYAMLDecode bool) (ast.Value, error) {
+ return prepareASTResult(c.Headers, forceJSONDecode, forceYAMLDecode, c.RespBody, c.Status, c.StatusCode)
+}
+
+func (c *interQueryCacheData) toCacheValue() (*interQueryCacheValue, error) {
+ b, err := json.Marshal(c)
+ if err != nil {
+ return nil, err
+ }
+ return &interQueryCacheValue{Data: b}, nil
+}
+
+func (*interQueryCacheData) SizeInBytes() int64 {
+ return 0
+}
+
+func (c *interQueryCacheData) Clone() (cache.InterQueryCacheValue, error) {
+ dup := make([]byte, len(c.RespBody))
+ copy(dup, c.RespBody)
+
+ return &interQueryCacheData{
+ ExpiresAt: c.ExpiresAt,
+ RespBody: dup,
+ Status: c.Status,
+ StatusCode: c.StatusCode,
+ Headers: c.Headers.Clone()}, nil
+}
+
+type responseHeaders struct {
+ etag string // identifier for a specific version of the response
+ lastModified string // date and time response was last modified as per origin server
+}
+
+// deltaSeconds specifies a non-negative integer, representing
+// time in seconds: http://tools.ietf.org/html/rfc7234#section-1.2.1
+type deltaSeconds int32
+
+func parseResponseHeaders(headers http.Header) *responseHeaders {
+ result := responseHeaders{}
+
+ result.etag = headers.Get("etag")
+
+ result.lastModified = headers.Get("last-modified")
+
+ return &result
+}
+
+func revalidateCachedResponse(req *http.Request, client *http.Client, inputReqObj ast.Object, headers *responseHeaders) (*http.Response, bool, error) {
+ etag := headers.etag
+ lastModified := headers.lastModified
+
+ if etag == "" && lastModified == "" {
+ return nil, false, nil
+ }
+
+ cloneReq := req.Clone(req.Context())
+
+ if etag != "" {
+ cloneReq.Header.Set("if-none-match", etag)
+ }
+
+ if lastModified != "" {
+ cloneReq.Header.Set("if-modified-since", lastModified)
+ }
+
+ response, err := executeHTTPRequest(cloneReq, client, inputReqObj)
+ if err != nil {
+ return nil, false, err
+ }
+
+ switch response.StatusCode {
+ case http.StatusOK:
+ return response, true, nil
+
+ case http.StatusNotModified:
+ return response, false, nil
+ }
+ util.Close(response)
+ return nil, false, nil
+}
+
+func canStore(headers http.Header) bool {
+ ccHeaders := parseCacheControlHeader(headers)
+
+ // Check "no-store" cache directive
+ // The "no-store" response directive indicates that a cache MUST NOT
+ // store any part of either the immediate request or response.
+ if _, ok := ccHeaders["no-store"]; ok {
+ return false
+ }
+ return true
+}
+
+func getCurrentTime(bctx BuiltinContext) time.Time {
+ var current time.Time
+
+ value, err := ast.JSON(bctx.Time.Value)
+ if err != nil {
+ return current
+ }
+
+ valueNum, ok := value.(json.Number)
+ if !ok {
+ return current
+ }
+
+ valueNumInt, err := valueNum.Int64()
+ if err != nil {
+ return current
+ }
+
+ current = time.Unix(0, valueNumInt).UTC()
+ return current
+}
+
+func parseCacheControlHeader(headers http.Header) map[string]string {
+ ccDirectives := map[string]string{}
+ ccHeader := headers.Get("cache-control")
+
+ for part := range strings.SplitSeq(ccHeader, ",") {
+ part = strings.Trim(part, " ")
+ if part == "" {
+ continue
+ }
+ if strings.ContainsRune(part, '=') {
+ items := strings.Split(part, "=")
+ if len(items) != 2 {
+ continue
+ }
+ ccDirectives[strings.Trim(items[0], " ")] = strings.Trim(items[1], ",")
+ } else {
+ ccDirectives[part] = ""
+ }
+ }
+
+ return ccDirectives
+}
+
+func getResponseHeaderDate(headers http.Header) (date time.Time, err error) {
+ dateHeader := headers.Get("date")
+ if dateHeader == "" {
+ err = errors.New("no date header")
+ return
+ }
+ return http.ParseTime(dateHeader)
+}
+
+func getResponseHeaderExpires(headers http.Header) time.Time {
+ expiresHeader := headers.Get("expires")
+ if expiresHeader == "" {
+ return time.Time{}
+ }
+
+ date, err := http.ParseTime(expiresHeader)
+ if err != nil {
+ // servers can set `Expires: 0` which is an invalid date to indicate expired content
+ return time.Time{}
+ }
+
+ return date
+}
+
+// parseMaxAgeCacheDirective parses the max-age directive expressed in delta-seconds as per
+// https://tools.ietf.org/html/rfc7234#section-1.2.1
+func parseMaxAgeCacheDirective(cc map[string]string) (deltaSeconds, error) {
+ maxAge, ok := cc["max-age"]
+ if !ok {
+ return deltaSeconds(-1), nil
+ }
+
+ val, err := strconv.ParseUint(maxAge, 10, 32)
+ if err != nil {
+ if numError, ok := err.(*strconv.NumError); ok {
+ if numError.Err == strconv.ErrRange {
+ return deltaSeconds(math.MaxInt32), nil
+ }
+ }
+ return deltaSeconds(-1), err
+ }
+
+ if val > math.MaxInt32 {
+ return deltaSeconds(math.MaxInt32), nil
+ }
+ return deltaSeconds(val), nil
+}
+
+func formatHTTPResponseToAST(resp *http.Response, forceJSONDecode, forceYAMLDecode bool) (ast.Value, []byte, error) {
+
+ resultRawBody, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ resultObj, err := prepareASTResult(resp.Header, forceJSONDecode, forceYAMLDecode, resultRawBody, resp.Status, resp.StatusCode)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return resultObj, resultRawBody, nil
+}
+
+func prepareASTResult(headers http.Header, forceJSONDecode, forceYAMLDecode bool, body []byte, status string, statusCode int) (ast.Value, error) {
+ var resultBody any
+
+ // If the response body cannot be JSON/YAML decoded,
+ // an error will not be returned. Instead, the "body" field
+ // in the result will be null.
+ switch {
+ case forceJSONDecode || isJSONType(headers):
+ _ = util.UnmarshalJSON(body, &resultBody)
+ case forceYAMLDecode || isContentType(headers, "application/yaml", "application/x-yaml"):
+ _ = util.Unmarshal(body, &resultBody)
+ }
+
+ result := make(map[string]any)
+ result["status"] = status
+ result["status_code"] = statusCode
+ result["body"] = resultBody
+ result["raw_body"] = string(body)
+ result["headers"] = getResponseHeaders(headers)
+
+ resultObj, err := ast.InterfaceToValue(result)
+ if err != nil {
+ return nil, err
+ }
+
+ return resultObj, nil
+}
+
+func getResponseHeaders(headers http.Header) map[string]any {
+ respHeaders := map[string]any{}
+ for headerName, values := range headers {
+ var respValues []any
+ for _, v := range values {
+ respValues = append(respValues, v)
+ }
+ respHeaders[strings.ToLower(headerName)] = respValues
+ }
+ return respHeaders
+}
+
+// httpRequestExecutor defines an interface for the http send cache
+type httpRequestExecutor interface {
+ CheckCache() (ast.Value, error)
+ InsertIntoCache(value *http.Response) (ast.Value, error)
+ InsertErrorIntoCache(err error)
+ ExecuteHTTPRequest() (*http.Response, error)
+}
+
+// newHTTPRequestExecutor returns a new HTTP request executor that wraps either an inter-query or
+// intra-query cache implementation
+func newHTTPRequestExecutor(bctx BuiltinContext, req ast.Object, key ast.Object) (httpRequestExecutor, error) {
+ useInterQueryCache, forceCacheParams, err := useInterQueryCache(req)
+ if err != nil {
+ return nil, handleHTTPSendErr(bctx, err)
+ }
+
+ if useInterQueryCache && bctx.InterQueryBuiltinCache != nil {
+ return newInterQueryCache(bctx, req, key, forceCacheParams)
+ }
+ return newIntraQueryCache(bctx, req, key)
+}
+
+type interQueryCache struct {
+ bctx BuiltinContext
+ req ast.Object
+ key ast.Object
+ httpReq *http.Request
+ httpClient *http.Client
+ forceJSONDecode bool
+ forceYAMLDecode bool
+ forceCacheParams *forceCacheParams
+}
+
+func newInterQueryCache(bctx BuiltinContext, req ast.Object, key ast.Object, forceCacheParams *forceCacheParams) (*interQueryCache, error) {
+ return &interQueryCache{bctx: bctx, req: req, key: key, forceCacheParams: forceCacheParams}, nil
+}
+
+// CheckCache checks the cache for the value of the key set on this object
+func (c *interQueryCache) CheckCache() (ast.Value, error) {
+ var err error
+
+ // Checking the intra-query cache first ensures consistency of errors and HTTP responses within a query.
+ resp, err := checkHTTPSendCache(c.bctx, c.key)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ return resp, nil
+ }
+
+ c.forceJSONDecode, err = getBoolValFromReqObj(c.key, keyCache["force_json_decode"])
+ if err != nil {
+ return nil, handleHTTPSendErr(c.bctx, err)
+ }
+ c.forceYAMLDecode, err = getBoolValFromReqObj(c.key, keyCache["force_yaml_decode"])
+ if err != nil {
+ return nil, handleHTTPSendErr(c.bctx, err)
+ }
+
+ resp, err = c.checkHTTPSendInterQueryCache()
+ // Always insert the result of the inter-query cache into the intra-query cache, to maintain consistency within the same query.
+ if err != nil {
+ insertErrorIntoHTTPSendCache(c.bctx, c.key, err)
+ }
+ if resp != nil {
+ insertIntoHTTPSendCache(c.bctx, c.key, resp)
+ }
+ return resp, err
+}
+
+// InsertIntoCache inserts the key set on this object into the cache with the given value
+func (c *interQueryCache) InsertIntoCache(value *http.Response) (ast.Value, error) {
+ result, respBody, err := formatHTTPResponseToAST(value, c.forceJSONDecode, c.forceYAMLDecode)
+ if err != nil {
+ return nil, handleHTTPSendErr(c.bctx, err)
+ }
+
+ // Always insert into the intra-query cache, to maintain consistency within the same query.
+ insertIntoHTTPSendCache(c.bctx, c.key, result)
+
+ // We ignore errors when populating the inter-query cache, because we've already populated the intra-cache,
+ // and query consistency is our primary concern.
+ _ = insertIntoHTTPSendInterQueryCache(c.bctx, c.key, value, respBody, c.forceCacheParams)
+ return result, nil
+}
+
+func (c *interQueryCache) InsertErrorIntoCache(err error) {
+ insertErrorIntoHTTPSendCache(c.bctx, c.key, err)
+}
+
+// ExecuteHTTPRequest executes a HTTP request
+func (c *interQueryCache) ExecuteHTTPRequest() (*http.Response, error) {
+ var err error
+ c.httpReq, c.httpClient, err = createHTTPRequest(c.bctx, c.req)
+ if err != nil {
+ return nil, handleHTTPSendErr(c.bctx, err)
+ }
+
+ // Increment counter for actual network requests
+ c.bctx.Metrics.Counter(httpSendNetworkRequests).Incr()
+
+ return executeHTTPRequest(c.httpReq, c.httpClient, c.req)
+}
+
+type intraQueryCache struct {
+ bctx BuiltinContext
+ req ast.Object
+ key ast.Object
+}
+
+func newIntraQueryCache(bctx BuiltinContext, req ast.Object, key ast.Object) (*intraQueryCache, error) {
+ return &intraQueryCache{bctx: bctx, req: req, key: key}, nil
+}
+
+// CheckCache checks the cache for the value of the key set on this object
+func (c *intraQueryCache) CheckCache() (ast.Value, error) {
+ return checkHTTPSendCache(c.bctx, c.key)
+}
+
+// InsertIntoCache inserts the key set on this object into the cache with the given value
+func (c *intraQueryCache) InsertIntoCache(value *http.Response) (ast.Value, error) {
+ forceJSONDecode, err := getBoolValFromReqObj(c.key, keyCache["force_json_decode"])
+ if err != nil {
+ return nil, handleHTTPSendErr(c.bctx, err)
+ }
+ forceYAMLDecode, err := getBoolValFromReqObj(c.key, keyCache["force_yaml_decode"])
+ if err != nil {
+ return nil, handleHTTPSendErr(c.bctx, err)
+ }
+
+ result, _, err := formatHTTPResponseToAST(value, forceJSONDecode, forceYAMLDecode)
+ if err != nil {
+ return nil, handleHTTPSendErr(c.bctx, err)
+ }
+
+ if cacheableCodes.Contains(ast.InternedTerm(value.StatusCode)) {
+ insertIntoHTTPSendCache(c.bctx, c.key, result)
+ }
+
+ return result, nil
+}
+
+func (c *intraQueryCache) InsertErrorIntoCache(err error) {
+ insertErrorIntoHTTPSendCache(c.bctx, c.key, err)
+}
+
+// ExecuteHTTPRequest executes a HTTP request
+func (c *intraQueryCache) ExecuteHTTPRequest() (*http.Response, error) {
+ httpReq, httpClient, err := createHTTPRequest(c.bctx, c.req)
+ if err != nil {
+ return nil, handleHTTPSendErr(c.bctx, err)
+ }
+
+ // Increment counter for actual network requests
+ c.bctx.Metrics.Counter(httpSendNetworkRequests).Incr()
+
+ return executeHTTPRequest(httpReq, httpClient, c.req)
+}
+
+func useInterQueryCache(req ast.Object) (bool, *forceCacheParams, error) {
+ value, err := getBoolValFromReqObj(req, keyCache["cache"])
+ if err != nil {
+ return false, nil, err
+ }
+
+ valueForceCache, err := getBoolValFromReqObj(req, keyCache["force_cache"])
+ if err != nil {
+ return false, nil, err
+ }
+
+ if valueForceCache {
+ forceCacheParams, err := newForceCacheParams(req)
+ return true, forceCacheParams, err
+ }
+
+ return value, nil, nil
+}
+
+type forceCacheParams struct {
+ forceCacheDurationSeconds int32
+}
+
+func newForceCacheParams(req ast.Object) (*forceCacheParams, error) {
+ term := req.Get(keyCache["force_cache_duration_seconds"])
+ if term == nil {
+ return nil, errors.New("'force_cache' set but 'force_cache_duration_seconds' parameter is missing")
+ }
+
+ forceCacheDurationSeconds := term.String()
+
+ value, err := strconv.ParseInt(forceCacheDurationSeconds, 10, 32)
+ if err != nil {
+ return nil, err
+ }
+
+ return &forceCacheParams{forceCacheDurationSeconds: int32(value)}, nil
+}
+
+func getRaiseErrorValue(req ast.Object) (bool, error) {
+ result := ast.Boolean(true)
+ var ok bool
+ if v := req.Get(keyCache["raise_error"]); v != nil {
+ if result, ok = v.Value.(ast.Boolean); !ok {
+ return false, errors.New("invalid value for raise_error field")
+ }
+ }
+ return bool(result), nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/http_fixup.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup.go
similarity index 100%
rename from vendor/github.com/open-policy-agent/opa/topdown/http_fixup.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup.go
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/http_fixup_darwin.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup_darwin.go
similarity index 100%
rename from vendor/github.com/open-policy-agent/opa/topdown/http_fixup_darwin.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup_darwin.go
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/input.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/input.go
similarity index 95%
rename from vendor/github.com/open-policy-agent/opa/topdown/input.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/input.go
index cb70aeb71e..ec37b36451 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/input.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/input.go
@@ -5,12 +5,12 @@
package topdown
import (
- "fmt"
+ "errors"
- "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/v1/ast"
)
-var errBadPath = fmt.Errorf("bad document path")
+var errBadPath = errors.New("bad document path")
func mergeTermWithValues(exist *ast.Term, pairs [][2]*ast.Term) (*ast.Term, error) {
diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/instrumentation.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/instrumentation.go
new file mode 100644
index 0000000000..93da1d0022
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/instrumentation.go
@@ -0,0 +1,63 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import "github.com/open-policy-agent/opa/v1/metrics"
+
+const (
+ evalOpPlug = "eval_op_plug"
+ evalOpResolve = "eval_op_resolve"
+ evalOpRuleIndex = "eval_op_rule_index"
+ evalOpBuiltinCall = "eval_op_builtin_call"
+ evalOpVirtualCacheHit = "eval_op_virtual_cache_hit"
+ evalOpVirtualCacheMiss = "eval_op_virtual_cache_miss"
+ evalOpBaseCacheHit = "eval_op_base_cache_hit"
+ evalOpBaseCacheMiss = "eval_op_base_cache_miss"
+ evalOpComprehensionCacheSkip = "eval_op_comprehension_cache_skip"
+ evalOpComprehensionCacheBuild = "eval_op_comprehension_cache_build"
+ evalOpComprehensionCacheHit = "eval_op_comprehension_cache_hit"
+ evalOpComprehensionCacheMiss = "eval_op_comprehension_cache_miss"
+ partialOpSaveUnify = "partial_op_save_unify"
+ partialOpSaveSetContains = "partial_op_save_set_contains"
+ partialOpSaveSetContainsRec = "partial_op_save_set_contains_rec"
+ partialOpCopyPropagation = "partial_op_copy_propagation"
+)
+
+// Instrumentation implements helper functions to instrument query evaluation
+// to diagnose performance issues. Instrumentation may be expensive in some
+// cases, so it is disabled by default.
+type Instrumentation struct {
+ m metrics.Metrics
+}
+
+// NewInstrumentation returns a new Instrumentation object. Performance
+// diagnostics recorded on this Instrumentation object will stored in m.
+func NewInstrumentation(m metrics.Metrics) *Instrumentation {
+ return &Instrumentation{
+ m: m,
+ }
+}
+
+func (instr *Instrumentation) startTimer(name string) {
+ if instr == nil {
+ return
+ }
+ instr.m.Timer(name).Start()
+}
+
+func (instr *Instrumentation) stopTimer(name string) {
+ if instr == nil {
+ return
+ }
+ delta := instr.m.Timer(name).Stop()
+ instr.m.Histogram(name).Update(delta)
+}
+
+func (instr *Instrumentation) counterIncr(name string) {
+ if instr == nil {
+ return
+ }
+ instr.m.Counter(name).Incr()
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/json.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/json.go
similarity index 94%
rename from vendor/github.com/open-policy-agent/opa/topdown/json.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/json.go
index 8a5d232836..2c7d642883 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/json.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/json.go
@@ -5,12 +5,12 @@
package topdown
import (
+ "errors"
"fmt"
- "strconv"
"strings"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
"github.com/open-policy-agent/opa/internal/edittree"
)
@@ -98,11 +98,11 @@ func jsonRemove(a *ast.Term, b *ast.Term) (*ast.Term, error) {
// When indexes are removed we shift left to close empty spots in the array
// as per the JSON patch spec.
newArray := ast.NewArray()
- for i := 0; i < aValue.Len(); i++ {
+ for i := range aValue.Len() {
v := aValue.Elem(i)
// recurse and add the diff of sub objects as needed
// Note: Keys in b will be strings for the index, eg path /a/1/b => {"a": {"1": {"b": null}}}
- diffValue, err := jsonRemove(v, bObj.Get(ast.StringTerm(strconv.Itoa(i))))
+ diffValue, err := jsonRemove(v, bObj.Get(ast.InternedIntegerString(i)))
if err != nil {
return nil, err
}
@@ -144,7 +144,7 @@ func getJSONPaths(operand ast.Value) ([]ast.Ref, error) {
switch v := operand.(type) {
case *ast.Array:
- for i := 0; i < v.Len(); i++ {
+ for i := range v.Len() {
filter, err := parsePath(v.Elem(i))
if err != nil {
return nil, err
@@ -189,7 +189,7 @@ func parsePath(path *ast.Term) (ast.Ref, error) {
pathSegments = append(pathSegments, term)
})
default:
- return nil, builtins.NewOperandErr(2, "must be one of {set, array} containing string paths or array of path segments but got %v", ast.TypeName(p))
+ return nil, builtins.NewOperandErr(2, "must be one of {set, array} containing string paths or array of path segments but got %v", ast.ValueName(p))
}
return pathSegments, nil
@@ -231,7 +231,7 @@ func pathsToObject(paths []ast.Ref) ast.Object {
}
if !done {
- node.Insert(path[len(path)-1], ast.NullTerm())
+ node.Insert(path[len(path)-1], ast.InternedNullTerm)
}
}
@@ -263,7 +263,7 @@ func getPatch(o ast.Object) (jsonPatch, error) {
}
op, ok := opTerm.Value.(ast.String)
if !ok {
- return out, fmt.Errorf("attribute 'op' must be a string")
+ return out, errors.New("attribute 'op' must be a string")
}
out.op = string(op)
if _, found := validOps[out.op]; !found {
@@ -302,10 +302,10 @@ func getPatch(o ast.Object) (jsonPatch, error) {
func applyPatches(source *ast.Term, operations *ast.Array) (*ast.Term, error) {
et := edittree.NewEditTree(source)
- for i := 0; i < operations.Len(); i++ {
+ for i := range operations.Len() {
object, ok := operations.Elem(i).Value.(ast.Object)
if !ok {
- return nil, fmt.Errorf("must be an array of JSON-Patch objects, but at least one element is not an object")
+ return nil, errors.New("must be an array of JSON-Patch objects, but at least one element is not an object")
}
patch, err := getPatch(object)
if err != nil {
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/jsonschema.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/jsonschema.go
similarity index 96%
rename from vendor/github.com/open-policy-agent/opa/topdown/jsonschema.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/jsonschema.go
index d319bc0b0d..699f1d0d99 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/jsonschema.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/jsonschema.go
@@ -8,8 +8,8 @@ import (
"encoding/json"
"errors"
- "github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/internal/gojsonschema"
+ "github.com/open-policy-agent/opa/v1/ast"
)
// astValueToJSONSchemaLoader converts a value to JSON Loader.
@@ -29,7 +29,7 @@ func astValueToJSONSchemaLoader(value ast.Value) (gojsonschema.JSONLoader, error
loader = gojsonschema.NewStringLoader(string(x))
case ast.Object:
// In case of object serialize it to JSON representation.
- var data interface{}
+ var data any
data, err = ast.JSON(value)
if err != nil {
return nil, err
@@ -44,7 +44,7 @@ func astValueToJSONSchemaLoader(value ast.Value) (gojsonschema.JSONLoader, error
}
func newResultTerm(valid bool, data *ast.Term) *ast.Term {
- return ast.ArrayTerm(ast.BooleanTerm(valid), data)
+ return ast.ArrayTerm(ast.InternedTerm(valid), data)
}
// builtinJSONSchemaVerify accepts 1 argument which can be string or object and checks if it is valid JSON schema.
@@ -61,7 +61,7 @@ func builtinJSONSchemaVerify(_ BuiltinContext, operands []*ast.Term, iter func(*
return iter(newResultTerm(false, ast.StringTerm("jsonschema: "+err.Error())))
}
- return iter(newResultTerm(true, ast.NullTerm()))
+ return iter(newResultTerm(true, ast.InternedNullTerm))
}
// builtinJSONMatchSchema accepts 2 arguments both can be string or object and verifies if the document matches the JSON schema.
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/net.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/net.go
similarity index 93%
rename from vendor/github.com/open-policy-agent/opa/topdown/net.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/net.go
index 534520529a..17ed779844 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/net.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/net.go
@@ -8,8 +8,8 @@ import (
"net"
"strings"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
type lookupIPAddrCacheKey string
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/numbers.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/numbers.go
similarity index 57%
rename from vendor/github.com/open-policy-agent/opa/topdown/numbers.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/numbers.go
index 27f3156b8a..1e05a247a9 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/numbers.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/numbers.go
@@ -5,18 +5,25 @@
package topdown
import (
+ "errors"
"fmt"
"math/big"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
type randIntCachingKey string
-var one = big.NewInt(1)
+var (
+ zero = big.NewInt(0)
+ one = big.NewInt(1)
+)
func builtinNumbersRange(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ if canGenerateCheapRange(operands) {
+ return generateCheapRange(operands, 1, iter)
+ }
x, err := builtins.BigIntOperand(operands[0].Value, 1)
if err != nil {
@@ -37,6 +44,14 @@ func builtinNumbersRange(bctx BuiltinContext, operands []*ast.Term, iter func(*a
}
func builtinNumbersRangeStep(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ if canGenerateCheapRangeStep(operands) {
+ step, _ := builtins.IntOperand(operands[2].Value, 3)
+ if step <= 0 {
+ return errors.New("numbers.range_step: step must be a positive integer")
+ }
+
+ return generateCheapRange(operands, step, iter)
+ }
x, err := builtins.BigIntOperand(operands[0].Value, 1)
if err != nil {
@@ -53,8 +68,8 @@ func builtinNumbersRangeStep(bctx BuiltinContext, operands []*ast.Term, iter fun
return err
}
- if step.Cmp(big.NewInt(0)) <= 0 {
- return fmt.Errorf("numbers.range_step: step must be a positive number above zero")
+ if step.Cmp(zero) <= 0 {
+ return errors.New("numbers.range_step: step must be a positive integer")
}
ast, err := generateRange(bctx, x, y, step, "numbers.range_step")
@@ -65,8 +80,58 @@ func builtinNumbersRangeStep(bctx BuiltinContext, operands []*ast.Term, iter fun
return iter(ast)
}
-func generateRange(bctx BuiltinContext, x *big.Int, y *big.Int, step *big.Int, funcName string) (*ast.Term, error) {
+func canGenerateCheapRange(operands []*ast.Term) bool {
+ x, err := builtins.IntOperand(operands[0].Value, 1)
+ if err != nil || !ast.HasInternedIntNumberTerm(x) {
+ return false
+ }
+
+ y, err := builtins.IntOperand(operands[1].Value, 2)
+ if err != nil || !ast.HasInternedIntNumberTerm(y) {
+ return false
+ }
+
+ return true
+}
+
+func canGenerateCheapRangeStep(operands []*ast.Term) bool {
+ if canGenerateCheapRange(operands) {
+ step, err := builtins.IntOperand(operands[1].Value, 3)
+ if err == nil && ast.HasInternedIntNumberTerm(step) {
+ return true
+ }
+ }
+
+ return false
+}
+
+func generateCheapRange(operands []*ast.Term, step int, iter func(*ast.Term) error) error {
+ x, err := builtins.IntOperand(operands[0].Value, 1)
+ if err != nil {
+ return err
+ }
+ y, err := builtins.IntOperand(operands[1].Value, 2)
+ if err != nil {
+ return err
+ }
+
+ terms := make([]*ast.Term, 0, y+1)
+
+ if x <= y {
+ for i := x; i <= y; i += step {
+ terms = append(terms, ast.InternedTerm(i))
+ }
+ } else {
+ for i := x; i >= y; i -= step {
+ terms = append(terms, ast.InternedTerm(i))
+ }
+ }
+
+ return iter(ast.ArrayTerm(terms...))
+}
+
+func generateRange(bctx BuiltinContext, x *big.Int, y *big.Int, step *big.Int, funcName string) (*ast.Term, error) {
cmp := x.Cmp(y)
comp := func(i *big.Int, y *big.Int) bool { return i.Cmp(y) <= 0 }
@@ -81,7 +146,7 @@ func generateRange(bctx BuiltinContext, x *big.Int, y *big.Int, step *big.Int, f
haltErr := Halt{
Err: &Error{
Code: CancelErr,
- Message: fmt.Sprintf("%s: timed out before generating all numbers in range", funcName),
+ Message: funcName + ": timed out before generating all numbers in range",
},
}
@@ -96,11 +161,9 @@ func generateRange(bctx BuiltinContext, x *big.Int, y *big.Int, step *big.Int, f
}
func builtinRandIntn(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
-
strOp, err := builtins.StringOperand(operands[0].Value, 1)
if err != nil {
return err
-
}
n, err := builtins.IntOperand(operands[1].Value, 2)
@@ -109,14 +172,14 @@ func builtinRandIntn(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.T
}
if n == 0 {
- return iter(ast.IntNumberTerm(0))
+ return iter(ast.InternedTerm(0))
}
if n < 0 {
n = -n
}
- var key = randIntCachingKey(fmt.Sprintf("%s-%d", strOp, n))
+ key := randIntCachingKey(fmt.Sprintf("%s-%d", strOp, n))
if val, ok := bctx.Cache.Get(key); ok {
return iter(val.(*ast.Term))
@@ -126,7 +189,7 @@ func builtinRandIntn(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.T
if err != nil {
return err
}
- result := ast.IntNumberTerm(r.Intn(n))
+ result := ast.InternedTerm(r.Intn(n))
bctx.Cache.Put(key, result)
return iter(result)
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/object.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/object.go
similarity index 89%
rename from vendor/github.com/open-policy-agent/opa/topdown/object.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/object.go
index ba5d77ff37..c6fbe7022f 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/object.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/object.go
@@ -5,9 +5,9 @@
package topdown
import (
- "github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/internal/ref"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
func builtinObjectUnion(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -21,6 +21,16 @@ func builtinObjectUnion(_ BuiltinContext, operands []*ast.Term, iter func(*ast.T
return err
}
+ if objA.Len() == 0 {
+ return iter(operands[1])
+ }
+ if objB.Len() == 0 {
+ return iter(operands[0])
+ }
+ if objA.Compare(objB) == 0 {
+ return iter(operands[0])
+ }
+
r := mergeWithOverwrite(objA, objB)
return iter(ast.NewTerm(r))
@@ -50,9 +60,6 @@ func builtinObjectUnionN(_ BuiltinContext, operands []*ast.Term, iter func(*ast.
return builtins.NewOperandElementErr(1, arr, arr.Elem(i).Value, "object")
}
mergewithOverwriteInPlace(result, o, frozenKeys)
- if err != nil {
- return err
- }
}
return iter(ast.NewTerm(result))
@@ -95,7 +102,7 @@ func builtinObjectFilter(_ BuiltinContext, operands []*ast.Term, iter func(*ast.
filterObj := ast.NewObject()
keys.Foreach(func(key *ast.Term) {
- filterObj.Insert(key, ast.NullTerm())
+ filterObj.Insert(key, ast.InternedNullTerm)
})
// Actually do the filtering
@@ -114,8 +121,8 @@ func builtinObjectGet(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter
}
// if the get key is not an array, attempt to get the top level key for the operand value in the object
- path, err := builtins.ArrayOperand(operands[1].Value, 2)
- if err != nil {
+ path, ok := operands[1].Value.(*ast.Array)
+ if !ok {
if ret := object.Get(operands[1]); ret != nil {
return iter(ret)
}
@@ -143,38 +150,28 @@ func builtinObjectKeys(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Te
if err != nil {
return err
}
+ if object.Len() == 0 {
+ return iter(ast.InternedEmptySet)
+ }
- keys := ast.SetTerm(object.Keys()...)
-
- return iter(keys)
+ return iter(ast.SetTerm(object.Keys()...))
}
// getObjectKeysParam returns a set of key values
// from a supplied ast array, object, set value
func getObjectKeysParam(arrayOrSet ast.Value) (ast.Set, error) {
- keys := ast.NewSet()
-
switch v := arrayOrSet.(type) {
case *ast.Array:
- _ = v.Iter(func(f *ast.Term) error {
- keys.Add(f)
- return nil
- })
+ keys := ast.NewSet()
+ v.Foreach(keys.Add)
+ return keys, nil
case ast.Set:
- _ = v.Iter(func(f *ast.Term) error {
- keys.Add(f)
- return nil
- })
+ return ast.NewSet(v.Slice()...), nil
case ast.Object:
- _ = v.Iter(func(k *ast.Term, _ *ast.Term) error {
- keys.Add(k)
- return nil
- })
- default:
- return nil, builtins.NewOperandTypeErr(2, arrayOrSet, "object", "set", "array")
+ return ast.NewSet(v.Keys()...), nil
}
- return keys, nil
+ return nil, builtins.NewOperandTypeErr(2, arrayOrSet, "object", "set", "array")
}
func mergeWithOverwrite(objA, objB ast.Object) ast.Object {
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/parse.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/parse.go
similarity index 91%
rename from vendor/github.com/open-policy-agent/opa/topdown/parse.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/parse.go
index c46222b413..464e0141a2 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/parse.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/parse.go
@@ -9,8 +9,8 @@ import (
"encoding/json"
"fmt"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
func builtinRegoParseModule(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -25,6 +25,7 @@ func builtinRegoParseModule(_ BuiltinContext, operands []*ast.Term, iter func(*a
return err
}
+ // FIXME: Use configured rego-version?
module, err := ast.ParseModule(string(filename), string(input))
if err != nil {
return err
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/parse_bytes.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/parse_bytes.go
similarity index 74%
rename from vendor/github.com/open-policy-agent/opa/topdown/parse_bytes.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/parse_bytes.go
index 0cd4bc193a..cd36b87b17 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/parse_bytes.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/parse_bytes.go
@@ -10,8 +10,8 @@ import (
"strings"
"unicode"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
const (
@@ -109,7 +109,7 @@ func builtinNumBytes(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term
func formatString(s ast.String) string {
str := string(s)
lower := strings.ToLower(str)
- return strings.Replace(lower, "\"", "", -1)
+ return strings.ReplaceAll(lower, "\"", "")
}
// Splits the string into a number string à la "10" or "10.2" and a unit
@@ -121,21 +121,35 @@ func extractNumAndUnit(s string) (string, string) {
}
firstNonNumIdx := -1
- for idx, r := range s {
- if !isNum(r) {
+ for idx := 0; idx < len(s); idx++ {
+ r := rune(s[idx])
+ // Identify the first non-numeric character, marking the boundary between the number and the unit.
+ if !isNum(r) && r != 'e' && r != 'E' && r != '+' && r != '-' {
firstNonNumIdx = idx
break
}
+ if r == 'e' || r == 'E' {
+ // Check if the next character is a valid digit or +/- for scientific notation
+ if idx == len(s)-1 || (!unicode.IsDigit(rune(s[idx+1])) && rune(s[idx+1]) != '+' && rune(s[idx+1]) != '-') {
+ firstNonNumIdx = idx
+ break
+ }
+ // Skip the next character if it is '+' or '-'
+ if idx+1 < len(s) && (s[idx+1] == '+' || s[idx+1] == '-') {
+ idx++
+ }
+ }
}
- if firstNonNumIdx == -1 { // only digits and '.'
+ if firstNonNumIdx == -1 { // only digits, '.', or valid scientific notation
return s, ""
}
if firstNonNumIdx == 0 { // only units (starts with non-digit)
return "", s
}
- return s[0:firstNonNumIdx], s[firstNonNumIdx:]
+ // Return the number and the rest as the unit
+ return s[:firstNonNumIdx], s[firstNonNumIdx:]
}
func init() {
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/parse_units.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/parse_units.go
similarity index 95%
rename from vendor/github.com/open-policy-agent/opa/topdown/parse_units.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/parse_units.go
index daf240214c..44aec86299 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/parse_units.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/parse_units.go
@@ -10,8 +10,8 @@ import (
"math/big"
"strings"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
// Binary Si unit constants are borrowed from topdown/parse_bytes
@@ -50,7 +50,7 @@ func builtinUnits(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) e
// We remove escaped quotes from strings here to retain parity with units.parse_bytes.
s := string(raw)
- s = strings.Replace(s, "\"", "", -1)
+ s = strings.ReplaceAll(s, "\"", "")
if strings.Contains(s, " ") {
return errIncludesSpaces
diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/print.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/print.go
new file mode 100644
index 0000000000..f852f3e320
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/print.go
@@ -0,0 +1,86 @@
+// Copyright 2021 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/topdown/print"
+)
+
+func NewPrintHook(w io.Writer) print.Hook {
+ return printHook{w: w}
+}
+
+type printHook struct {
+ w io.Writer
+}
+
+func (h printHook) Print(_ print.Context, msg string) error {
+ _, err := fmt.Fprintln(h.w, msg)
+ return err
+}
+
+func builtinPrint(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+
+ if bctx.PrintHook == nil {
+ return iter(nil)
+ }
+
+ arr, err := builtins.ArrayOperand(operands[0].Value, 1)
+ if err != nil {
+ return err
+ }
+
+ buf := make([]string, arr.Len())
+
+ err = builtinPrintCrossProductOperands(bctx, buf, arr, 0, func(buf []string) error {
+ pctx := print.Context{
+ Context: bctx.Context,
+ Location: bctx.Location,
+ }
+ return bctx.PrintHook.Print(pctx, strings.Join(buf, " "))
+ })
+ if err != nil {
+ return err
+ }
+
+ return iter(nil)
+}
+
+func builtinPrintCrossProductOperands(bctx BuiltinContext, buf []string, operands *ast.Array, i int, f func([]string) error) error {
+
+ if i >= operands.Len() {
+ return f(buf)
+ }
+
+ xs, ok := operands.Elem(i).Value.(ast.Set)
+ if !ok {
+ return Halt{Err: internalErr(bctx.Location, fmt.Sprintf("illegal argument type: %v", ast.ValueName(operands.Elem(i).Value)))}
+ }
+
+ if xs.Len() == 0 {
+ buf[i] = ""
+ return builtinPrintCrossProductOperands(bctx, buf, operands, i+1, f)
+ }
+
+ return xs.Iter(func(x *ast.Term) error {
+ switch v := x.Value.(type) {
+ case ast.String:
+ buf[i] = string(v)
+ default:
+ buf[i] = v.String()
+ }
+ return builtinPrintCrossProductOperands(bctx, buf, operands, i+1, f)
+ })
+}
+
+func init() {
+ RegisterBuiltinFunc(ast.InternalPrint.Name, builtinPrint)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/print/print.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/print/print.go
new file mode 100644
index 0000000000..ce684ae945
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/print/print.go
@@ -0,0 +1,21 @@
+package print
+
+import (
+ "context"
+
+ "github.com/open-policy-agent/opa/v1/ast"
+)
+
+// Context provides the Hook implementation context about the print() call.
+type Context struct {
+ Context context.Context // request context passed when query executed
+ Location *ast.Location // location of print call
+}
+
+// Hook defines the interface that callers can implement to receive print
+// statement outputs. If the hook returns an error, it will be surfaced if
+// strict builtin error checking is enabled (otherwise, it will not halt
+// execution.)
+type Hook interface {
+ Print(Context, string) error
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/providers.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/providers.go
similarity index 97%
rename from vendor/github.com/open-policy-agent/opa/topdown/providers.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/providers.go
index 77db917982..dd84026e4b 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/providers.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/providers.go
@@ -9,9 +9,9 @@ import (
"net/url"
"time"
- "github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/internal/providers/aws"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
var awsRequiredConfigKeyNames = ast.NewSet(
@@ -119,9 +119,6 @@ func builtinAWSSigV4SignReq(_ BuiltinContext, operands []*ast.Term, iter func(*a
}
signingTimestamp = time.Unix(0, ts)
- if err != nil {
- return err
- }
// Make sure our required keys exist!
// This check is stricter than required, but better to break here than downstream.
diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/query.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/query.go
new file mode 100644
index 0000000000..aadcc060cf
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/query.go
@@ -0,0 +1,639 @@
+package topdown
+
+import (
+ "context"
+ "crypto/rand"
+ "io"
+ "sort"
+ "time"
+
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/metrics"
+ "github.com/open-policy-agent/opa/v1/resolver"
+ "github.com/open-policy-agent/opa/v1/storage"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/topdown/cache"
+ "github.com/open-policy-agent/opa/v1/topdown/copypropagation"
+ "github.com/open-policy-agent/opa/v1/topdown/print"
+ "github.com/open-policy-agent/opa/v1/tracing"
+)
+
+// QueryResultSet represents a collection of results returned by a query.
+type QueryResultSet []QueryResult
+
+// QueryResult represents a single result returned by a query. The result
+// contains bindings for all variables that appear in the query.
+type QueryResult map[ast.Var]*ast.Term
+
+// Query provides a configurable interface for performing query evaluation.
+type Query struct {
+ seed io.Reader
+ time time.Time
+ cancel Cancel
+ query ast.Body
+ queryCompiler ast.QueryCompiler
+ compiler *ast.Compiler
+ store storage.Store
+ txn storage.Transaction
+ input *ast.Term
+ external *resolverTrie
+ tracers []QueryTracer
+ plugTraceVars bool
+ unknowns []*ast.Term
+ partialNamespace string
+ skipSaveNamespace bool
+ metrics metrics.Metrics
+ instr *Instrumentation
+ disableInlining []ast.Ref
+ shallowInlining bool
+ nondeterministicBuiltins bool
+ genvarprefix string
+ runtime *ast.Term
+ builtins map[string]*Builtin
+ indexing bool
+ earlyExit bool
+ interQueryBuiltinCache cache.InterQueryCache
+ interQueryBuiltinValueCache cache.InterQueryValueCache
+ ndBuiltinCache builtins.NDBCache
+ strictBuiltinErrors bool
+ builtinErrorList *[]Error
+ strictObjects bool
+ roundTripper CustomizeRoundTripper
+ printHook print.Hook
+ tracingOpts tracing.Options
+ virtualCache VirtualCache
+ baseCache BaseCache
+}
+
+// Builtin represents a built-in function that queries can call.
+type Builtin struct {
+ Decl *ast.Builtin
+ Func BuiltinFunc
+}
+
+// NewQuery returns a new Query object that can be run.
+func NewQuery(query ast.Body) *Query {
+ return &Query{
+ query: query,
+ genvarprefix: ast.WildcardPrefix,
+ indexing: true,
+ earlyExit: true,
+ }
+}
+
+// WithQueryCompiler sets the queryCompiler used for the query.
+func (q *Query) WithQueryCompiler(queryCompiler ast.QueryCompiler) *Query {
+ q.queryCompiler = queryCompiler
+ return q
+}
+
+// WithCompiler sets the compiler to use for the query.
+func (q *Query) WithCompiler(compiler *ast.Compiler) *Query {
+ q.compiler = compiler
+ return q
+}
+
+// WithStore sets the store to use for the query.
+func (q *Query) WithStore(store storage.Store) *Query {
+ q.store = store
+ return q
+}
+
+// WithTransaction sets the transaction to use for the query. All queries
+// should be performed over a consistent snapshot of the storage layer.
+func (q *Query) WithTransaction(txn storage.Transaction) *Query {
+ q.txn = txn
+ return q
+}
+
+// WithCancel sets the cancellation object to use for the query. Set this if
+// you need to abort queries based on a deadline. This is optional.
+func (q *Query) WithCancel(cancel Cancel) *Query {
+ q.cancel = cancel
+ return q
+}
+
+// WithInput sets the input object to use for the query. References rooted at
+// input will be evaluated against this value. This is optional.
+func (q *Query) WithInput(input *ast.Term) *Query {
+ q.input = input
+ return q
+}
+
+// WithTracer adds a query tracer to use during evaluation. This is optional.
+// Deprecated: Use WithQueryTracer instead.
+func (q *Query) WithTracer(tracer Tracer) *Query {
+ qt, ok := tracer.(QueryTracer)
+ if !ok {
+ qt = WrapLegacyTracer(tracer)
+ }
+ return q.WithQueryTracer(qt)
+}
+
+// WithQueryTracer adds a query tracer to use during evaluation. This is optional.
+// Disabled QueryTracers will be ignored.
+func (q *Query) WithQueryTracer(tracer QueryTracer) *Query {
+ if !tracer.Enabled() {
+ return q
+ }
+
+ q.tracers = append(q.tracers, tracer)
+
+ // If *any* of the tracers require local variable metadata we need to
+ // enabled plugging local trace variables.
+ conf := tracer.Config()
+ if conf.PlugLocalVars {
+ q.plugTraceVars = true
+ }
+
+ return q
+}
+
+// WithMetrics sets the metrics collection to add evaluation metrics to. This
+// is optional.
+func (q *Query) WithMetrics(m metrics.Metrics) *Query {
+ q.metrics = m
+ return q
+}
+
+// WithInstrumentation sets the instrumentation configuration to enable on the
+// evaluation process. By default, instrumentation is turned off.
+func (q *Query) WithInstrumentation(instr *Instrumentation) *Query {
+ q.instr = instr
+ return q
+}
+
+// WithUnknowns sets the initial set of variables or references to treat as
+// unknown during query evaluation. This is required for partial evaluation.
+func (q *Query) WithUnknowns(terms []*ast.Term) *Query {
+ q.unknowns = terms
+ return q
+}
+
+// WithPartialNamespace sets the namespace to use for supporting rules
+// generated as part of the partial evaluation process. The ns value must be a
+// valid package path component.
+func (q *Query) WithPartialNamespace(ns string) *Query {
+ q.partialNamespace = ns
+ return q
+}
+
+// WithSkipPartialNamespace disables namespacing of saved support rules that are generated
+// from the original policy (rules which are completely synthetic are still namespaced.)
+func (q *Query) WithSkipPartialNamespace(yes bool) *Query {
+ q.skipSaveNamespace = yes
+ return q
+}
+
+// WithDisableInlining adds a set of paths to the query that should be excluded from
+// inlining. Inlining during partial evaluation can be expensive in some cases
+// (e.g., when a cross-product is computed.) Disabling inlining avoids expensive
+// computation at the cost of generating support rules.
+func (q *Query) WithDisableInlining(paths []ast.Ref) *Query {
+ q.disableInlining = paths
+ return q
+}
+
+// WithShallowInlining disables aggressive inlining performed during partial evaluation.
+// When shallow inlining is enabled rules that depend (transitively) on unknowns are not inlined.
+// Only rules/values that are completely known will be inlined.
+func (q *Query) WithShallowInlining(yes bool) *Query {
+ q.shallowInlining = yes
+ return q
+}
+
+// WithRuntime sets the runtime data to execute the query with. The runtime data
+// can be returned by the `opa.runtime` built-in function.
+func (q *Query) WithRuntime(runtime *ast.Term) *Query {
+ q.runtime = runtime
+ return q
+}
+
+// WithBuiltins adds a set of built-in functions that can be called by the
+// query.
+func (q *Query) WithBuiltins(builtins map[string]*Builtin) *Query {
+ q.builtins = builtins
+ return q
+}
+
+// WithIndexing will enable or disable using rule indexing for the evaluation
+// of the query. The default is enabled.
+func (q *Query) WithIndexing(enabled bool) *Query {
+ q.indexing = enabled
+ return q
+}
+
+// WithEarlyExit will enable or disable using 'early exit' for the evaluation
+// of the query. The default is enabled.
+func (q *Query) WithEarlyExit(enabled bool) *Query {
+ q.earlyExit = enabled
+ return q
+}
+
+// WithSeed sets a reader that will seed randomization required by built-in functions.
+// If a seed is not provided crypto/rand.Reader is used.
+func (q *Query) WithSeed(r io.Reader) *Query {
+ q.seed = r
+ return q
+}
+
+// WithTime sets the time that will be returned by the time.now_ns() built-in function.
+func (q *Query) WithTime(x time.Time) *Query {
+ q.time = x
+ return q
+}
+
+// WithInterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize.
+func (q *Query) WithInterQueryBuiltinCache(c cache.InterQueryCache) *Query {
+ q.interQueryBuiltinCache = c
+ return q
+}
+
+// WithInterQueryBuiltinValueCache sets the inter-query value cache that built-in functions can utilize.
+func (q *Query) WithInterQueryBuiltinValueCache(c cache.InterQueryValueCache) *Query {
+ q.interQueryBuiltinValueCache = c
+ return q
+}
+
+// WithNDBuiltinCache sets the non-deterministic builtin cache.
+func (q *Query) WithNDBuiltinCache(c builtins.NDBCache) *Query {
+ q.ndBuiltinCache = c
+ return q
+}
+
+// WithStrictBuiltinErrors tells the evaluator to treat all built-in function errors as fatal errors.
+func (q *Query) WithStrictBuiltinErrors(yes bool) *Query {
+ q.strictBuiltinErrors = yes
+ return q
+}
+
+// WithBuiltinErrorList supplies a pointer to an Error slice to store built-in function errors
+// encountered during evaluation. This error slice can be inspected after evaluation to determine
+// which built-in function errors occurred.
+func (q *Query) WithBuiltinErrorList(list *[]Error) *Query {
+ q.builtinErrorList = list
+ return q
+}
+
+// WithResolver configures an external resolver to use for the given ref.
+func (q *Query) WithResolver(ref ast.Ref, r resolver.Resolver) *Query {
+ if q.external == nil {
+ q.external = newResolverTrie()
+ }
+ q.external.Put(ref, r)
+ return q
+}
+
+// WithHTTPRoundTripper configures a custom HTTP transport for built-in functions that make HTTP requests.
+func (q *Query) WithHTTPRoundTripper(t CustomizeRoundTripper) *Query {
+ q.roundTripper = t
+ return q
+}
+
+func (q *Query) WithPrintHook(h print.Hook) *Query {
+ q.printHook = h
+ return q
+}
+
+// WithDistributedTracingOpts sets the options to be used by distributed tracing.
+func (q *Query) WithDistributedTracingOpts(tr tracing.Options) *Query {
+ q.tracingOpts = tr
+ return q
+}
+
+// WithStrictObjects tells the evaluator to avoid the "lazy object" optimization
+// applied when reading objects from the store. It will result in higher memory
+// usage and should only be used temporarily while adjusting code that breaks
+// because of the optimization.
+func (q *Query) WithStrictObjects(yes bool) *Query {
+ q.strictObjects = yes
+ return q
+}
+
+// WithVirtualCache sets the VirtualCache to use during evaluation. This is
+// optional, and if not set, the default cache is used.
+func (q *Query) WithVirtualCache(vc VirtualCache) *Query {
+ q.virtualCache = vc
+ return q
+}
+
+// WithBaseCache sets the BaseCache to use during evaluation. This is
+// optional, and if not set, the default cache is used.
+func (q *Query) WithBaseCache(bc BaseCache) *Query {
+ q.baseCache = bc
+ return q
+}
+
+// WithNondeterministicBuiltins causes non-deterministic builtins to be evalued
+// during partial evaluation. This is needed to pull in external data, or validate
+// a JWT, during PE, so that the result informs what queries are returned.
+func (q *Query) WithNondeterministicBuiltins(yes bool) *Query {
+ q.nondeterministicBuiltins = yes
+ return q
+}
+
+// PartialRun executes partial evaluation on the query with respect to unknown
+// values. Partial evaluation attempts to evaluate as much of the query as
+// possible without requiring values for the unknowns set on the query. The
+// result of partial evaluation is a new set of queries that can be evaluated
+// once the unknown value is known. In addition to new queries, partial
+// evaluation may produce additional support modules that should be used in
+// conjunction with the partially evaluated queries.
+func (q *Query) PartialRun(ctx context.Context) (partials []ast.Body, support []*ast.Module, err error) {
+ if q.partialNamespace == "" {
+ q.partialNamespace = "partial" // lazily initialize partial namespace
+ }
+ if q.seed == nil {
+ q.seed = rand.Reader
+ }
+ if q.time.IsZero() {
+ q.time = time.Now()
+ }
+ if q.metrics == nil {
+ q.metrics = metrics.New()
+ }
+
+ f := &queryIDFactory{}
+ b := newBindings(0, q.instr)
+
+ var vc VirtualCache
+ if q.virtualCache != nil {
+ vc = q.virtualCache
+ } else {
+ vc = NewVirtualCache()
+ }
+
+ var bc BaseCache
+ if q.baseCache != nil {
+ bc = q.baseCache
+ } else {
+ bc = newBaseCache()
+ }
+
+ e := &eval{
+ ctx: ctx,
+ metrics: q.metrics,
+ seed: q.seed,
+ timeStart: q.time.UnixNano(),
+ cancel: q.cancel,
+ query: q.query,
+ queryCompiler: q.queryCompiler,
+ queryIDFact: f,
+ queryID: f.Next(),
+ bindings: b,
+ compiler: q.compiler,
+ store: q.store,
+ baseCache: bc,
+ txn: q.txn,
+ input: q.input,
+ external: q.external,
+ tracers: q.tracers,
+ traceEnabled: len(q.tracers) > 0,
+ plugTraceVars: q.plugTraceVars,
+ instr: q.instr,
+ builtins: q.builtins,
+ builtinCache: builtins.Cache{},
+ interQueryBuiltinCache: q.interQueryBuiltinCache,
+ interQueryBuiltinValueCache: q.interQueryBuiltinValueCache,
+ ndBuiltinCache: q.ndBuiltinCache,
+ virtualCache: vc,
+ saveSet: newSaveSet(q.unknowns, b, q.instr),
+ saveStack: newSaveStack(),
+ saveSupport: newSaveSupport(),
+ saveNamespace: ast.InternedTerm(q.partialNamespace),
+ skipSaveNamespace: q.skipSaveNamespace,
+ inliningControl: &inliningControl{
+ shallow: q.shallowInlining,
+ nondeterministicBuiltins: q.nondeterministicBuiltins,
+ },
+ genvarprefix: q.genvarprefix,
+ runtime: q.runtime,
+ indexing: q.indexing,
+ earlyExit: q.earlyExit,
+ builtinErrors: &builtinErrors{},
+ printHook: q.printHook,
+ strictObjects: q.strictObjects,
+ }
+
+ if len(q.disableInlining) > 0 {
+ e.inliningControl.PushDisable(q.disableInlining, false)
+ }
+
+ e.caller = e
+ q.metrics.Timer(metrics.RegoPartialEval).Start()
+ defer q.metrics.Timer(metrics.RegoPartialEval).Stop()
+
+ livevars := ast.NewVarSet()
+ for _, t := range q.unknowns {
+ switch v := t.Value.(type) {
+ case ast.Var:
+ livevars.Add(v)
+ case ast.Ref:
+ livevars.Add(v[0].Value.(ast.Var))
+ }
+ }
+
+ ast.WalkVars(q.query, func(x ast.Var) bool {
+ if !x.IsGenerated() {
+ livevars.Add(x)
+ }
+ return false
+ })
+
+ p := copypropagation.New(livevars).WithCompiler(q.compiler)
+
+ err = e.Run(func(e *eval) error {
+
+ // Build output from saved expressions.
+ body := ast.NewBody()
+
+ for _, elem := range e.saveStack.Stack[len(e.saveStack.Stack)-1] {
+ body.Append(elem.Plug(e.bindings))
+ }
+
+ // Include bindings as exprs so that when caller evals the result, they
+ // can obtain values for the vars in their query.
+ bindingExprs := []*ast.Expr{}
+ _ = e.bindings.Iter(e.bindings, func(a, b *ast.Term) error {
+ bindingExprs = append(bindingExprs, ast.Equality.Expr(a, b))
+ return nil
+ }) // cannot return error
+
+ // Sort binding expressions so that results are deterministic.
+ sort.Slice(bindingExprs, func(i, j int) bool {
+ return bindingExprs[i].Compare(bindingExprs[j]) < 0
+ })
+
+ for i := range bindingExprs {
+ body.Append(bindingExprs[i])
+ }
+
+ // Skip this rule body if it fails to type-check.
+ // Type-checking failure means the rule body will never succeed.
+ if !e.compiler.PassesTypeCheck(body) {
+ return nil
+ }
+
+ if !q.shallowInlining {
+ body = applyCopyPropagation(p, e.instr, body)
+ }
+
+ partials = append(partials, body)
+ return nil
+ })
+
+ support = e.saveSupport.List()
+
+ if len(e.builtinErrors.errs) > 0 {
+ if q.strictBuiltinErrors {
+ err = e.builtinErrors.errs[0]
+ } else if q.builtinErrorList != nil {
+ // If a builtinErrorList has been supplied, we must use pointer indirection
+ // to append to it. builtinErrorList is a slice pointer so that errors can be
+ // appended to it without returning a new slice and changing the interface
+ // of PartialRun.
+ for _, err := range e.builtinErrors.errs {
+ if tdError, ok := err.(*Error); ok {
+ *(q.builtinErrorList) = append(*(q.builtinErrorList), *tdError)
+ } else {
+ *(q.builtinErrorList) = append(*(q.builtinErrorList), Error{
+ Code: BuiltinErr,
+ Message: err.Error(),
+ })
+ }
+ }
+ }
+ }
+
+ for i, m := range support {
+ if regoVersion := q.compiler.DefaultRegoVersion(); regoVersion != ast.RegoUndefined {
+ ast.SetModuleRegoVersion(m, q.compiler.DefaultRegoVersion())
+ }
+
+ sort.Slice(support[i].Rules, func(j, k int) bool {
+ return support[i].Rules[j].Compare(support[i].Rules[k]) < 0
+ })
+ }
+
+ return partials, support, err
+}
+
+// Run is a wrapper around Iter that accumulates query results and returns them
+// in one shot.
+func (q *Query) Run(ctx context.Context) (QueryResultSet, error) {
+ qrs := QueryResultSet{}
+ return qrs, q.Iter(ctx, func(qr QueryResult) error {
+ qrs = append(qrs, qr)
+ return nil
+ })
+}
+
+// Iter executes the query and invokes the iter function with query results
+// produced by evaluating the query.
+func (q *Query) Iter(ctx context.Context, iter func(QueryResult) error) error {
+ // Query evaluation must not be allowed if the compiler has errors and is in an undefined, possibly inconsistent state
+ if q.compiler != nil && len(q.compiler.Errors) > 0 {
+ return &Error{
+ Code: InternalErr,
+ Message: "compiler has errors",
+ }
+ }
+
+ if q.seed == nil {
+ q.seed = rand.Reader
+ }
+ if q.time.IsZero() {
+ q.time = time.Now()
+ }
+ if q.metrics == nil {
+ q.metrics = metrics.New()
+ }
+
+ f := &queryIDFactory{}
+
+ var vc VirtualCache
+ if q.virtualCache != nil {
+ vc = q.virtualCache
+ } else {
+ vc = NewVirtualCache()
+ }
+
+ var bc BaseCache
+ if q.baseCache != nil {
+ bc = q.baseCache
+ } else {
+ bc = newBaseCache()
+ }
+
+ e := &eval{
+ ctx: ctx,
+ metrics: q.metrics,
+ seed: q.seed,
+ timeStart: q.time.UnixNano(),
+ cancel: q.cancel,
+ query: q.query,
+ queryCompiler: q.queryCompiler,
+ queryIDFact: f,
+ queryID: f.Next(),
+ bindings: newBindings(0, q.instr),
+ compiler: q.compiler,
+ store: q.store,
+ baseCache: bc,
+ txn: q.txn,
+ input: q.input,
+ external: q.external,
+ tracers: q.tracers,
+ traceEnabled: len(q.tracers) > 0,
+ plugTraceVars: q.plugTraceVars,
+ instr: q.instr,
+ builtins: q.builtins,
+ builtinCache: builtins.Cache{},
+ interQueryBuiltinCache: q.interQueryBuiltinCache,
+ interQueryBuiltinValueCache: q.interQueryBuiltinValueCache,
+ ndBuiltinCache: q.ndBuiltinCache,
+ virtualCache: vc,
+ genvarprefix: q.genvarprefix,
+ runtime: q.runtime,
+ indexing: q.indexing,
+ earlyExit: q.earlyExit,
+ builtinErrors: &builtinErrors{},
+ printHook: q.printHook,
+ tracingOpts: q.tracingOpts,
+ strictObjects: q.strictObjects,
+ roundTripper: q.roundTripper,
+ }
+ e.caller = e
+ q.metrics.Timer(metrics.RegoQueryEval).Start()
+ err := e.Run(func(e *eval) error {
+ qr := QueryResult{}
+ _ = e.bindings.Iter(nil, func(k, v *ast.Term) error {
+ qr[k.Value.(ast.Var)] = v
+ return nil
+ }) // cannot return error
+ return iter(qr)
+ })
+
+ if len(e.builtinErrors.errs) > 0 {
+ if q.strictBuiltinErrors {
+ err = e.builtinErrors.errs[0]
+ } else if q.builtinErrorList != nil {
+ // If a builtinErrorList has been supplied, we must use pointer indirection
+ // to append to it. builtinErrorList is a slice pointer so that errors can be
+ // appended to it without returning a new slice and changing the interface
+ // of Iter.
+ for _, err := range e.builtinErrors.errs {
+ if tdError, ok := err.(*Error); ok {
+ *(q.builtinErrorList) = append(*(q.builtinErrorList), *tdError)
+ } else {
+ *(q.builtinErrorList) = append(*(q.builtinErrorList), Error{
+ Code: BuiltinErr,
+ Message: err.Error(),
+ })
+ }
+ }
+ }
+ }
+
+ q.metrics.Timer(metrics.RegoQueryEval).Stop()
+ return err
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/reachable.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/reachable.go
similarity index 97%
rename from vendor/github.com/open-policy-agent/opa/topdown/reachable.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/reachable.go
index 8d61018e76..1c31019db9 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/reachable.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/reachable.go
@@ -5,8 +5,8 @@
package topdown
import (
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
// Helper: sets of vertices can be represented as Arrays or Sets.
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/regex.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/regex.go
similarity index 91%
rename from vendor/github.com/open-policy-agent/opa/topdown/regex.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/regex.go
index 452e7d58bf..1d2906ee2e 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/regex.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/regex.go
@@ -11,8 +11,8 @@ import (
gintersect "github.com/yashtewari/glob-intersection"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
const regexCacheMaxSize = 100
@@ -25,15 +25,15 @@ func builtinRegexIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.
s, err := builtins.StringOperand(operands[0].Value, 1)
if err != nil {
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
_, err = regexp.Compile(string(s))
if err != nil {
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
- return iter(ast.BooleanTerm(true))
+ return iter(ast.InternedTerm(true))
}
func builtinRegexMatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -49,7 +49,7 @@ func builtinRegexMatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast
if err != nil {
return err
}
- return iter(ast.BooleanTerm(re.MatchString(string(s2))))
+ return iter(ast.InternedTerm(re.MatchString(string(s2))))
}
func builtinRegexMatchTemplate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -79,7 +79,7 @@ func builtinRegexMatchTemplate(_ BuiltinContext, operands []*ast.Term, iter func
if err != nil {
return err
}
- return iter(ast.BooleanTerm(re.MatchString(string(match))))
+ return iter(ast.InternedTerm(re.MatchString(string(match))))
}
func builtinRegexSplit(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -101,11 +101,12 @@ func builtinRegexSplit(bctx BuiltinContext, operands []*ast.Term, iter func(*ast
for i := range elems {
arr[i] = ast.StringTerm(elems[i])
}
- return iter(ast.NewTerm(ast.NewArray(arr...)))
+ return iter(ast.ArrayTerm(arr...))
}
func getRegexp(bctx BuiltinContext, pat string) (*regexp.Regexp, error) {
if bctx.InterQueryBuiltinValueCache != nil {
+ // TODO: Use named cache
val, ok := bctx.InterQueryBuiltinValueCache.Get(ast.String(pat))
if ok {
res, valid := val.(*regexp.Regexp)
@@ -176,7 +177,7 @@ func builtinGlobsMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Te
if err != nil {
return err
}
- return iter(ast.BooleanTerm(ne))
+ return iter(ast.InternedTerm(ne))
}
func builtinRegexFind(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -202,7 +203,7 @@ func builtinRegexFind(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.
for i := range elems {
arr[i] = ast.StringTerm(elems[i])
}
- return iter(ast.NewTerm(ast.NewArray(arr...)))
+ return iter(ast.ArrayTerm(arr...))
}
func builtinRegexFindAllStringSubmatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -231,10 +232,10 @@ func builtinRegexFindAllStringSubmatch(bctx BuiltinContext, operands []*ast.Term
for j := range matches[i] {
inner[j] = ast.StringTerm(matches[i][j])
}
- outer[i] = ast.NewTerm(ast.NewArray(inner...))
+ outer[i] = ast.ArrayTerm(inner...)
}
- return iter(ast.NewTerm(ast.NewArray(outer...)))
+ return iter(ast.ArrayTerm(outer...))
}
func builtinRegexReplace(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -259,8 +260,11 @@ func builtinRegexReplace(bctx BuiltinContext, operands []*ast.Term, iter func(*a
}
res := re.ReplaceAllString(string(base), string(value))
+ if res == string(base) {
+ return iter(operands[0])
+ }
- return iter(ast.StringTerm(res))
+ return iter(ast.InternedTerm(res))
}
func init() {
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/regex_template.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/regex_template.go
similarity index 99%
rename from vendor/github.com/open-policy-agent/opa/topdown/regex_template.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/regex_template.go
index 4bcddc060b..a1d946fd59 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/regex_template.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/regex_template.go
@@ -45,7 +45,7 @@ import (
func delimiterIndices(s string, delimiterStart, delimiterEnd byte) ([]int, error) {
var level, idx int
idxs := make([]int, 0)
- for i := 0; i < len(s); i++ {
+ for i := range len(s) {
switch s[i] {
case delimiterStart:
if level++; level == 1 {
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/resolver.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/resolver.go
similarity index 85%
rename from vendor/github.com/open-policy-agent/opa/topdown/resolver.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/resolver.go
index 5ed6c1e443..8fff22b1d3 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/resolver.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/resolver.go
@@ -5,9 +5,9 @@
package topdown
import (
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/metrics"
- "github.com/open-policy-agent/opa/resolver"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/metrics"
+ "github.com/open-policy-agent/opa/v1/resolver"
)
type resolverTrie struct {
@@ -35,6 +35,10 @@ func (t *resolverTrie) Put(ref ast.Ref, r resolver.Resolver) {
func (t *resolverTrie) Resolve(e *eval, ref ast.Ref) (ast.Value, error) {
e.metrics.Timer(metrics.RegoExternalResolve).Start()
defer e.metrics.Timer(metrics.RegoExternalResolve).Stop()
+
+ if t == nil {
+ return nil, nil
+ }
node := t
for i, t := range ref {
child, ok := node.children[t.Value]
@@ -48,7 +52,11 @@ func (t *resolverTrie) Resolve(e *eval, ref ast.Ref) (ast.Value, error) {
Input: e.input,
Metrics: e.metrics,
}
- e.traceWasm(e.query[e.index], &in.Ref)
+ if e.traceEnabled {
+ // avoid leaking pointer if trace is disabled
+ cpy := in.Ref
+ e.traceWasm(e.query[e.index], &cpy)
+ }
if e.data != nil {
return nil, errInScopeWithStmt
}
@@ -75,7 +83,10 @@ func (t *resolverTrie) Resolve(e *eval, ref ast.Ref) (ast.Value, error) {
func (t *resolverTrie) mktree(e *eval, in resolver.Input) (ast.Value, error) {
if t.r != nil {
- e.traceWasm(e.query[e.index], &in.Ref)
+ if e.traceEnabled {
+ cpy := in.Ref
+ e.traceWasm(e.query[e.index], &cpy)
+ }
if e.data != nil {
return nil, errInScopeWithStmt
}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/runtime.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/runtime.go
similarity index 69%
rename from vendor/github.com/open-policy-agent/opa/topdown/runtime.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/runtime.go
index 7d512f7c00..2bbfb43f39 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/runtime.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/runtime.go
@@ -5,25 +5,28 @@
package topdown
import (
+ "errors"
"fmt"
- "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/v1/ast"
)
+var nothingResolver ast.Resolver = illegalResolver{}
+
func builtinOPARuntime(bctx BuiltinContext, _ []*ast.Term, iter func(*ast.Term) error) error {
if bctx.Runtime == nil {
- return iter(ast.ObjectTerm())
+ return iter(ast.InternedEmptyObject)
}
- if bctx.Runtime.Get(ast.StringTerm("config")) != nil {
- iface, err := ast.ValueToInterface(bctx.Runtime.Value, illegalResolver{})
+ if bctx.Runtime.Get(ast.InternedTerm("config")) != nil {
+ iface, err := ast.ValueToInterface(bctx.Runtime.Value, nothingResolver)
if err != nil {
return err
}
- if object, ok := iface.(map[string]interface{}); ok {
+ if object, ok := iface.(map[string]any); ok {
if cfgRaw, ok := object["config"]; ok {
- if config, ok := cfgRaw.(map[string]interface{}); ok {
+ if config, ok := cfgRaw.(map[string]any); ok {
configPurged, err := activeConfig(config)
if err != nil {
return err
@@ -46,7 +49,7 @@ func init() {
RegisterBuiltinFunc(ast.OPARuntime.Name, builtinOPARuntime)
}
-func activeConfig(config map[string]interface{}) (interface{}, error) {
+func activeConfig(config map[string]any) (any, error) {
if config["services"] != nil {
err := removeServiceCredentials(config["services"])
@@ -65,10 +68,10 @@ func activeConfig(config map[string]interface{}) (interface{}, error) {
return config, nil
}
-func removeServiceCredentials(x interface{}) error {
+func removeServiceCredentials(x any) error {
switch x := x.(type) {
- case []interface{}:
+ case []any:
for _, v := range x {
err := removeKey(v, "credentials")
if err != nil {
@@ -76,7 +79,7 @@ func removeServiceCredentials(x interface{}) error {
}
}
- case map[string]interface{}:
+ case map[string]any:
for _, v := range x {
err := removeKey(v, "credentials")
if err != nil {
@@ -90,10 +93,10 @@ func removeServiceCredentials(x interface{}) error {
return nil
}
-func removeCryptoKeys(x interface{}) error {
+func removeCryptoKeys(x any) error {
switch x := x.(type) {
- case map[string]interface{}:
+ case map[string]any:
for _, v := range x {
err := removeKey(v, "key", "private_key")
if err != nil {
@@ -107,10 +110,10 @@ func removeCryptoKeys(x interface{}) error {
return nil
}
-func removeKey(x interface{}, keys ...string) error {
- val, ok := x.(map[string]interface{})
+func removeKey(x any, keys ...string) error {
+ val, ok := x.(map[string]any)
if !ok {
- return fmt.Errorf("type assertion error")
+ return errors.New("type assertion error")
}
for _, key := range keys {
@@ -122,6 +125,6 @@ func removeKey(x interface{}, keys ...string) error {
type illegalResolver struct{}
-func (illegalResolver) Resolve(ref ast.Ref) (interface{}, error) {
+func (illegalResolver) Resolve(ref ast.Ref) (any, error) {
return nil, fmt.Errorf("illegal value: %v", ref)
}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/save.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/save.go
similarity index 86%
rename from vendor/github.com/open-policy-agent/opa/topdown/save.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/save.go
index 0468692cc6..47bf7521b4 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/save.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/save.go
@@ -1,11 +1,13 @@
package topdown
import (
+ "cmp"
"container/list"
"fmt"
+ "slices"
"strings"
- "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/v1/ast"
)
// saveSet contains a stack of terms that are considered 'unknown' during
@@ -355,17 +357,23 @@ func splitPackageAndRule(path ast.Ref) (ast.Ref, ast.Ref) {
// being saved. This check allows the evaluator to evaluate statements
// completely during partial evaluation as long as they do not depend on any
// kind of unknown value or statements that would generate saves.
-func saveRequired(c *ast.Compiler, ic *inliningControl, icIgnoreInternal bool, ss *saveSet, b *bindings, x interface{}, rec bool) bool {
+func saveRequired(c *ast.Compiler, ic *inliningControl, icIgnoreInternal bool, ss *saveSet, b *bindings, x any, rec bool) bool {
var found bool
- vis := ast.NewGenericVisitor(func(node interface{}) bool {
+ vis := ast.NewGenericVisitor(func(node any) bool {
if found {
return found
}
switch node := node.(type) {
case *ast.Expr:
- found = len(node.With) > 0 || ignoreExprDuringPartial(node)
+ found = len(node.With) > 0
+ if found {
+ return found
+ }
+ if !ic.nondeterministicBuiltins { // skip evaluating non-det builtins for PE
+ found = ignoreExprDuringPartial(node)
+ }
case *ast.Term:
switch v := node.Value.(type) {
case ast.Var:
@@ -412,35 +420,56 @@ func ignoreDuringPartial(bi *ast.Builtin) bool {
// Note(philipc): We keep this legacy check around to avoid breaking
// existing library users.
//nolint:staticcheck // We specifically ignore our own linter warning here.
- for _, ignore := range ast.IgnoreDuringPartialEval {
- if bi == ignore {
- return true
- }
- }
- // Otherwise, ensure all non-deterministic builtins are thrown out.
- return bi.Nondeterministic
+ return cmp.Or(slices.Contains(ast.IgnoreDuringPartialEval, bi), bi.Nondeterministic)
}
type inliningControl struct {
- shallow bool
- disable []disableInliningFrame
+ shallow bool
+ disable []disableInliningFrame
+ nondeterministicBuiltins bool // evaluate non-det builtins during PE (if args are known)
}
type disableInliningFrame struct {
internal bool
refs []ast.Ref
+ v ast.Var
}
-func (i *inliningControl) PushDisable(refs []ast.Ref, internal bool) {
+func (i *inliningControl) PushDisable(x any, internal bool) {
if i == nil {
return
}
+
+ switch x := x.(type) {
+ case []ast.Ref:
+ i.PushDisableRefs(x, internal)
+ case ast.Var:
+ i.PushDisableVar(x, internal)
+ }
+}
+
+func (i *inliningControl) PushDisableRefs(refs []ast.Ref, internal bool) {
+ if i == nil {
+ return
+ }
+
i.disable = append(i.disable, disableInliningFrame{
internal: internal,
refs: refs,
})
}
+func (i *inliningControl) PushDisableVar(v ast.Var, internal bool) {
+ if i == nil {
+ return
+ }
+
+ i.disable = append(i.disable, disableInliningFrame{
+ internal: internal,
+ v: v,
+ })
+}
+
func (i *inliningControl) PopDisable() {
if i == nil {
return
@@ -448,10 +477,26 @@ func (i *inliningControl) PopDisable() {
i.disable = i.disable[:len(i.disable)-1]
}
-func (i *inliningControl) Disabled(ref ast.Ref, ignoreInternal bool) bool {
+func (i *inliningControl) Disabled(x any, ignoreInternal bool) bool {
if i == nil {
return false
}
+
+ switch x := x.(type) {
+ case ast.Ref:
+ return i.DisabledRef(x, ignoreInternal)
+ case ast.Var:
+ return i.DisabledVar(x, ignoreInternal)
+ }
+
+ return false
+}
+
+func (i *inliningControl) DisabledRef(ref ast.Ref, ignoreInternal bool) bool {
+ if i == nil {
+ return false
+ }
+
for _, frame := range i.disable {
if !frame.internal || !ignoreInternal {
for _, other := range frame.refs {
@@ -463,3 +508,16 @@ func (i *inliningControl) Disabled(ref ast.Ref, ignoreInternal bool) bool {
}
return false
}
+
+func (i *inliningControl) DisabledVar(v ast.Var, ignoreInternal bool) bool {
+ if i == nil {
+ return false
+ }
+
+ for _, frame := range i.disable {
+ if (!frame.internal || !ignoreInternal) && frame.v.Equal(v) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/semver.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/semver.go
similarity index 86%
rename from vendor/github.com/open-policy-agent/opa/topdown/semver.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/semver.go
index 7bb7b9c183..3b79ebd586 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/semver.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/semver.go
@@ -7,9 +7,9 @@ package topdown
import (
"fmt"
- "github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/internal/semver"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
func builtinSemVerCompare(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -34,13 +34,13 @@ func builtinSemVerCompare(_ BuiltinContext, operands []*ast.Term, iter func(*ast
result := versionA.Compare(*versionB)
- return iter(ast.IntNumberTerm(result))
+ return iter(ast.InternedTerm(result))
}
func builtinSemVerIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
versionString, err := builtins.StringOperand(operands[0].Value, 1)
if err != nil {
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
result := true
@@ -50,7 +50,7 @@ func builtinSemVerIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast
result = false
}
- return iter(ast.BooleanTerm(result))
+ return iter(ast.InternedTerm(result))
}
func init() {
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/sets.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/sets.go
similarity index 90%
rename from vendor/github.com/open-policy-agent/opa/topdown/sets.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/sets.go
index a973404f3f..c50efe4a80 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/sets.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/sets.go
@@ -5,11 +5,11 @@
package topdown
import (
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
-// Deprecated in v0.4.2 in favour of minus/infix "-" operation.
+// Deprecated: deprecated in v0.4.2 in favour of minus/infix "-" operation.
func builtinSetDiff(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
s1, err := builtins.SetOperand(operands[0].Value, 1)
@@ -35,7 +35,7 @@ func builtinSetIntersection(_ BuiltinContext, operands []*ast.Term, iter func(*a
// empty input set
if inputSet.Len() == 0 {
- return iter(ast.NewTerm(ast.NewSet()))
+ return iter(ast.InternedEmptySet)
}
var result ast.Set
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/strings.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/strings.go
similarity index 62%
rename from vendor/github.com/open-policy-agent/opa/topdown/strings.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/strings.go
index d9e4a55e58..13e9b81339 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/strings.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/strings.go
@@ -5,15 +5,20 @@
package topdown
import (
+ "errors"
"fmt"
"math/big"
"sort"
+ "strconv"
"strings"
+ "unicode"
+ "unicode/utf8"
"github.com/tchap/go-patricia/v2/patricia"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/util"
)
func builtinAnyPrefixMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -47,7 +52,7 @@ func builtinAnyPrefixMatch(_ BuiltinContext, operands []*ast.Term, iter func(*as
return builtins.NewOperandTypeErr(2, b, "string", "set", "array")
}
- return iter(ast.BooleanTerm(anyStartsWithAny(strs, prefixes)))
+ return iter(ast.InternedTerm(anyStartsWithAny(strs, prefixes)))
}
func builtinAnySuffixMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -87,7 +92,7 @@ func builtinAnySuffixMatch(_ BuiltinContext, operands []*ast.Term, iter func(*as
return builtins.NewOperandTypeErr(2, b, "string", "set", "array")
}
- return iter(ast.BooleanTerm(anyStartsWithAny(strsReversed, suffixesReversed)))
+ return iter(ast.InternedTerm(anyStartsWithAny(strsReversed, suffixesReversed)))
}
func anyStartsWithAny(strs []string, prefixes []string) bool {
@@ -99,11 +104,11 @@ func anyStartsWithAny(strs []string, prefixes []string) bool {
}
trie := patricia.NewTrie()
- for i := 0; i < len(strs); i++ {
+ for i := range strs {
trie.Insert([]byte(strs[i]), true)
}
- for i := 0; i < len(prefixes); i++ {
+ for i := range prefixes {
if trie.MatchSubtree([]byte(prefixes[i])) {
return true
}
@@ -131,6 +136,9 @@ func builtinFormatInt(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter
case ast.Number("8"):
format = "%o"
case ast.Number("10"):
+ if i, ok := input.Int(); ok {
+ return iter(ast.InternedIntegerString(i))
+ }
format = "%d"
case ast.Number("16"):
format = "%x"
@@ -141,48 +149,110 @@ func builtinFormatInt(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter
f := builtins.NumberToFloat(input)
i, _ := f.Int(nil)
- return iter(ast.StringTerm(fmt.Sprintf(format, i)))
+ return iter(ast.InternedTerm(fmt.Sprintf(format, i)))
}
func builtinConcat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
-
join, err := builtins.StringOperand(operands[0].Value, 1)
if err != nil {
return err
}
- strs := []string{}
+ // fast path for empty or single string array/set, allocates no memory
+ if term, ok := zeroOrOneStringTerm(operands[1].Value); ok {
+ return iter(term)
+ }
+ // NOTE(anderseknert):
+ // More or less Go's strings.Join implementation, but where we avoid
+ // creating an intermediate []string slice to pass to that function,
+ // as that's expensive (3.5x more space allocated). Instead we build
+ // the string directly using a strings.Builder to concatenate the string
+ // values from the array/set with the separator.
+ n := 0
switch b := operands[1].Value.(type) {
case *ast.Array:
- err := b.Iter(func(x *ast.Term) error {
- s, ok := x.Value.(ast.String)
+ l := b.Len()
+ for i := range l {
+ s, ok := b.Elem(i).Value.(ast.String)
if !ok {
- return builtins.NewOperandElementErr(2, operands[1].Value, x.Value, "string")
+ return builtins.NewOperandElementErr(2, b, b.Elem(i).Value, "string")
+ }
+ n += len(s)
+ }
+ sep := string(join)
+ n += len(sep) * (l - 1)
+ var sb strings.Builder
+ sb.Grow(n)
+ sb.WriteString(string(b.Elem(0).Value.(ast.String)))
+ if sep == "" {
+ for i := 1; i < l; i++ {
+ sb.WriteString(string(b.Elem(i).Value.(ast.String)))
+ }
+ } else if len(sep) == 1 {
+ // when the separator is a single byte, sb.WriteByte is substantially faster
+ bsep := sep[0]
+ for i := 1; i < l; i++ {
+ sb.WriteByte(bsep)
+ sb.WriteString(string(b.Elem(i).Value.(ast.String)))
+ }
+ } else {
+ // for longer separators, there is no such difference between WriteString and Write
+ for i := 1; i < l; i++ {
+ sb.WriteString(sep)
+ sb.WriteString(string(b.Elem(i).Value.(ast.String)))
}
- strs = append(strs, string(s))
- return nil
- })
- if err != nil {
- return err
}
+ return iter(ast.InternedTerm(sb.String()))
case ast.Set:
- err := b.Iter(func(x *ast.Term) error {
- s, ok := x.Value.(ast.String)
+ for _, v := range b.Slice() {
+ s, ok := v.Value.(ast.String)
if !ok {
- return builtins.NewOperandElementErr(2, operands[1].Value, x.Value, "string")
+ return builtins.NewOperandElementErr(2, b, v.Value, "string")
}
- strs = append(strs, string(s))
- return nil
- })
- if err != nil {
- return err
+ n += len(s)
}
- default:
- return builtins.NewOperandTypeErr(2, operands[1].Value, "set", "array")
+ sep := string(join)
+ l := b.Len()
+ n += len(sep) * (l - 1)
+ var sb strings.Builder
+ sb.Grow(n)
+ for i, v := range b.Slice() {
+ sb.WriteString(string(v.Value.(ast.String)))
+ if i < l-1 {
+ sb.WriteString(sep)
+ }
+ }
+ return iter(ast.InternedTerm(sb.String()))
}
- return iter(ast.StringTerm(strings.Join(strs, string(join))))
+ return builtins.NewOperandTypeErr(2, operands[1].Value, "set", "array")
+}
+
+func zeroOrOneStringTerm(a ast.Value) (*ast.Term, bool) {
+ switch b := a.(type) {
+ case *ast.Array:
+ if b.Len() == 0 {
+ return ast.InternedEmptyString, true
+ }
+ if b.Len() == 1 {
+ e := b.Elem(0)
+ if _, ok := e.Value.(ast.String); ok {
+ return e, true
+ }
+ }
+ case ast.Set:
+ if b.Len() == 0 {
+ return ast.InternedEmptyString, true
+ }
+ if b.Len() == 1 {
+ e := b.Slice()[0]
+ if _, ok := e.Value.(ast.String); ok {
+ return e, true
+ }
+ }
+ }
+ return nil, false
}
func runesEqual(a, b []rune) bool {
@@ -208,7 +278,14 @@ func builtinIndexOf(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term)
return err
}
if len(string(search)) == 0 {
- return fmt.Errorf("empty search character")
+ return errors.New("empty search character")
+ }
+
+ if isASCII(string(base)) && isASCII(string(search)) {
+ // this is a false positive in the indexAlloc rule that thinks
+ // we're converting byte arrays to strings
+ //nolint:gocritic
+ return iter(ast.InternedTerm(strings.Index(string(base), string(search))))
}
baseRunes := []rune(string(base))
@@ -218,14 +295,14 @@ func builtinIndexOf(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term)
for i, r := range baseRunes {
if len(baseRunes) >= i+searchLen {
if r == searchRunes[0] && runesEqual(baseRunes[i:i+searchLen], searchRunes) {
- return iter(ast.IntNumberTerm(i))
+ return iter(ast.InternedTerm(i))
}
} else {
break
}
}
- return iter(ast.IntNumberTerm(-1))
+ return iter(ast.InternedTerm(-1))
}
func builtinIndexOfN(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -239,7 +316,7 @@ func builtinIndexOfN(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term
return err
}
if len(string(search)) == 0 {
- return fmt.Errorf("empty search character")
+ return errors.New("empty search character")
}
baseRunes := []rune(string(base))
@@ -250,7 +327,7 @@ func builtinIndexOfN(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term
for i, r := range baseRunes {
if len(baseRunes) >= i+searchLen {
if r == searchRunes[0] && runesEqual(baseRunes[i:i+searchLen], searchRunes) {
- arr = append(arr, ast.IntNumberTerm(i))
+ arr = append(arr, ast.InternedTerm(i))
}
} else {
break
@@ -266,15 +343,10 @@ func builtinSubstring(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter
if err != nil {
return err
}
- runes := []rune(base)
startIndex, err := builtins.IntOperand(operands[1].Value, 2)
if err != nil {
return err
- } else if startIndex >= len(runes) {
- return iter(ast.StringTerm(""))
- } else if startIndex < 0 {
- return fmt.Errorf("negative offset")
}
length, err := builtins.IntOperand(operands[2].Value, 3)
@@ -282,18 +354,62 @@ func builtinSubstring(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter
return err
}
- var s ast.String
+ if startIndex < 0 {
+ return errors.New("negative offset")
+ }
+
+ sbase := string(base)
+ if sbase == "" {
+ return iter(ast.InternedEmptyString)
+ }
+
+ // Optimized path for the likely common case of ASCII strings.
+ // This allocates less memory and runs in about 1/3 the time.
+ if isASCII(sbase) {
+ if startIndex >= len(sbase) {
+ return iter(ast.InternedEmptyString)
+ }
+
+ if length < 0 {
+ return iter(ast.InternedTerm(sbase[startIndex:]))
+ }
+
+ if startIndex == 0 && length >= len(sbase) {
+ return iter(operands[0])
+ }
+
+ upto := min(len(sbase), startIndex+length)
+ return iter(ast.InternedTerm(sbase[startIndex:upto]))
+ }
+
+ if startIndex == 0 && length >= utf8.RuneCountInString(sbase) {
+ return iter(operands[0])
+ }
+
+ runes := []rune(base)
+
+ if startIndex >= len(runes) {
+ return iter(ast.InternedEmptyString)
+ }
+
+ var s string
if length < 0 {
- s = ast.String(runes[startIndex:])
+ s = string(runes[startIndex:])
} else {
- upto := startIndex + length
- if len(runes) < upto {
- upto = len(runes)
- }
- s = ast.String(runes[startIndex:upto])
+ upto := min(len(runes), startIndex+length)
+ s = string(runes[startIndex:upto])
}
- return iter(ast.NewTerm(s))
+ return iter(ast.InternedTerm(s))
+}
+
+func isASCII(s string) bool {
+ for i := range len(s) {
+ if s[i] > unicode.MaxASCII {
+ return false
+ }
+ }
+ return true
}
func builtinContains(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -307,7 +423,7 @@ func builtinContains(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term
return err
}
- return iter(ast.BooleanTerm(strings.Contains(string(s), string(substr))))
+ return iter(ast.InternedTerm(strings.Contains(string(s), string(substr))))
}
func builtinStringCount(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -323,10 +439,9 @@ func builtinStringCount(_ BuiltinContext, operands []*ast.Term, iter func(*ast.T
baseTerm := string(s)
searchTerm := string(substr)
-
count := strings.Count(baseTerm, searchTerm)
- return iter(ast.IntNumberTerm(count))
+ return iter(ast.InternedTerm(count))
}
func builtinStartsWith(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -340,7 +455,7 @@ func builtinStartsWith(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Te
return err
}
- return iter(ast.BooleanTerm(strings.HasPrefix(string(s), string(prefix))))
+ return iter(ast.InternedTerm(strings.HasPrefix(string(s), string(prefix))))
}
func builtinEndsWith(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -354,7 +469,7 @@ func builtinEndsWith(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term
return err
}
- return iter(ast.BooleanTerm(strings.HasSuffix(string(s), string(suffix))))
+ return iter(ast.InternedTerm(strings.HasSuffix(string(s), string(suffix))))
}
func builtinLower(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -363,7 +478,14 @@ func builtinLower(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) e
return err
}
- return iter(ast.StringTerm(strings.ToLower(string(s))))
+ arg := string(s)
+ low := strings.ToLower(arg)
+
+ if arg == low {
+ return iter(operands[0])
+ }
+
+ return iter(ast.InternedTerm(low))
}
func builtinUpper(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -372,7 +494,14 @@ func builtinUpper(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) e
return err
}
- return iter(ast.StringTerm(strings.ToUpper(string(s))))
+ arg := string(s)
+ upp := strings.ToUpper(arg)
+
+ if arg == upp {
+ return iter(operands[0])
+ }
+
+ return iter(ast.InternedTerm(upp))
}
func builtinSplit(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -380,16 +509,18 @@ func builtinSplit(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) e
if err != nil {
return err
}
+
d, err := builtins.StringOperand(operands[1].Value, 2)
if err != nil {
return err
}
- elems := strings.Split(string(s), string(d))
- arr := make([]*ast.Term, len(elems))
- for i := range elems {
- arr[i] = ast.StringTerm(elems[i])
+
+ text, delim := string(s), string(d)
+ if !strings.Contains(text, delim) {
+ return iter(ast.ArrayTerm(operands[0]))
}
- return iter(ast.ArrayTerm(arr...))
+
+ return iter(ast.ArrayTerm(util.SplitMap(text, delim, ast.InternedTerm)...))
}
func builtinReplace(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -408,7 +539,12 @@ func builtinReplace(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term)
return err
}
- return iter(ast.StringTerm(strings.Replace(string(s), string(old), string(n), -1)))
+ replaced := strings.ReplaceAll(string(s), string(old), string(n))
+ if replaced == string(s) {
+ return iter(operands[0])
+ }
+
+ return iter(ast.InternedTerm(replaced))
}
func builtinReplaceN(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -437,14 +573,8 @@ func builtinReplaceN(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term
}
oldnewArr = append(oldnewArr, string(keyVal), string(strVal))
}
- if err != nil {
- return err
- }
-
- r := strings.NewReplacer(oldnewArr...)
- replaced := r.Replace(string(s))
- return iter(ast.StringTerm(replaced))
+ return iter(ast.InternedTerm(strings.NewReplacer(oldnewArr...).Replace(string(s))))
}
func builtinTrim(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -458,7 +588,12 @@ func builtinTrim(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) er
return err
}
- return iter(ast.StringTerm(strings.Trim(string(s), string(c))))
+ trimmed := strings.Trim(string(s), string(c))
+ if trimmed == string(s) {
+ return iter(operands[0])
+ }
+
+ return iter(ast.InternedTerm(strings.Trim(string(s), string(c))))
}
func builtinTrimLeft(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -472,7 +607,12 @@ func builtinTrimLeft(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term
return err
}
- return iter(ast.StringTerm(strings.TrimLeft(string(s), string(c))))
+ trimmed := strings.TrimLeft(string(s), string(c))
+ if trimmed == string(s) {
+ return iter(operands[0])
+ }
+
+ return iter(ast.InternedTerm(trimmed))
}
func builtinTrimPrefix(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -486,7 +626,12 @@ func builtinTrimPrefix(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Te
return err
}
- return iter(ast.StringTerm(strings.TrimPrefix(string(s), string(pre))))
+ trimmed := strings.TrimPrefix(string(s), string(pre))
+ if trimmed == string(s) {
+ return iter(operands[0])
+ }
+
+ return iter(ast.InternedTerm(trimmed))
}
func builtinTrimRight(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -500,7 +645,12 @@ func builtinTrimRight(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter
return err
}
- return iter(ast.StringTerm(strings.TrimRight(string(s), string(c))))
+ trimmed := strings.TrimRight(string(s), string(c))
+ if trimmed == string(s) {
+ return iter(operands[0])
+ }
+
+ return iter(ast.InternedTerm(trimmed))
}
func builtinTrimSuffix(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -514,7 +664,12 @@ func builtinTrimSuffix(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Te
return err
}
- return iter(ast.StringTerm(strings.TrimSuffix(string(s), string(suf))))
+ trimmed := strings.TrimSuffix(string(s), string(suf))
+ if trimmed == string(s) {
+ return iter(operands[0])
+ }
+
+ return iter(ast.InternedTerm(trimmed))
}
func builtinTrimSpace(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -523,7 +678,12 @@ func builtinTrimSpace(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter
return err
}
- return iter(ast.StringTerm(strings.TrimSpace(string(s))))
+ trimmed := strings.TrimSpace(string(s))
+ if trimmed == string(s) {
+ return iter(operands[0])
+ }
+
+ return iter(ast.InternedTerm(trimmed))
}
func builtinSprintf(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -537,7 +697,20 @@ func builtinSprintf(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term)
return builtins.NewOperandTypeErr(2, operands[1].Value, "array")
}
- args := make([]interface{}, astArr.Len())
+ // Optimized path for where sprintf is used as a "to_string" function for
+ // a single integer, i.e. sprintf("%d", [x]) where x is an integer.
+ if s == "%d" && astArr.Len() == 1 {
+ if n, ok := astArr.Elem(0).Value.(ast.Number); ok {
+ if i, ok := n.Int(); ok {
+ if interned := ast.InternedIntegerString(i); interned != nil {
+ return iter(interned)
+ }
+ return iter(ast.StringTerm(strconv.Itoa(i)))
+ }
+ }
+ }
+
+ args := make([]any, astArr.Len())
for i := range args {
switch v := astArr.Elem(i).Value.(type) {
@@ -558,7 +731,7 @@ func builtinSprintf(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term)
}
}
- return iter(ast.StringTerm(fmt.Sprintf(string(s), args...)))
+ return iter(ast.InternedTerm(fmt.Sprintf(string(s), args...)))
}
func builtinReverse(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -567,19 +740,27 @@ func builtinReverse(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term)
return err
}
- return iter(ast.StringTerm(reverseString(string(s))))
+ return iter(ast.InternedTerm(reverseString(string(s))))
}
func reverseString(str string) string {
- sRunes := []rune(str)
- length := len(sRunes)
- reversedRunes := make([]rune, length)
+ var buf []byte
+ var arr [255]byte
+ size := len(str)
+
+ if size < 255 {
+ buf = arr[:size:size]
+ } else {
+ buf = make([]byte, size)
+ }
- for index, r := range sRunes {
- reversedRunes[length-index-1] = r
+ for start := 0; start < size; {
+ r, n := utf8.DecodeRuneInString(str[start:])
+ start += n
+ utf8.EncodeRune(buf[size-start:], r)
}
- return string(reversedRunes)
+ return string(buf)
}
func init() {
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/subset.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/subset.go
similarity index 82%
rename from vendor/github.com/open-policy-agent/opa/topdown/subset.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/subset.go
index 7b152a5ef9..d50dc2db77 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/subset.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/subset.go
@@ -5,8 +5,8 @@
package topdown
import (
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
func bothObjects(t1, t2 *ast.Term) (bool, ast.Object, ast.Object) {
@@ -88,9 +88,8 @@ func arraySet(t1, t2 *ast.Term) (bool, *ast.Array, ast.Set) {
// associated with a key.
func objectSubset(super ast.Object, sub ast.Object) bool {
var superTerm *ast.Term
- isSubset := true
- sub.Until(func(key, subTerm *ast.Term) bool {
+ notSubset := sub.Until(func(key, subTerm *ast.Term) bool {
// This really wants to be a for loop, hence the somewhat
// weird internal structure. However, using Until() in this
// was is a performance optimization, as it avoids performing
@@ -98,10 +97,9 @@ func objectSubset(super ast.Object, sub ast.Object) bool {
superTerm = super.Get(key)
- // subTerm is can't be nil because we got it from Until(), so
+ // subTerm can't be nil because we got it from Until(), so
// we only need to verify that super is non-nil.
if superTerm == nil {
- isSubset = false
return true // break, not a subset
}
@@ -114,58 +112,39 @@ func objectSubset(super ast.Object, sub ast.Object) bool {
// them normally. If only one term is an object, then we
// do a normal comparison which will come up false.
if ok, superObj, subObj := bothObjects(superTerm, subTerm); ok {
- if !objectSubset(superObj, subObj) {
- isSubset = false
- return true // break, not a subset
- }
-
- return false // continue
+ return !objectSubset(superObj, subObj)
}
if ok, superSet, subSet := bothSets(superTerm, subTerm); ok {
- if !setSubset(superSet, subSet) {
- isSubset = false
- return true // break, not a subset
- }
-
- return false // continue
+ return !setSubset(superSet, subSet)
}
if ok, superArray, subArray := bothArrays(superTerm, subTerm); ok {
- if !arraySubset(superArray, subArray) {
- isSubset = false
- return true // break, not a subset
- }
-
- return false // continue
+ return !arraySubset(superArray, subArray)
}
// We have already checked for exact equality, as well as for
// all of the types of nested subsets we care about, so if we
// get here it means this isn't a subset.
- isSubset = false
return true // break, not a subset
})
- return isSubset
+ return !notSubset
}
// setSubset implements the subset operation on sets.
//
// Unlike in the object case, this is not recursive, we just compare values
-// using ast.Set.Contains() because we have no well defined way to "match up"
+// using ast.Set.Contains() because we have no well-defined way to "match up"
// objects that are in different sets.
func setSubset(super ast.Set, sub ast.Set) bool {
- isSubset := true
- sub.Until(func(t *ast.Term) bool {
- if !super.Contains(t) {
- isSubset = false
- return true
+ for _, elem := range sub.Slice() {
+ if !super.Contains(elem) {
+ return false
}
- return false
- })
+ }
- return isSubset
+ return true
}
// arraySubset implements the subset operation on arrays.
@@ -197,12 +176,12 @@ func arraySubset(super, sub *ast.Array) bool {
return false
}
- subElem := sub.Elem(subCursor)
superElem := super.Elem(superCursor + subCursor)
if superElem == nil {
return false
}
+ subElem := sub.Elem(subCursor)
if superElem.Value.Compare(subElem.Value) == 0 {
subCursor++
} else {
@@ -237,22 +216,22 @@ func builtinObjectSubset(_ BuiltinContext, operands []*ast.Term, iter func(*ast.
if ok, superObj, subObj := bothObjects(superTerm, subTerm); ok {
// Both operands are objects.
- return iter(ast.BooleanTerm(objectSubset(superObj, subObj)))
+ return iter(ast.InternedTerm(objectSubset(superObj, subObj)))
}
if ok, superSet, subSet := bothSets(superTerm, subTerm); ok {
// Both operands are sets.
- return iter(ast.BooleanTerm(setSubset(superSet, subSet)))
+ return iter(ast.InternedTerm(setSubset(superSet, subSet)))
}
if ok, superArray, subArray := bothArrays(superTerm, subTerm); ok {
// Both operands are sets.
- return iter(ast.BooleanTerm(arraySubset(superArray, subArray)))
+ return iter(ast.InternedTerm(arraySubset(superArray, subArray)))
}
if ok, superArray, subSet := arraySet(superTerm, subTerm); ok {
// Super operand is array and sub operand is set
- return iter(ast.BooleanTerm(arraySetSubset(superArray, subSet)))
+ return iter(ast.InternedTerm(arraySetSubset(superArray, subSet)))
}
return builtins.ErrOperand("both arguments object.subset must be of the same type or array and set")
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/template.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/template.go
similarity index 86%
rename from vendor/github.com/open-policy-agent/opa/topdown/template.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/template.go
index cf42477ee8..29038a6579 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/template.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/template.go
@@ -4,8 +4,8 @@ import (
"bytes"
"text/template"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
func renderTemplate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -19,7 +19,7 @@ func renderTemplate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term)
return err
}
- var templateVariables map[string]interface{}
+ var templateVariables map[string]any
if err := ast.As(templateVariablesTerm, &templateVariables); err != nil {
return err
diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/test.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/test.go
new file mode 100644
index 0000000000..02958d2264
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/test.go
@@ -0,0 +1,30 @@
+// Copyright 2025 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import "github.com/open-policy-agent/opa/v1/ast"
+
+const TestCaseOp Op = "TestCase"
+
+func builtinTestCase(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ e := &Event{
+ Op: TestCaseOp,
+ QueryID: bctx.QueryID,
+ Node: ast.NewExpr([]*ast.Term{
+ ast.NewTerm(ast.InternalTestCase.Ref()),
+ ast.NewTerm(operands[0].Value),
+ }),
+ }
+
+ for _, tracer := range bctx.QueryTracers {
+ tracer.TraceEvent(*e)
+ }
+
+ return iter(ast.BooleanTerm(true))
+}
+
+func init() {
+ RegisterBuiltinFunc(ast.InternalTestCase.Name, builtinTestCase)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/time.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/time.go
similarity index 92%
rename from vendor/github.com/open-policy-agent/opa/topdown/time.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/time.go
index ba3efc75dc..16eae3e0bd 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/time.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/time.go
@@ -6,7 +6,7 @@ package topdown
import (
"encoding/json"
- "fmt"
+ "errors"
"math"
"math/big"
"strconv"
@@ -14,8 +14,8 @@ import (
"time"
_ "time/tzdata" // this is needed to have LoadLocation when no filesystem tzdata is available
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
var tzCache map[string]*time.Location
@@ -29,7 +29,7 @@ var maxDateAllowedForNsConversion = time.Unix(0, math.MaxInt64)
func toSafeUnixNano(t time.Time, iter func(*ast.Term) error) error {
if t.Before(minDateAllowedForNsConversion) || t.After(maxDateAllowedForNsConversion) {
- return fmt.Errorf("time outside of valid range")
+ return errors.New("time outside of valid range")
}
return iter(ast.NewTerm(ast.Number(int64ToJSONNumber(t.UnixNano()))))
@@ -127,8 +127,8 @@ func builtinDate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) er
return err
}
year, month, day := t.Date()
- result := ast.NewArray(ast.IntNumberTerm(year), ast.IntNumberTerm(int(month)), ast.IntNumberTerm(day))
- return iter(ast.NewTerm(result))
+
+ return iter(ast.ArrayTerm(ast.InternedTerm(year), ast.InternedTerm(int(month)), ast.InternedTerm(day)))
}
func builtinClock(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -137,7 +137,7 @@ func builtinClock(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) e
return err
}
hour, minute, second := t.Clock()
- result := ast.NewArray(ast.IntNumberTerm(hour), ast.IntNumberTerm(minute), ast.IntNumberTerm(second))
+ result := ast.NewArray(ast.InternedTerm(hour), ast.InternedTerm(minute), ast.InternedTerm(second))
return iter(ast.NewTerm(result))
}
@@ -238,8 +238,8 @@ func builtinDiff(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) er
}
// END REDISTRIBUTION FROM APACHE 2.0 LICENSED PROJECT
- return iter(ast.ArrayTerm(ast.IntNumberTerm(year), ast.IntNumberTerm(month), ast.IntNumberTerm(day),
- ast.IntNumberTerm(hour), ast.IntNumberTerm(min), ast.IntNumberTerm(sec)))
+ return iter(ast.ArrayTerm(ast.InternedTerm(year), ast.InternedTerm(month), ast.InternedTerm(day),
+ ast.InternedTerm(hour), ast.InternedTerm(min), ast.InternedTerm(sec)))
}
func tzTime(a ast.Value) (t time.Time, lay string, err error) {
@@ -313,7 +313,7 @@ func tzTime(a ast.Value) (t time.Time, lay string, err error) {
f := builtins.NumberToFloat(value)
i64, acc := f.Int64()
if acc != big.Exact {
- return time.Time{}, layout, fmt.Errorf("timestamp too big")
+ return time.Time{}, layout, errors.New("timestamp too big")
}
t = time.Unix(0, i64).In(loc)
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/tokens.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/tokens.go
similarity index 62%
rename from vendor/github.com/open-policy-agent/opa/topdown/tokens.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/tokens.go
index 7457f1f15d..aea15dd26a 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/tokens.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/tokens.go
@@ -7,11 +7,13 @@ package topdown
import (
"crypto"
"crypto/ecdsa"
+ "crypto/ed25519"
"crypto/hmac"
"crypto/rsa"
"crypto/sha256"
"crypto/sha512"
"crypto/x509"
+ "encoding/base64"
"encoding/hex"
"encoding/json"
"encoding/pem"
@@ -21,25 +23,15 @@ import (
"math/big"
"strings"
- "github.com/open-policy-agent/opa/ast"
- "github.com/open-policy-agent/opa/internal/jwx/jwa"
- "github.com/open-policy-agent/opa/internal/jwx/jwk"
- "github.com/open-policy-agent/opa/internal/jwx/jws"
- "github.com/open-policy-agent/opa/topdown/builtins"
-)
+ "github.com/lestrrat-go/jwx/v3/jwk"
+ "github.com/lestrrat-go/jwx/v3/jws/jwsbb"
-var (
- jwtEncKey = ast.StringTerm("enc")
- jwtCtyKey = ast.StringTerm("cty")
- jwtIssKey = ast.StringTerm("iss")
- jwtExpKey = ast.StringTerm("exp")
- jwtNbfKey = ast.StringTerm("nbf")
- jwtAudKey = ast.StringTerm("aud")
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/topdown/cache"
)
-const (
- headerJwt = "JWT"
-)
+const headerJwt = "JWT"
// JSONWebToken represent the 3 parts (header, payload & signature) of
//
@@ -85,7 +77,7 @@ func builtinJWTDecode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter
return fmt.Errorf("JWT payload had invalid encoding: %v", err)
}
- if cty := token.decodedHeader.Get(jwtCtyKey); cty != nil {
+ if cty := token.decodedHeader.Get(ast.InternedTerm("cty")); cty != nil {
ctyVal := string(cty.Value.(ast.String))
// It is possible for the contents of a token to be another
// token as a result of nested signing or encryption. To handle
@@ -129,8 +121,8 @@ func builtinJWTDecode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter
}
// Implements RS256 JWT signature verification
-func builtinJWTVerifyRS256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
- result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha256.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error {
+func builtinJWTVerifyRS256(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha256.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error {
return rsa.VerifyPKCS1v15(
publicKey,
crypto.SHA256,
@@ -138,14 +130,14 @@ func builtinJWTVerifyRS256(_ BuiltinContext, operands []*ast.Term, iter func(*as
signature)
})
if err == nil {
- return iter(ast.NewTerm(result))
+ return iter(ast.InternedTerm(result))
}
return err
}
// Implements RS384 JWT signature verification
-func builtinJWTVerifyRS384(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
- result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha512.New384, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error {
+func builtinJWTVerifyRS384(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha512.New384, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error {
return rsa.VerifyPKCS1v15(
publicKey,
crypto.SHA384,
@@ -153,14 +145,14 @@ func builtinJWTVerifyRS384(_ BuiltinContext, operands []*ast.Term, iter func(*as
signature)
})
if err == nil {
- return iter(ast.NewTerm(result))
+ return iter(ast.InternedTerm(result))
}
return err
}
// Implements RS512 JWT signature verification
-func builtinJWTVerifyRS512(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
- result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha512.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error {
+func builtinJWTVerifyRS512(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha512.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error {
return rsa.VerifyPKCS1v15(
publicKey,
crypto.SHA512,
@@ -168,14 +160,14 @@ func builtinJWTVerifyRS512(_ BuiltinContext, operands []*ast.Term, iter func(*as
signature)
})
if err == nil {
- return iter(ast.NewTerm(result))
+ return iter(ast.InternedTerm(result))
}
return err
}
// Implements PS256 JWT signature verification
-func builtinJWTVerifyPS256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
- result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha256.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error {
+func builtinJWTVerifyPS256(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha256.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error {
return rsa.VerifyPSS(
publicKey,
crypto.SHA256,
@@ -184,14 +176,14 @@ func builtinJWTVerifyPS256(_ BuiltinContext, operands []*ast.Term, iter func(*as
nil)
})
if err == nil {
- return iter(ast.NewTerm(result))
+ return iter(ast.InternedTerm(result))
}
return err
}
// Implements PS384 JWT signature verification
-func builtinJWTVerifyPS384(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
- result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha512.New384, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error {
+func builtinJWTVerifyPS384(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha512.New384, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error {
return rsa.VerifyPSS(
publicKey,
crypto.SHA384,
@@ -200,14 +192,14 @@ func builtinJWTVerifyPS384(_ BuiltinContext, operands []*ast.Term, iter func(*as
nil)
})
if err == nil {
- return iter(ast.NewTerm(result))
+ return iter(ast.InternedTerm(result))
}
return err
}
// Implements PS512 JWT signature verification
-func builtinJWTVerifyPS512(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
- result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha512.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error {
+func builtinJWTVerifyPS512(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha512.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error {
return rsa.VerifyPSS(
publicKey,
crypto.SHA512,
@@ -216,50 +208,50 @@ func builtinJWTVerifyPS512(_ BuiltinContext, operands []*ast.Term, iter func(*as
nil)
})
if err == nil {
- return iter(ast.NewTerm(result))
+ return iter(ast.InternedTerm(result))
}
return err
}
// Implements RSA JWT signature verification.
-func builtinJWTVerifyRSA(a ast.Value, b ast.Value, hasher func() hash.Hash, verify func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error) (ast.Value, error) {
- return builtinJWTVerify(a, b, hasher, func(publicKey interface{}, digest []byte, signature []byte) error {
+func builtinJWTVerifyRSA(bctx BuiltinContext, jwt ast.Value, keyStr ast.Value, hasher func() hash.Hash, verify func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error) (bool, error) {
+ return builtinJWTVerify(bctx, jwt, keyStr, hasher, func(publicKey any, digest []byte, signature []byte) error {
publicKeyRsa, ok := publicKey.(*rsa.PublicKey)
if !ok {
- return fmt.Errorf("incorrect public key type")
+ return errors.New("incorrect public key type")
}
return verify(publicKeyRsa, digest, signature)
})
}
// Implements ES256 JWT signature verification.
-func builtinJWTVerifyES256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
- result, err := builtinJWTVerify(operands[0].Value, operands[1].Value, sha256.New, verifyES)
+func builtinJWTVerifyES256(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := builtinJWTVerify(bctx, operands[0].Value, operands[1].Value, sha256.New, verifyES)
if err == nil {
- return iter(ast.NewTerm(result))
+ return iter(ast.InternedTerm(result))
}
return err
}
// Implements ES384 JWT signature verification
-func builtinJWTVerifyES384(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
- result, err := builtinJWTVerify(operands[0].Value, operands[1].Value, sha512.New384, verifyES)
+func builtinJWTVerifyES384(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := builtinJWTVerify(bctx, operands[0].Value, operands[1].Value, sha512.New384, verifyES)
if err == nil {
- return iter(ast.NewTerm(result))
+ return iter(ast.InternedTerm(result))
}
return err
}
// Implements ES512 JWT signature verification
-func builtinJWTVerifyES512(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
- result, err := builtinJWTVerify(operands[0].Value, operands[1].Value, sha512.New, verifyES)
+func builtinJWTVerifyES512(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := builtinJWTVerify(bctx, operands[0].Value, operands[1].Value, sha512.New, verifyES)
if err == nil {
- return iter(ast.NewTerm(result))
+ return iter(ast.InternedTerm(result))
}
return err
}
-func verifyES(publicKey interface{}, digest []byte, signature []byte) (err error) {
+func verifyES(publicKey any, digest []byte, signature []byte) (err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("ECDSA signature verification error: %v", r)
@@ -267,7 +259,7 @@ func verifyES(publicKey interface{}, digest []byte, signature []byte) (err error
}()
publicKeyEcdsa, ok := publicKey.(*ecdsa.PublicKey)
if !ok {
- return fmt.Errorf("incorrect public key type")
+ return errors.New("incorrect public key type")
}
r, s := &big.Int{}, &big.Int{}
n := len(signature) / 2
@@ -276,13 +268,38 @@ func verifyES(publicKey interface{}, digest []byte, signature []byte) (err error
if ecdsa.Verify(publicKeyEcdsa, digest, r, s) {
return nil
}
- return fmt.Errorf("ECDSA signature verification error")
+ return errors.New("ECDSA signature verification error")
+}
+
+// Implements EdDSA JWT signature verification
+func builtinJWTVerifyEdDSA(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := builtinJWTVerify(bctx, operands[0].Value, operands[1].Value, nil, verifyEd25519)
+ if err == nil {
+ return iter(ast.InternedTerm(result))
+ }
+ return err
+}
+
+func verifyEd25519(publicKey any, digest []byte, signature []byte) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = fmt.Errorf("EdDSA signature verification error: %v", r)
+ }
+ }()
+ publicKeyEcdsa, ok := publicKey.(ed25519.PublicKey)
+ if !ok {
+ return errors.New("incorrect public key type")
+ }
+ if ed25519.Verify(publicKeyEcdsa, digest, signature) {
+ return nil
+ }
+ return errors.New("ECDSA signature verification error")
}
type verificationKey struct {
alg string
kid string
- key interface{}
+ key any
}
// getKeysFromCertOrJWK returns the public key found in a X.509 certificate or JWK key(s).
@@ -291,7 +308,7 @@ type verificationKey struct {
func getKeysFromCertOrJWK(certificate string) ([]verificationKey, error) {
if block, rest := pem.Decode([]byte(certificate)); block != nil {
if len(rest) > 0 {
- return nil, fmt.Errorf("extra data after a PEM certificate block")
+ return nil, errors.New("extra data after a PEM certificate block")
}
if block.Type == blockTypeCertificate {
@@ -311,7 +328,7 @@ func getKeysFromCertOrJWK(certificate string) ([]verificationKey, error) {
return []verificationKey{{key: key}}, nil
}
- return nil, fmt.Errorf("failed to extract a Key from the PEM certificate")
+ return nil, errors.New("failed to extract a Key from the PEM certificate")
}
jwks, err := jwk.ParseString(certificate)
@@ -319,15 +336,36 @@ func getKeysFromCertOrJWK(certificate string) ([]verificationKey, error) {
return nil, fmt.Errorf("failed to parse a JWK key (set): %w", err)
}
- keys := make([]verificationKey, 0, len(jwks.Keys))
- for _, k := range jwks.Keys {
- key, err := k.Materialize()
- if err != nil {
+ keys := make([]verificationKey, 0, jwks.Len())
+ for i := range jwks.Len() {
+ k, ok := jwks.Key(i)
+ if !ok {
+ continue
+ }
+ var key any
+ if err := jwk.Export(k, &key); err != nil {
return nil, err
}
+ var alg string
+ if algInterface, ok := k.Algorithm(); ok {
+ alg = algInterface.String()
+ }
+
+ // Skip keys with unknown/unsupported algorithms
+ if alg != "" {
+ if _, ok := tokenAlgorithms[alg]; !ok {
+ continue
+ }
+ }
+
+ var kid string
+ if kidValue, ok := k.KeyID(); ok {
+ kid = kidValue
+ }
+
keys = append(keys, verificationKey{
- alg: k.GetAlgorithm().String(),
- kid: k.GetKeyID(),
+ alg: alg,
+ kid: kid,
key: key,
})
}
@@ -345,34 +383,43 @@ func getKeyByKid(kid string, keys []verificationKey) *verificationKey {
}
// Implements JWT signature verification.
-func builtinJWTVerify(a ast.Value, b ast.Value, hasher func() hash.Hash, verify func(publicKey interface{}, digest []byte, signature []byte) error) (ast.Value, error) {
- token, err := decodeJWT(a)
+func builtinJWTVerify(bctx BuiltinContext, jwt ast.Value, keyStr ast.Value, hasher func() hash.Hash, verify func(publicKey any, digest []byte, signature []byte) error) (bool, error) {
+ if found, _, _, valid := getTokenFromCache(bctx, jwt, keyStr); found {
+ return valid, nil
+ }
+
+ token, err := decodeJWT(jwt)
if err != nil {
- return nil, err
+ return false, err
}
- s, err := builtins.StringOperand(b, 2)
+ s, err := builtins.StringOperand(keyStr, 2)
if err != nil {
- return nil, err
+ return false, err
}
keys, err := getKeysFromCertOrJWK(string(s))
if err != nil {
- return nil, err
+ return false, err
}
signature, err := token.decodeSignature()
if err != nil {
- return nil, err
+ return false, err
}
err = token.decodeHeader()
if err != nil {
- return nil, err
+ return false, err
}
header, err := parseTokenHeader(token)
if err != nil {
- return nil, err
+ return false, err
+ }
+
+ done := func(valid bool) (bool, error) {
+ putTokenInCache(bctx, jwt, keyStr, nil, nil, valid)
+ return valid, nil
}
// Validate the JWT signature
@@ -383,7 +430,7 @@ func builtinJWTVerify(a ast.Value, b ast.Value, hasher func() hash.Hash, verify
if key := getKeyByKid(header.kid, keys); key != nil {
err = verify(key.key, getInputSHA([]byte(token.header+"."+token.payload), hasher), []byte(signature))
- return ast.Boolean(err == nil), nil
+ return done(err == nil)
}
}
@@ -395,7 +442,7 @@ func builtinJWTVerify(a ast.Value, b ast.Value, hasher func() hash.Hash, verify
// we'll need to verify to find out
err = verify(key.key, getInputSHA([]byte(token.header+"."+token.payload), hasher), []byte(signature))
if err == nil {
- return ast.Boolean(true), nil
+ return done(true)
}
} else {
if header.alg != key.alg {
@@ -403,48 +450,32 @@ func builtinJWTVerify(a ast.Value, b ast.Value, hasher func() hash.Hash, verify
}
err = verify(key.key, getInputSHA([]byte(token.header+"."+token.payload), hasher), []byte(signature))
if err == nil {
- return ast.Boolean(true), nil
+ return done(true)
}
}
}
// None of the keys worked, return false
- return ast.Boolean(false), nil
+ return done(false)
}
// Implements HS256 (secret) JWT signature verification
-func builtinJWTVerifyHS256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
- // Decode the JSON Web Token
- token, err := decodeJWT(operands[0].Value)
- if err != nil {
- return err
- }
-
- // Process Secret input
- astSecret, err := builtins.StringOperand(operands[1].Value, 2)
- if err != nil {
- return err
- }
- secret := string(astSecret)
-
- mac := hmac.New(sha256.New, []byte(secret))
- _, err = mac.Write([]byte(token.header + "." + token.payload))
- if err != nil {
- return err
- }
+func builtinJWTVerifyHS256(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ return builtinJWTVerifyHS(bctx, operands, sha256.New, iter)
+}
- signature, err := token.decodeSignature()
- if err != nil {
- return err
- }
+// Implements HS384 JWT signature verification
+func builtinJWTVerifyHS384(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ return builtinJWTVerifyHS(bctx, operands, sha512.New384, iter)
+}
- return iter(ast.NewTerm(ast.Boolean(hmac.Equal([]byte(signature), mac.Sum(nil)))))
+// Implements HS512 JWT signature verification
+func builtinJWTVerifyHS512(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ return builtinJWTVerifyHS(bctx, operands, sha512.New, iter)
}
-// Implements HS384 JWT signature verification
-func builtinJWTVerifyHS384(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
- // Decode the JSON Web Token
- token, err := decodeJWT(operands[0].Value)
+func builtinJWTVerifyHS(bctx BuiltinContext, operands []*ast.Term, hashF func() hash.Hash, iter func(*ast.Term) error) error {
+ jwt, err := builtins.StringOperand(operands[0].Value, 1)
if err != nil {
return err
}
@@ -454,38 +485,20 @@ func builtinJWTVerifyHS384(_ BuiltinContext, operands []*ast.Term, iter func(*as
if err != nil {
return err
}
- secret := string(astSecret)
- mac := hmac.New(sha512.New384, []byte(secret))
- _, err = mac.Write([]byte(token.header + "." + token.payload))
- if err != nil {
- return err
- }
-
- signature, err := token.decodeSignature()
- if err != nil {
- return err
+ if found, _, _, valid := getTokenFromCache(bctx, jwt, astSecret); found {
+ return iter(ast.InternedTerm(valid))
}
- return iter(ast.NewTerm(ast.Boolean(hmac.Equal([]byte(signature), mac.Sum(nil)))))
-}
-
-// Implements HS512 JWT signature verification
-func builtinJWTVerifyHS512(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
// Decode the JSON Web Token
- token, err := decodeJWT(operands[0].Value)
+ token, err := decodeJWT(jwt)
if err != nil {
return err
}
- // Process Secret input
- astSecret, err := builtins.StringOperand(operands[1].Value, 2)
- if err != nil {
- return err
- }
secret := string(astSecret)
- mac := hmac.New(sha512.New, []byte(secret))
+ mac := hmac.New(hashF, []byte(secret))
_, err = mac.Write([]byte(token.header + "." + token.payload))
if err != nil {
return err
@@ -496,7 +509,11 @@ func builtinJWTVerifyHS512(_ BuiltinContext, operands []*ast.Term, iter func(*as
return err
}
- return iter(ast.NewTerm(ast.Boolean(hmac.Equal([]byte(signature), mac.Sum(nil)))))
+ valid := hmac.Equal([]byte(signature), mac.Sum(nil))
+
+ putTokenInCache(bctx, jwt, astSecret, nil, nil, valid)
+
+ return iter(ast.InternedTerm(valid))
}
// -- Full JWT verification and decoding --
@@ -553,7 +570,7 @@ var tokenConstraintTypes = map[string]tokenConstraintHandler{
func tokenConstraintCert(value ast.Value, constraints *tokenConstraints) error {
s, ok := value.(ast.String)
if !ok {
- return fmt.Errorf("cert constraint: must be a string")
+ return errors.New("cert constraint: must be a string")
}
keys, err := getKeysFromCertOrJWK(string(s))
@@ -578,14 +595,14 @@ func tokenConstraintTime(value ast.Value, constraints *tokenConstraints) error {
func timeFromValue(value ast.Value) (float64, error) {
time, ok := value.(ast.Number)
if !ok {
- return 0, fmt.Errorf("token time constraint: must be a number")
+ return 0, errors.New("token time constraint: must be a number")
}
timeFloat, ok := time.Float64()
if !ok {
- return 0, fmt.Errorf("token time constraint: unvalid float64")
+ return 0, errors.New("token time constraint: unvalid float64")
}
if timeFloat < 0 {
- return 0, fmt.Errorf("token time constraint: must not be negative")
+ return 0, errors.New("token time constraint: must not be negative")
}
return timeFloat, nil
}
@@ -636,10 +653,10 @@ func (constraints *tokenConstraints) validate() error {
keys++
}
if keys > 1 {
- return fmt.Errorf("duplicate key constraints")
+ return errors.New("duplicate key constraints")
}
if keys < 1 {
- return fmt.Errorf("no key constraint")
+ return errors.New("no key constraint")
}
return nil
}
@@ -647,19 +664,13 @@ func (constraints *tokenConstraints) validate() error {
// verify verifies a JWT using the constraints and the algorithm from the header
func (constraints *tokenConstraints) verify(kid, alg, header, payload, signature string) error {
// Construct the payload
- plaintext := []byte(header)
- plaintext = append(plaintext, []byte(".")...)
- plaintext = append(plaintext, payload...)
- // Look up the algorithm
- a, ok := tokenAlgorithms[alg]
- if !ok {
- return fmt.Errorf("unknown JWS algorithm: %s", alg)
- }
+ plaintext := append(append([]byte(header), '.'), []byte(payload)...)
+
// If we're configured with asymmetric key(s) then only trust that
if constraints.keys != nil {
if kid != "" {
if key := getKeyByKid(kid, constraints.keys); key != nil {
- err := a.verify(key.key, a.hash, plaintext, []byte(signature))
+ err := jwsbb.Verify(key.key, alg, plaintext, []byte(signature))
if err != nil {
return errSignatureNotVerified
}
@@ -670,7 +681,7 @@ func (constraints *tokenConstraints) verify(kid, alg, header, payload, signature
verified := false
for _, key := range constraints.keys {
if key.alg == "" {
- err := a.verify(key.key, a.hash, plaintext, []byte(signature))
+ err := jwsbb.Verify(key.key, alg, plaintext, []byte(signature))
if err == nil {
verified = true
break
@@ -679,7 +690,7 @@ func (constraints *tokenConstraints) verify(kid, alg, header, payload, signature
if alg != key.alg {
continue
}
- err := a.verify(key.key, a.hash, plaintext, []byte(signature))
+ err := jwsbb.Verify(key.key, alg, plaintext, []byte(signature))
if err == nil {
verified = true
break
@@ -693,7 +704,11 @@ func (constraints *tokenConstraints) verify(kid, alg, header, payload, signature
return nil
}
if constraints.secret != "" {
- return a.verify([]byte(constraints.secret), a.hash, plaintext, []byte(signature))
+ err := jwsbb.Verify([]byte(constraints.secret), alg, plaintext, []byte(signature))
+ if err != nil {
+ return errSignatureNotVerified
+ }
+ return nil
}
// (*tokenConstraints)validate() should prevent this happening
return errors.New("unexpectedly found no keys to trust")
@@ -720,101 +735,26 @@ func (constraints *tokenConstraints) validAudience(aud ast.Value) bool {
// JWT algorithms
-type (
- tokenVerifyFunction func(key interface{}, hash crypto.Hash, payload []byte, signature []byte) error
- tokenVerifyAsymmetricFunction func(key interface{}, hash crypto.Hash, digest []byte, signature []byte) error
-)
-
-// jwtAlgorithm describes a JWS 'alg' value
-type tokenAlgorithm struct {
- hash crypto.Hash
- verify tokenVerifyFunction
-}
-
// tokenAlgorithms is the known JWT algorithms
-var tokenAlgorithms = map[string]tokenAlgorithm{
- "RS256": {crypto.SHA256, verifyAsymmetric(verifyRSAPKCS)},
- "RS384": {crypto.SHA384, verifyAsymmetric(verifyRSAPKCS)},
- "RS512": {crypto.SHA512, verifyAsymmetric(verifyRSAPKCS)},
- "PS256": {crypto.SHA256, verifyAsymmetric(verifyRSAPSS)},
- "PS384": {crypto.SHA384, verifyAsymmetric(verifyRSAPSS)},
- "PS512": {crypto.SHA512, verifyAsymmetric(verifyRSAPSS)},
- "ES256": {crypto.SHA256, verifyAsymmetric(verifyECDSA)},
- "ES384": {crypto.SHA384, verifyAsymmetric(verifyECDSA)},
- "ES512": {crypto.SHA512, verifyAsymmetric(verifyECDSA)},
- "HS256": {crypto.SHA256, verifyHMAC},
- "HS384": {crypto.SHA384, verifyHMAC},
- "HS512": {crypto.SHA512, verifyHMAC},
+var tokenAlgorithms = map[string]struct{}{
+ "RS256": {},
+ "RS384": {},
+ "RS512": {},
+ "PS256": {},
+ "PS384": {},
+ "PS512": {},
+ "ES256": {},
+ "ES384": {},
+ "ES512": {},
+ "HS256": {},
+ "HS384": {},
+ "HS512": {},
+ "EdDSA": {},
}
// errSignatureNotVerified is returned when a signature cannot be verified.
var errSignatureNotVerified = errors.New("signature not verified")
-func verifyHMAC(key interface{}, hash crypto.Hash, payload []byte, signature []byte) error {
- macKey, ok := key.([]byte)
- if !ok {
- return fmt.Errorf("incorrect symmetric key type")
- }
- mac := hmac.New(hash.New, macKey)
- if _, err := mac.Write(payload); err != nil {
- return err
- }
- if !hmac.Equal(signature, mac.Sum([]byte{})) {
- return errSignatureNotVerified
- }
- return nil
-}
-
-func verifyAsymmetric(verify tokenVerifyAsymmetricFunction) tokenVerifyFunction {
- return func(key interface{}, hash crypto.Hash, payload []byte, signature []byte) error {
- h := hash.New()
- h.Write(payload)
- return verify(key, hash, h.Sum([]byte{}), signature)
- }
-}
-
-func verifyRSAPKCS(key interface{}, hash crypto.Hash, digest []byte, signature []byte) error {
- publicKeyRsa, ok := key.(*rsa.PublicKey)
- if !ok {
- return fmt.Errorf("incorrect public key type")
- }
- if err := rsa.VerifyPKCS1v15(publicKeyRsa, hash, digest, signature); err != nil {
- return errSignatureNotVerified
- }
- return nil
-}
-
-func verifyRSAPSS(key interface{}, hash crypto.Hash, digest []byte, signature []byte) error {
- publicKeyRsa, ok := key.(*rsa.PublicKey)
- if !ok {
- return fmt.Errorf("incorrect public key type")
- }
- if err := rsa.VerifyPSS(publicKeyRsa, hash, digest, signature, nil); err != nil {
- return errSignatureNotVerified
- }
- return nil
-}
-
-func verifyECDSA(key interface{}, _ crypto.Hash, digest []byte, signature []byte) (err error) {
- defer func() {
- if r := recover(); r != nil {
- err = fmt.Errorf("ECDSA signature verification error: %v", r)
- }
- }()
- publicKeyEcdsa, ok := key.(*ecdsa.PublicKey)
- if !ok {
- return fmt.Errorf("incorrect public key type")
- }
- r, s := &big.Int{}, &big.Int{}
- n := len(signature) / 2
- r.SetBytes(signature[:n])
- s.SetBytes(signature[n:])
- if ecdsa.Verify(publicKeyEcdsa, digest, r, s) {
- return nil
- }
- return errSignatureNotVerified
-}
-
// JWT header parsing and parameters. See tokens_test.go for unit tests.
// tokenHeaderType represents a recognized JWT header field
@@ -852,19 +792,19 @@ var tokenHeaderTypes = map[string]tokenHeaderHandler{
func tokenHeaderCrit(header *tokenHeader, value ast.Value) error {
v, ok := value.(*ast.Array)
if !ok {
- return fmt.Errorf("crit: must be a list")
+ return errors.New("crit: must be a list")
}
header.crit = map[string]bool{}
_ = v.Iter(func(elem *ast.Term) error {
tv, ok := elem.Value.(ast.String)
if !ok {
- return fmt.Errorf("crit: must be a list of strings")
+ return errors.New("crit: must be a list of strings")
}
header.crit[string(tv)] = true
return nil
})
if len(header.crit) == 0 {
- return fmt.Errorf("crit: must be a nonempty list") // 'MUST NOT' use the empty list
+ return errors.New("crit: must be a nonempty list") // 'MUST NOT' use the empty list
}
return nil
}
@@ -913,42 +853,48 @@ func (header *tokenHeader) valid() bool {
return true
}
-func commonBuiltinJWTEncodeSign(bctx BuiltinContext, inputHeaders, jwsPayload, jwkSrc string, iter func(*ast.Term) error) error {
- keys, err := jwk.ParseString(jwkSrc)
+func commonBuiltinJWTEncodeSign(bctx BuiltinContext, inputHeaders, jwsPayload, jwkSrc []byte, iter func(*ast.Term) error) error {
+ keys, err := jwk.Parse(jwkSrc)
if err != nil {
return err
}
- key, err := keys.Keys[0].Materialize()
- if err != nil {
- return err
+
+ if keys.Len() == 0 {
+ return errors.New("no keys found in JWK set")
}
- if jwk.GetKeyTypeFromKey(key) != keys.Keys[0].GetKeyType() {
- return fmt.Errorf("JWK derived key type and keyType parameter do not match")
+
+ key, ok := keys.Key(0)
+ if !ok {
+ return errors.New("failed to get first key from JWK set")
}
- standardHeaders := &jws.StandardHeaders{}
- jwsHeaders := []byte(inputHeaders)
- err = json.Unmarshal(jwsHeaders, standardHeaders)
+ // Parse headers to get algorithm.
+ headers := jwsbb.HeaderParse(inputHeaders)
+ algStr, err := jwsbb.HeaderGetString(headers, "alg")
if err != nil {
- return err
+ return fmt.Errorf("missing or invalid 'alg' header: %w", err)
}
- alg := standardHeaders.GetAlgorithm()
- if alg == jwa.Unsupported {
- return fmt.Errorf("unknown signature algorithm")
+ // Make sure the algorithm is supported.
+ _, ok = tokenAlgorithms[algStr]
+ if !ok {
+ return fmt.Errorf("unknown JWS algorithm: %s", algStr)
}
- if (standardHeaders.Type == "" || standardHeaders.Type == headerJwt) && !json.Valid([]byte(jwsPayload)) {
- return fmt.Errorf("type is JWT but payload is not JSON")
+ typ, err := jwsbb.HeaderGetString(headers, "typ")
+ if (err != nil || typ == headerJwt) && !json.Valid(jwsPayload) {
+ return errors.New("type is JWT but payload is not JSON")
}
- // process payload and sign
- var jwsCompact []byte
- jwsCompact, err = jws.SignLiteral([]byte(jwsPayload), alg, key, jwsHeaders, bctx.Seed)
+ payload := jwsbb.SignBuffer(nil, inputHeaders, jwsPayload, base64.RawURLEncoding, true)
+
+ signature, err := jwsbb.Sign(key, algStr, payload, bctx.Seed)
if err != nil {
return err
}
- return iter(ast.StringTerm(string(jwsCompact)))
+ jwsCompact := string(payload) + "." + base64.RawURLEncoding.EncodeToString(signature)
+
+ return iter(ast.StringTerm(jwsCompact))
}
func builtinJWTEncodeSign(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
@@ -984,9 +930,9 @@ func builtinJWTEncodeSign(bctx BuiltinContext, operands []*ast.Term, iter func(*
return commonBuiltinJWTEncodeSign(
bctx,
- string(inputHeadersBs),
- string(payloadBs),
- string(signatureBs),
+ inputHeadersBs,
+ payloadBs,
+ signatureBs,
iter,
)
}
@@ -1004,7 +950,7 @@ func builtinJWTEncodeSignRaw(bctx BuiltinContext, operands []*ast.Term, iter fun
if err != nil {
return err
}
- return commonBuiltinJWTEncodeSign(bctx, string(inputHeaders), string(jwsPayload), string(jwkSrc), iter)
+ return commonBuiltinJWTEncodeSign(bctx, []byte(inputHeaders), []byte(jwsPayload), []byte(jwkSrc), iter)
}
// Implements full JWT decoding, validation and verification.
@@ -1024,9 +970,9 @@ func builtinJWTDecodeVerify(bctx BuiltinContext, operands []*ast.Term, iter func
}
unverified := ast.ArrayTerm(
- ast.BooleanTerm(false),
- ast.NewTerm(ast.NewObject()),
- ast.NewTerm(ast.NewObject()),
+ ast.InternedTerm(false),
+ ast.InternedEmptyObject,
+ ast.InternedEmptyObject,
)
constraints, err := parseTokenConstraints(b, bctx.Time)
if err != nil {
@@ -1036,61 +982,119 @@ func builtinJWTDecodeVerify(bctx BuiltinContext, operands []*ast.Term, iter func
return err
}
var token *JSONWebToken
- var p *ast.Term
- for {
- // RFC7519 7.2 #1-2 split into parts
- if token, err = decodeJWT(a); err != nil {
- return err
- }
- // RFC7519 7.2 #3, #4, #6
- if err := token.decodeHeader(); err != nil {
- return err
- }
- // RFC7159 7.2 #5 (and RFC7159 5.2 #5) validate header fields
- header, err := parseTokenHeader(token)
- if err != nil {
- return err
- }
- if !header.valid() {
- return iter(unverified)
- }
- // Check constraints that impact signature verification.
- if constraints.alg != "" && constraints.alg != header.alg {
+ var payload ast.Object
+ var header ast.Object
+
+ // FIXME: optimize
+ k, _ := b.Filter(ast.NewObject(
+ ast.Item(ast.InternedTerm("secret"), ast.InternedEmptyObject),
+ ast.Item(ast.InternedTerm("cert"), ast.InternedEmptyObject),
+ ))
+
+ if found, th, tp, validSignature := getTokenFromCache(bctx, a, k); found {
+ if !validSignature {
+ // For the given token and key(s), the signature is invalid
return iter(unverified)
}
- // RFC7159 7.2 #7 verify the signature
- signature, err := token.decodeSignature()
- if err != nil {
- return err
+
+ if th != nil && tp != nil {
+ header = th
+ payload = tp
+ } else {
+ // Cache entry was created by one of the other built-ins that doesn't decode header/payload
+
+ if token, err = decodeJWT(a); err != nil {
+ return err
+ }
+
+ header = token.decodedHeader
+
+ p, err := getResult(builtinBase64UrlDecode, ast.StringTerm(token.payload))
+ if err != nil {
+ return fmt.Errorf("JWT payload had invalid encoding: %v", err)
+ }
+
+ payload, err = extractJSONObject(string(p.Value.(ast.String)))
+ if err != nil {
+ return err
+ }
+
+ putTokenInCache(bctx, a, k, header, payload, true)
}
- if err := constraints.verify(header.kid, header.alg, token.header, token.payload, signature); err != nil {
- if err == errSignatureNotVerified {
+ } else {
+ var p *ast.Term
+
+ for {
+ // RFC7519 7.2 #1-2 split into parts
+ if token, err = decodeJWT(a); err != nil {
+ return err
+ }
+
+ // RFC7519 7.2 #3, #4, #6
+ if err := token.decodeHeader(); err != nil {
+ return err
+ }
+
+ // RFC7159 7.2 #5 (and RFC7159 5.2 #5) validate header fields
+ header, err := parseTokenHeader(token)
+ if err != nil {
+ return err
+ }
+
+ if !header.valid() {
return iter(unverified)
}
- return err
+
+ // Check constraints that impact signature verification.
+ if constraints.alg != "" && constraints.alg != header.alg {
+ return iter(unverified)
+ }
+
+ // RFC7159 7.2 #7 verify the signature
+ signature, err := token.decodeSignature()
+ if err != nil {
+ return err
+ }
+
+ if err := constraints.verify(header.kid, header.alg, token.header, token.payload, signature); err != nil {
+ if err == errSignatureNotVerified {
+ putTokenInCache(bctx, a, k, nil, nil, false)
+ return iter(unverified)
+ }
+ return err
+ }
+
+ // RFC7159 7.2 #9-10 decode the payload
+ p, err = getResult(builtinBase64UrlDecode, ast.StringTerm(token.payload))
+ if err != nil {
+ return fmt.Errorf("JWT payload had invalid encoding: %v", err)
+ }
+
+ // RFC7159 7.2 #8 and 5.2 cty
+ if strings.EqualFold(header.cty, headerJwt) {
+ // Nested JWT, go round again with payload as first argument
+ a = p.Value
+ continue
+ }
+
+ // Non-nested JWT (or we've reached the bottom of the nesting).
+ break
}
- // RFC7159 7.2 #9-10 decode the payload
- p, err = getResult(builtinBase64UrlDecode, ast.StringTerm(token.payload))
+
+ payload, err = extractJSONObject(string(p.Value.(ast.String)))
if err != nil {
- return fmt.Errorf("JWT payload had invalid encoding: %v", err)
- }
- // RFC7159 7.2 #8 and 5.2 cty
- if strings.ToUpper(header.cty) == headerJwt {
- // Nested JWT, go round again with payload as first argument
- a = p.Value
- continue
+ return err
}
- // Non-nested JWT (or we've reached the bottom of the nesting).
- break
- }
- payload, err := extractJSONObject(string(p.Value.(ast.String)))
- if err != nil {
- return err
+
+ header = token.decodedHeader
+
+ putTokenInCache(bctx, a, k, header, payload, true)
}
+
// Check registered claim names against constraints or environment
// RFC7159 4.1.1 iss
if constraints.iss != "" {
- if iss := payload.Get(jwtIssKey); iss != nil {
+ if iss := payload.Get(ast.InternedTerm("iss")); iss != nil {
issVal := string(iss.Value.(ast.String))
if constraints.iss != issVal {
return iter(unverified)
@@ -1100,7 +1104,7 @@ func builtinJWTDecodeVerify(bctx BuiltinContext, operands []*ast.Term, iter func
}
}
// RFC7159 4.1.3 aud
- if aud := payload.Get(jwtAudKey); aud != nil {
+ if aud := payload.Get(ast.InternedTerm("aud")); aud != nil {
if !constraints.validAudience(aud.Value) {
return iter(unverified)
}
@@ -1110,35 +1114,35 @@ func builtinJWTDecodeVerify(bctx BuiltinContext, operands []*ast.Term, iter func
}
}
// RFC7159 4.1.4 exp
- if exp := payload.Get(jwtExpKey); exp != nil {
- switch exp.Value.(type) {
+ if exp := payload.Get(ast.InternedTerm("exp")); exp != nil {
+ switch v := exp.Value.(type) {
case ast.Number:
// constraints.time is in nanoseconds but exp Value is in seconds
compareTime := ast.FloatNumberTerm(constraints.time / 1000000000)
- if ast.Compare(compareTime, exp.Value.(ast.Number)) != -1 {
+ if ast.Compare(compareTime, v) != -1 {
return iter(unverified)
}
default:
- return fmt.Errorf("exp value must be a number")
+ return errors.New("exp value must be a number")
}
}
// RFC7159 4.1.5 nbf
- if nbf := payload.Get(jwtNbfKey); nbf != nil {
- switch nbf.Value.(type) {
+ if nbf := payload.Get(ast.InternedTerm("nbf")); nbf != nil {
+ switch v := nbf.Value.(type) {
case ast.Number:
// constraints.time is in nanoseconds but nbf Value is in seconds
compareTime := ast.FloatNumberTerm(constraints.time / 1000000000)
- if ast.Compare(compareTime, nbf.Value.(ast.Number)) == -1 {
+ if ast.Compare(compareTime, v) == -1 {
return iter(unverified)
}
default:
- return fmt.Errorf("nbf value must be a number")
+ return errors.New("nbf value must be a number")
}
}
verified := ast.ArrayTerm(
- ast.BooleanTerm(true),
- ast.NewTerm(token.decodedHeader),
+ ast.InternedTerm(true),
+ ast.NewTerm(header),
ast.NewTerm(payload),
)
return iter(verified)
@@ -1191,7 +1195,7 @@ func validateJWTHeader(h string) (ast.Object, error) {
// won't support it for now.
// This code checks which kind of JWT we are dealing with according to
// RFC 7516 Section 9: https://tools.ietf.org/html/rfc7516#section-9
- if header.Get(jwtEncKey) != nil {
+ if header.Get(ast.InternedTerm("enc")) != nil {
return nil, errors.New("JWT is a JWE object, which is not supported")
}
@@ -1221,12 +1225,72 @@ func extractJSONObject(s string) (ast.Object, error) {
// getInputSha returns the SHA checksum of the input
func getInputSHA(input []byte, h func() hash.Hash) []byte {
+ if h == nil {
+ return input
+ }
+
hasher := h()
hasher.Write(input)
return hasher.Sum(nil)
}
+type jwtCacheEntry struct {
+ payload ast.Object
+ header ast.Object
+ validSignature bool
+}
+
+const tokenCacheName = "io_jwt"
+
+func getTokenFromCache(bctx BuiltinContext, serializedJwt ast.Value, publicKey ast.Value) (bool, ast.Object, ast.Object, bool) {
+ if bctx.InterQueryBuiltinValueCache == nil {
+ return false, nil, nil, false
+ }
+
+ c := bctx.InterQueryBuiltinValueCache.GetCache(tokenCacheName)
+ if c == nil {
+ return false, nil, nil, false
+ }
+
+ key := createTokenCacheKey(serializedJwt, publicKey)
+
+ entry, ok := c.Get(key)
+ if !ok {
+ return false, nil, nil, false
+ }
+
+ if jwtEntry, ok := entry.(jwtCacheEntry); ok {
+ return true, jwtEntry.header, jwtEntry.payload, jwtEntry.validSignature
+ }
+
+ return false, nil, nil, false
+}
+
+func putTokenInCache(bctx BuiltinContext, serializedJwt ast.Value, publicKey ast.Value, header ast.Object, payload ast.Object, validSignature bool) {
+ if bctx.InterQueryBuiltinValueCache == nil {
+ return
+ }
+
+ c := bctx.InterQueryBuiltinValueCache.GetCache(tokenCacheName)
+ if c == nil {
+ return
+ }
+
+ key := createTokenCacheKey(serializedJwt, publicKey)
+
+ c.Insert(key, jwtCacheEntry{header: header, payload: payload, validSignature: validSignature})
+}
+
+func createTokenCacheKey(serializedJwt ast.Value, publicKey ast.Value) ast.Value {
+ // We need to create a key that is unique to the serialized JWT (for lookup) and the public key used to verify it,
+ // so that we don't get a misleading cached validation result for a different, invalid key.
+ return ast.NewArray(ast.NewTerm(serializedJwt), ast.NewTerm(publicKey))
+}
+
func init() {
+ // By default, the JWT cache is disabled.
+ cache.RegisterDefaultInterQueryBuiltinValueCacheConfig(tokenCacheName, nil)
+
RegisterBuiltinFunc(ast.JWTDecode.Name, builtinJWTDecode)
RegisterBuiltinFunc(ast.JWTVerifyRS256.Name, builtinJWTVerifyRS256)
RegisterBuiltinFunc(ast.JWTVerifyRS384.Name, builtinJWTVerifyRS384)
@@ -1237,6 +1301,7 @@ func init() {
RegisterBuiltinFunc(ast.JWTVerifyES256.Name, builtinJWTVerifyES256)
RegisterBuiltinFunc(ast.JWTVerifyES384.Name, builtinJWTVerifyES384)
RegisterBuiltinFunc(ast.JWTVerifyES512.Name, builtinJWTVerifyES512)
+ RegisterBuiltinFunc(ast.JWTVerifyEdDSA.Name, builtinJWTVerifyEdDSA)
RegisterBuiltinFunc(ast.JWTVerifyHS256.Name, builtinJWTVerifyHS256)
RegisterBuiltinFunc(ast.JWTVerifyHS384.Name, builtinJWTVerifyHS384)
RegisterBuiltinFunc(ast.JWTVerifyHS512.Name, builtinJWTVerifyHS512)
diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/trace.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/trace.go
new file mode 100644
index 0000000000..c9df12b4c5
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/trace.go
@@ -0,0 +1,895 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "slices"
+ "strings"
+
+ iStrs "github.com/open-policy-agent/opa/internal/strings"
+
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
+)
+
+const (
+ minLocationWidth = 5 // len("query")
+ maxIdealLocationWidth = 64
+ columnPadding = 4
+ maxExprVarWidth = 32
+ maxPrettyExprVarWidth = 64
+)
+
+// Op defines the types of tracing events.
+type Op string
+
+const (
+ // EnterOp is emitted when a new query is about to be evaluated.
+ EnterOp Op = "Enter"
+
+ // ExitOp is emitted when a query has evaluated to true.
+ ExitOp Op = "Exit"
+
+ // EvalOp is emitted when an expression is about to be evaluated.
+ EvalOp Op = "Eval"
+
+ // RedoOp is emitted when an expression, rule, or query is being re-evaluated.
+ RedoOp Op = "Redo"
+
+ // SaveOp is emitted when an expression is saved instead of evaluated
+ // during partial evaluation.
+ SaveOp Op = "Save"
+
+ // FailOp is emitted when an expression evaluates to false.
+ FailOp Op = "Fail"
+
+ // DuplicateOp is emitted when a query has produced a duplicate value. The search
+ // will stop at the point where the duplicate was emitted and backtrack.
+ DuplicateOp Op = "Duplicate"
+
+ // NoteOp is emitted when an expression invokes a tracing built-in function.
+ NoteOp Op = "Note"
+
+ // IndexOp is emitted during an expression evaluation to represent lookup
+ // matches.
+ IndexOp Op = "Index"
+
+ // WasmOp is emitted when resolving a ref using an external
+ // Resolver.
+ WasmOp Op = "Wasm"
+
+ // UnifyOp is emitted when two terms are unified. Node will be set to an
+ // equality expression with the two terms. This Node will not have location
+ // info.
+ UnifyOp Op = "Unify"
+ FailedAssertionOp Op = "FailedAssertion"
+)
+
+// VarMetadata provides some user facing information about
+// a variable in some policy.
+type VarMetadata struct {
+ Name ast.Var `json:"name"`
+ Location *ast.Location `json:"location"`
+}
+
+// Event contains state associated with a tracing event.
+type Event struct {
+ Op Op // Identifies type of event.
+ Node ast.Node // Contains AST node relevant to the event.
+ Location *ast.Location // The location of the Node this event relates to.
+ QueryID uint64 // Identifies the query this event belongs to.
+ ParentID uint64 // Identifies the parent query this event belongs to.
+ Locals *ast.ValueMap // Contains local variable bindings from the query context. Nil if variables were not included in the trace event.
+ LocalMetadata map[ast.Var]VarMetadata // Contains metadata for the local variable bindings. Nil if variables were not included in the trace event.
+ Message string // Contains message for Note events.
+ Ref *ast.Ref // Identifies the subject ref for the event. Only applies to Index and Wasm operations.
+
+ input *ast.Term
+ bindings *bindings
+ localVirtualCacheSnapshot *ast.ValueMap
+}
+
+func (evt *Event) WithInput(input *ast.Term) *Event {
+ evt.input = input
+ return evt
+}
+
+// HasRule returns true if the Event contains an ast.Rule.
+func (evt *Event) HasRule() bool {
+ _, ok := evt.Node.(*ast.Rule)
+ return ok
+}
+
+// HasBody returns true if the Event contains an ast.Body.
+func (evt *Event) HasBody() bool {
+ _, ok := evt.Node.(ast.Body)
+ return ok
+}
+
+// HasExpr returns true if the Event contains an ast.Expr.
+func (evt *Event) HasExpr() bool {
+ _, ok := evt.Node.(*ast.Expr)
+ return ok
+}
+
+// Equal returns true if this event is equal to the other event.
+func (evt *Event) Equal(other *Event) bool {
+ if evt.Op != other.Op {
+ return false
+ }
+ if evt.QueryID != other.QueryID {
+ return false
+ }
+ if evt.ParentID != other.ParentID {
+ return false
+ }
+ if !evt.equalNodes(other) {
+ return false
+ }
+ return evt.Locals.Equal(other.Locals)
+}
+
+func (evt *Event) String() string {
+ return fmt.Sprintf("%v %v %v (qid=%v, pqid=%v)", evt.Op, evt.Node, evt.Locals, evt.QueryID, evt.ParentID)
+}
+
+// Input returns the input object as it was at the event.
+func (evt *Event) Input() *ast.Term {
+ return evt.input
+}
+
+// Plug plugs event bindings into the provided ast.Term. Because bindings are mutable, this only makes sense to do when
+// the event is emitted rather than on recorded trace events as the bindings are going to be different by then.
+func (evt *Event) Plug(term *ast.Term) *ast.Term {
+ return evt.bindings.Plug(term)
+}
+
+func (evt *Event) equalNodes(other *Event) bool {
+ switch a := evt.Node.(type) {
+ case ast.Body:
+ if b, ok := other.Node.(ast.Body); ok {
+ return a.Equal(b)
+ }
+ case *ast.Rule:
+ if b, ok := other.Node.(*ast.Rule); ok {
+ return a.Equal(b)
+ }
+ case *ast.Expr:
+ if b, ok := other.Node.(*ast.Expr); ok {
+ return a.Equal(b)
+ }
+ case nil:
+ return other.Node == nil
+ }
+ return false
+}
+
+// Tracer defines the interface for tracing in the top-down evaluation engine.
+// Deprecated: Use QueryTracer instead.
+type Tracer interface {
+ Enabled() bool
+ Trace(*Event)
+}
+
+// QueryTracer defines the interface for tracing in the top-down evaluation engine.
+// The implementation can provide additional configuration to modify the tracing
+// behavior for query evaluations.
+type QueryTracer interface {
+ Enabled() bool
+ TraceEvent(Event)
+ Config() TraceConfig
+}
+
+// TraceConfig defines some common configuration for Tracer implementations
+type TraceConfig struct {
+ PlugLocalVars bool // Indicate whether to plug local variable bindings before calling into the tracer.
+}
+
+// legacyTracer Implements the QueryTracer interface by wrapping an older Tracer instance.
+type legacyTracer struct {
+ t Tracer
+}
+
+func (l *legacyTracer) Enabled() bool {
+ return l.t.Enabled()
+}
+
+func (*legacyTracer) Config() TraceConfig {
+ return TraceConfig{
+ PlugLocalVars: true, // For backwards compatibility old tracers will plug local variables
+ }
+}
+
+func (l *legacyTracer) TraceEvent(evt Event) {
+ l.t.Trace(&evt)
+}
+
+// WrapLegacyTracer will create a new QueryTracer which wraps an
+// older Tracer instance.
+func WrapLegacyTracer(tracer Tracer) QueryTracer {
+ return &legacyTracer{t: tracer}
+}
+
+// BufferTracer implements the Tracer and QueryTracer interface by
+// simply buffering all events received.
+type BufferTracer []*Event
+
+// NewBufferTracer returns a new BufferTracer.
+func NewBufferTracer() *BufferTracer {
+ return &BufferTracer{}
+}
+
+// Enabled always returns true if the BufferTracer is instantiated.
+func (b *BufferTracer) Enabled() bool {
+ return b != nil
+}
+
+// Trace adds the event to the buffer.
+// Deprecated: Use TraceEvent instead.
+func (b *BufferTracer) Trace(evt *Event) {
+ *b = append(*b, evt)
+}
+
+// TraceEvent adds the event to the buffer.
+func (b *BufferTracer) TraceEvent(evt Event) {
+ *b = append(*b, &evt)
+}
+
+// Config returns the Tracers standard configuration
+func (*BufferTracer) Config() TraceConfig {
+ return TraceConfig{PlugLocalVars: true}
+}
+
+// PrettyTrace pretty prints the trace to the writer.
+func PrettyTrace(w io.Writer, trace []*Event) {
+ PrettyTraceWithOpts(w, trace, PrettyTraceOptions{})
+}
+
+// PrettyTraceWithLocation prints the trace to the writer and includes location information
+func PrettyTraceWithLocation(w io.Writer, trace []*Event) {
+ PrettyTraceWithOpts(w, trace, PrettyTraceOptions{Locations: true})
+}
+
+type PrettyTraceOptions struct {
+ Locations bool // Include location information
+ ExprVariables bool // Include variables found in the expression
+ LocalVariables bool // Include all local variables
+}
+
+type traceRow []string
+
+func (r *traceRow) add(s string) {
+ *r = append(*r, s)
+}
+
+type traceTable struct {
+ rows []traceRow
+ maxWidths []int
+}
+
+func (t *traceTable) add(row traceRow) {
+ t.rows = append(t.rows, row)
+ for i := range row {
+ if i >= len(t.maxWidths) {
+ t.maxWidths = append(t.maxWidths, len(row[i]))
+ } else if len(row[i]) > t.maxWidths[i] {
+ t.maxWidths[i] = len(row[i])
+ }
+ }
+}
+
+func (t *traceTable) write(w io.Writer, padding int) {
+ for _, row := range t.rows {
+ for i, cell := range row {
+ width := t.maxWidths[i] + padding
+ if i < len(row)-1 {
+ _, _ = fmt.Fprintf(w, "%-*s ", width, cell)
+ } else {
+ _, _ = fmt.Fprintf(w, "%s", cell)
+ }
+ }
+ _, _ = fmt.Fprintln(w)
+ }
+}
+
+func PrettyTraceWithOpts(w io.Writer, trace []*Event, opts PrettyTraceOptions) {
+ depths := depths{}
+
+ // FIXME: Can we shorten each location as we process each trace event instead of beforehand?
+ filePathAliases, _ := getShortenedFileNames(trace)
+
+ table := traceTable{}
+
+ for _, event := range trace {
+ depth := depths.GetOrSet(event.QueryID, event.ParentID)
+ row := traceRow{}
+
+ if opts.Locations {
+ location := formatLocation(event, filePathAliases)
+ row.add(location)
+ }
+
+ row.add(formatEvent(event, depth))
+
+ if opts.ExprVariables {
+ vars := exprLocalVars(event)
+ keys := sortedKeys(vars)
+
+ buf := new(bytes.Buffer)
+ buf.WriteString("{")
+ for i, k := range keys {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ _, _ = fmt.Fprintf(buf, "%v: %s", k, iStrs.Truncate(vars.Get(k).String(), maxExprVarWidth))
+ }
+ buf.WriteString("}")
+ row.add(buf.String())
+ }
+
+ if opts.LocalVariables {
+ if locals := event.Locals; locals != nil {
+ keys := sortedKeys(locals)
+
+ buf := new(bytes.Buffer)
+ buf.WriteString("{")
+ for i, k := range keys {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ _, _ = fmt.Fprintf(buf, "%v: %s", k, iStrs.Truncate(locals.Get(k).String(), maxExprVarWidth))
+ }
+ buf.WriteString("}")
+ row.add(buf.String())
+ } else {
+ row.add("{}")
+ }
+ }
+
+ table.add(row)
+ }
+
+ table.write(w, columnPadding)
+}
+
+func sortedKeys(vm *ast.ValueMap) []ast.Value {
+ keys := make([]ast.Value, 0, vm.Len())
+ vm.Iter(func(k, _ ast.Value) bool {
+ keys = append(keys, k)
+ return false
+ })
+ slices.SortFunc(keys, func(a, b ast.Value) int {
+ return strings.Compare(a.String(), b.String())
+ })
+ return keys
+}
+
+func exprLocalVars(e *Event) *ast.ValueMap {
+ vars := ast.NewValueMap()
+
+ findVars := func(term *ast.Term) bool {
+ if name, ok := term.Value.(ast.Var); ok {
+ if meta, ok := e.LocalMetadata[name]; ok {
+ if val := e.Locals.Get(name); val != nil {
+ vars.Put(meta.Name, val)
+ }
+ }
+ }
+ return false
+ }
+
+ if r, ok := e.Node.(*ast.Rule); ok {
+ // We're only interested in vars in the head, not the body
+ ast.WalkTerms(r.Head, findVars)
+ return vars
+ }
+
+ // The local cache snapshot only contains a snapshot for those refs present in the event node,
+ // so they can all be added to the vars map.
+ e.localVirtualCacheSnapshot.Iter(func(k, v ast.Value) bool {
+ vars.Put(k, v)
+ return false
+ })
+
+ ast.WalkTerms(e.Node, findVars)
+
+ return vars
+}
+
+func formatEvent(event *Event, depth int) string {
+ padding := formatEventPadding(event, depth)
+ if event.Op == NoteOp {
+ return fmt.Sprintf("%v%v %q", padding, event.Op, event.Message)
+ }
+
+ var details any
+ if node, ok := event.Node.(*ast.Rule); ok {
+ details = node.Path()
+ } else if event.Ref != nil {
+ details = event.Ref
+ } else {
+ details = rewrite(event).Node
+ }
+
+ template := "%v%v %v"
+ opts := []any{padding, event.Op, details}
+
+ if event.Message != "" {
+ template += " %v"
+ opts = append(opts, event.Message)
+ }
+
+ return fmt.Sprintf(template, opts...)
+}
+
+func formatEventPadding(event *Event, depth int) string {
+ spaces := formatEventSpaces(event, depth)
+ if spaces > 1 {
+ return strings.Repeat("| ", spaces-1)
+ }
+ return ""
+}
+
+func formatEventSpaces(event *Event, depth int) int {
+ switch event.Op {
+ case EnterOp:
+ return depth
+ case RedoOp:
+ if _, ok := event.Node.(*ast.Expr); !ok {
+ return depth
+ }
+ }
+ return depth + 1
+}
+
+// getShortenedFileNames will return a map of file paths to shortened aliases
+// that were found in the trace. It also returns the longest location expected
+func getShortenedFileNames(trace []*Event) (map[string]string, int) {
+ // Get a deduplicated list of all file paths
+ // and the longest file path size
+ fpAliases := map[string]string{}
+ var canShorten []string
+ longestLocation := 0
+ for _, event := range trace {
+ if event.Location != nil {
+ if event.Location.File != "" {
+ // length of ":"
+ curLen := len(event.Location.File) + numDigits10(event.Location.Row) + 1
+ if curLen > longestLocation {
+ longestLocation = curLen
+ }
+
+ if _, ok := fpAliases[event.Location.File]; ok {
+ continue
+ }
+
+ canShorten = append(canShorten, event.Location.File)
+
+ // Default to just alias their full path
+ fpAliases[event.Location.File] = event.Location.File
+ } else {
+ // length of ":"
+ curLen := minLocationWidth + numDigits10(event.Location.Row) + 1
+ if curLen > longestLocation {
+ longestLocation = curLen
+ }
+ }
+ }
+ }
+
+ if len(canShorten) > 0 && longestLocation > maxIdealLocationWidth {
+ fpAliases, longestLocation = iStrs.TruncateFilePaths(maxIdealLocationWidth, longestLocation, canShorten...)
+ }
+
+ return fpAliases, longestLocation
+}
+
+func numDigits10(n int) int {
+ if n < 10 {
+ return 1
+ }
+ return numDigits10(n/10) + 1
+}
+
+func formatLocation(event *Event, fileAliases map[string]string) string {
+
+ location := event.Location
+ if location == nil {
+ return ""
+ }
+
+ if location.File == "" {
+ return fmt.Sprintf("query:%v", location.Row)
+ }
+
+ return fmt.Sprintf("%v:%v", fileAliases[location.File], location.Row)
+}
+
+// depths is a helper for computing the depth of an event. Events within the
+// same query all have the same depth. The depth of query is
+// depth(parent(query))+1.
+type depths map[uint64]int
+
+func (ds depths) GetOrSet(qid uint64, pqid uint64) int {
+ depth := ds[qid]
+ if depth == 0 {
+ depth = ds[pqid]
+ depth++
+ ds[qid] = depth
+ }
+ return depth
+}
+
+func builtinTrace(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+
+ str, err := builtins.StringOperand(operands[0].Value, 1)
+ if err != nil {
+ return handleBuiltinErr(ast.Trace.Name, bctx.Location, err)
+ }
+
+ if !bctx.TraceEnabled {
+ return iter(ast.InternedTerm(true))
+ }
+
+ evt := Event{
+ Op: NoteOp,
+ Location: bctx.Location,
+ QueryID: bctx.QueryID,
+ ParentID: bctx.ParentID,
+ Message: string(str),
+ }
+
+ for i := range bctx.QueryTracers {
+ bctx.QueryTracers[i].TraceEvent(evt)
+ }
+
+ return iter(ast.InternedTerm(true))
+}
+
+func rewrite(event *Event) *Event {
+
+ cpy := *event
+
+ var node ast.Node
+
+ switch v := event.Node.(type) {
+ case *ast.Expr:
+ expr := v.Copy()
+
+ // Hide generated local vars in 'key' position that have not been
+ // rewritten.
+ if ev, ok := v.Terms.(*ast.Every); ok {
+ if kv, ok := ev.Key.Value.(ast.Var); ok {
+ if rw, ok := cpy.LocalMetadata[kv]; !ok || rw.Name.IsGenerated() {
+ expr.Terms.(*ast.Every).Key = nil
+ }
+ }
+ }
+ node = expr
+ case ast.Body:
+ node = v.Copy()
+ case *ast.Rule:
+ node = v.Copy()
+ }
+
+ _, _ = ast.TransformVars(node, func(v ast.Var) (ast.Value, error) {
+ if meta, ok := cpy.LocalMetadata[v]; ok {
+ return meta.Name, nil
+ }
+ return v, nil
+ })
+
+ cpy.Node = node
+
+ return &cpy
+}
+
+type varInfo struct {
+ VarMetadata
+ val ast.Value
+ exprLoc *ast.Location
+ col int // 0-indexed column
+}
+
+func (v varInfo) Value() string {
+ if v.val != nil {
+ return v.val.String()
+ }
+ return "undefined"
+}
+
+func (v varInfo) Title() string {
+ if v.exprLoc != nil && v.exprLoc.Text != nil {
+ return string(v.exprLoc.Text)
+ }
+ return string(v.Name)
+}
+
+func padLocationText(loc *ast.Location) string {
+ if loc == nil {
+ return ""
+ }
+
+ text := string(loc.Text)
+
+ if loc.Col == 0 {
+ return text
+ }
+
+ buf := new(bytes.Buffer)
+ j := 0
+ for i := 1; i < loc.Col; i++ {
+ if len(loc.Tabs) > 0 && j < len(loc.Tabs) && loc.Tabs[j] == i {
+ buf.WriteString("\t")
+ j++
+ } else {
+ buf.WriteString(" ")
+ }
+ }
+
+ buf.WriteString(text)
+ return buf.String()
+}
+
+type PrettyEventOpts struct {
+ PrettyVars bool
+}
+
+func walkTestTerms(x any, f func(*ast.Term) bool) {
+ var vis *ast.GenericVisitor
+ vis = ast.NewGenericVisitor(func(x any) bool {
+ switch x := x.(type) {
+ case ast.Call:
+ for _, t := range x[1:] {
+ vis.Walk(t)
+ }
+ return true
+ case *ast.Expr:
+ if x.IsCall() {
+ for _, o := range x.Operands() {
+ vis.Walk(o)
+ }
+ for i := range x.With {
+ vis.Walk(x.With[i])
+ }
+ return true
+ }
+ case *ast.Term:
+ return f(x)
+ case *ast.With:
+ vis.Walk(x.Value)
+ return true
+ }
+ return false
+ })
+ vis.Walk(x)
+}
+
+func PrettyEvent(w io.Writer, e *Event, opts PrettyEventOpts) error {
+ if !opts.PrettyVars {
+ _, _ = fmt.Fprintln(w, padLocationText(e.Location))
+ return nil
+ }
+
+ buf := new(bytes.Buffer)
+ exprVars := map[string]varInfo{}
+
+ findVars := func(unknownAreUndefined bool) func(term *ast.Term) bool {
+ return func(term *ast.Term) bool {
+ if term.Location == nil {
+ return false
+ }
+
+ switch v := term.Value.(type) {
+ case *ast.ArrayComprehension, *ast.SetComprehension, *ast.ObjectComprehension:
+ // we don't report on the internals of a comprehension, as it's already evaluated, and we won't have the local vars.
+ return true
+ case ast.Var:
+ var info *varInfo
+ if meta, ok := e.LocalMetadata[v]; ok {
+ info = &varInfo{
+ VarMetadata: meta,
+ val: e.Locals.Get(v),
+ exprLoc: term.Location,
+ }
+ } else if unknownAreUndefined {
+ info = &varInfo{
+ VarMetadata: VarMetadata{Name: v},
+ exprLoc: term.Location,
+ col: term.Location.Col,
+ }
+ }
+
+ if info != nil {
+ if v, exists := exprVars[info.Title()]; !exists || v.val == nil {
+ if term.Location != nil {
+ info.col = term.Location.Col
+ }
+ exprVars[info.Title()] = *info
+ }
+ }
+ }
+ return false
+ }
+ }
+
+ expr, ok := e.Node.(*ast.Expr)
+ if !ok || expr == nil {
+ return nil
+ }
+
+ base := expr.BaseCogeneratedExpr()
+ exprText := padLocationText(base.Location)
+ buf.WriteString(exprText)
+
+ e.localVirtualCacheSnapshot.Iter(func(k, v ast.Value) bool {
+ var info *varInfo
+ switch k := k.(type) {
+ case ast.Ref:
+ info = &varInfo{
+ VarMetadata: VarMetadata{Name: ast.Var(k.String())},
+ val: v,
+ exprLoc: k[0].Location,
+ col: k[0].Location.Col,
+ }
+ case *ast.ArrayComprehension:
+ info = &varInfo{
+ VarMetadata: VarMetadata{Name: ast.Var(k.String())},
+ val: v,
+ exprLoc: k.Term.Location,
+ col: k.Term.Location.Col,
+ }
+ case *ast.SetComprehension:
+ info = &varInfo{
+ VarMetadata: VarMetadata{Name: ast.Var(k.String())},
+ val: v,
+ exprLoc: k.Term.Location,
+ col: k.Term.Location.Col,
+ }
+ case *ast.ObjectComprehension:
+ info = &varInfo{
+ VarMetadata: VarMetadata{Name: ast.Var(k.String())},
+ val: v,
+ exprLoc: k.Key.Location,
+ col: k.Key.Location.Col,
+ }
+ }
+
+ if info != nil {
+ exprVars[info.Title()] = *info
+ }
+
+ return false
+ })
+
+ // If the expression is negated, we can't confidently assert that vars with unknown values are 'undefined',
+ // since the compiler might have opted out of the necessary rewrite.
+ walkTestTerms(expr, findVars(!expr.Negated))
+ coExprs := expr.CogeneratedExprs()
+ for _, coExpr := range coExprs {
+ // Only the current "co-expr" can have undefined vars, if we don't know the value for a var in any other co-expr,
+ // it's unknown, not undefined. A var can be unknown if it hasn't been assigned a value yet, because the co-expr
+ // hasn't been evaluated yet (the fail happened before it).
+ walkTestTerms(coExpr, findVars(false))
+ }
+
+ printPrettyVars(buf, exprVars)
+ _, _ = fmt.Fprint(w, buf.String())
+ return nil
+}
+
+func printPrettyVars(w *bytes.Buffer, exprVars map[string]varInfo) {
+ containsTabs := false
+ varRows := make(map[int]any)
+ for _, info := range exprVars {
+ if len(info.exprLoc.Tabs) > 0 {
+ containsTabs = true
+ }
+ varRows[info.exprLoc.Row] = nil
+ }
+
+ if containsTabs && len(varRows) > 1 {
+ // We can't (currently) reliably point to var locations when they are on different rows that contain tabs.
+ // So we'll just print them in alphabetical order instead.
+ byName := make([]varInfo, 0, len(exprVars))
+ for _, info := range exprVars {
+ byName = append(byName, info)
+ }
+ slices.SortStableFunc(byName, func(a, b varInfo) int {
+ return strings.Compare(a.Title(), b.Title())
+ })
+
+ w.WriteString("\n\nWhere:\n")
+ for _, info := range byName {
+ w.WriteString(fmt.Sprintf("\n%s: %s", info.Title(), iStrs.Truncate(info.Value(), maxPrettyExprVarWidth)))
+ }
+
+ return
+ }
+
+ byCol := make([]varInfo, 0, len(exprVars))
+ for _, info := range exprVars {
+ byCol = append(byCol, info)
+ }
+ slices.SortFunc(byCol, func(a, b varInfo) int {
+ // sort first by column, then by reverse row (to present vars in the same order they appear in the expr)
+ if a.col == b.col {
+ if a.exprLoc.Row == b.exprLoc.Row {
+ return strings.Compare(a.Title(), b.Title())
+ }
+ return b.exprLoc.Row - a.exprLoc.Row
+ }
+ return a.col - b.col
+ })
+
+ if len(byCol) == 0 {
+ return
+ }
+
+ w.WriteString("\n")
+ printArrows(w, byCol, -1)
+ for i := len(byCol) - 1; i >= 0; i-- {
+ w.WriteString("\n")
+ printArrows(w, byCol, i)
+ }
+}
+
+func printArrows(w *bytes.Buffer, l []varInfo, printValueAt int) {
+ prevCol := 0
+ var slice []varInfo
+ if printValueAt >= 0 {
+ slice = l[:printValueAt+1]
+ } else {
+ slice = l
+ }
+ isFirst := true
+ for i, info := range slice {
+
+ isLast := i >= len(slice)-1
+ col := info.col
+
+ if !isLast && col == l[i+1].col {
+ // We're sharing the same column with another, subsequent var
+ continue
+ }
+
+ spaces := col - 1
+ if i > 0 && !isFirst {
+ spaces = (col - prevCol) - 1
+ }
+
+ for j := range spaces {
+ tab := false
+ if slices.Contains(info.exprLoc.Tabs, j+prevCol+1) {
+ w.WriteString("\t")
+ tab = true
+ }
+ if !tab {
+ w.WriteString(" ")
+ }
+ }
+
+ if isLast && printValueAt >= 0 {
+ valueStr := iStrs.Truncate(info.Value(), maxPrettyExprVarWidth)
+ if (i > 0 && col == l[i-1].col) || (i < len(l)-1 && col == l[i+1].col) {
+ // There is another var on this column, so we need to include the name to differentiate them.
+ w.WriteString(fmt.Sprintf("%s: %s", info.Title(), valueStr))
+ } else {
+ w.WriteString(valueStr)
+ }
+ } else {
+ w.WriteString("|")
+ }
+ prevCol = col
+ isFirst = false
+ }
+}
+
+func init() {
+ RegisterBuiltinFunc(ast.Trace.Name, builtinTrace)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/type.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/type.go
similarity index 75%
rename from vendor/github.com/open-policy-agent/opa/topdown/type.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/type.go
index dab5c853cd..0e23d2721b 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/type.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/type.go
@@ -5,69 +5,69 @@
package topdown
import (
- "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/v1/ast"
)
func builtinIsNumber(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
switch operands[0].Value.(type) {
case ast.Number:
- return iter(ast.BooleanTerm(true))
+ return iter(ast.InternedTerm(true))
default:
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
}
func builtinIsString(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
switch operands[0].Value.(type) {
case ast.String:
- return iter(ast.BooleanTerm(true))
+ return iter(ast.InternedTerm(true))
default:
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
}
func builtinIsBoolean(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
switch operands[0].Value.(type) {
case ast.Boolean:
- return iter(ast.BooleanTerm(true))
+ return iter(ast.InternedTerm(true))
default:
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
}
func builtinIsArray(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
switch operands[0].Value.(type) {
case *ast.Array:
- return iter(ast.BooleanTerm(true))
+ return iter(ast.InternedTerm(true))
default:
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
}
func builtinIsSet(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
switch operands[0].Value.(type) {
case ast.Set:
- return iter(ast.BooleanTerm(true))
+ return iter(ast.InternedTerm(true))
default:
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
}
func builtinIsObject(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
switch operands[0].Value.(type) {
case ast.Object:
- return iter(ast.BooleanTerm(true))
+ return iter(ast.InternedTerm(true))
default:
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
}
func builtinIsNull(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
switch operands[0].Value.(type) {
case ast.Null:
- return iter(ast.BooleanTerm(true))
+ return iter(ast.InternedTerm(true))
default:
- return iter(ast.BooleanTerm(false))
+ return iter(ast.InternedTerm(false))
}
}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/type_name.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/type_name.go
similarity index 58%
rename from vendor/github.com/open-policy-agent/opa/topdown/type_name.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/type_name.go
index 0a8b44aed3..9c079500c2 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/type_name.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/type_name.go
@@ -5,30 +5,30 @@
package topdown
import (
- "fmt"
+ "errors"
- "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/v1/ast"
)
func builtinTypeName(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
switch operands[0].Value.(type) {
case ast.Null:
- return iter(ast.StringTerm("null"))
+ return iter(ast.InternedTerm("null"))
case ast.Boolean:
- return iter(ast.StringTerm("boolean"))
+ return iter(ast.InternedTerm("boolean"))
case ast.Number:
- return iter(ast.StringTerm("number"))
+ return iter(ast.InternedTerm("number"))
case ast.String:
- return iter(ast.StringTerm("string"))
+ return iter(ast.InternedTerm("string"))
case *ast.Array:
- return iter(ast.StringTerm("array"))
+ return iter(ast.InternedTerm("array"))
case ast.Object:
- return iter(ast.StringTerm("object"))
+ return iter(ast.InternedTerm("object"))
case ast.Set:
- return iter(ast.StringTerm("set"))
+ return iter(ast.InternedTerm("set"))
}
- return fmt.Errorf("illegal value")
+ return errors.New("illegal value")
}
func init() {
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/uuid.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/uuid.go
similarity index 89%
rename from vendor/github.com/open-policy-agent/opa/topdown/uuid.go
rename to vendor/github.com/open-policy-agent/opa/v1/topdown/uuid.go
index d3a7a5f900..141fb908bd 100644
--- a/vendor/github.com/open-policy-agent/opa/topdown/uuid.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/uuid.go
@@ -5,9 +5,9 @@
package topdown
import (
- "github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/internal/uuid"
- "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/v1/ast"
+ "github.com/open-policy-agent/opa/v1/topdown/builtins"
)
type uuidCachingKey string
@@ -26,7 +26,7 @@ func builtinUUIDRFC4122(bctx BuiltinContext, operands []*ast.Term, iter func(*as
return err
}
- result := ast.NewTerm(ast.String(s))
+ result := ast.StringTerm(s)
bctx.Cache.Put(key, result)
return iter(result)
diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/walk.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/walk.go
new file mode 100644
index 0000000000..1c8961e71f
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/walk.go
@@ -0,0 +1,163 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "github.com/open-policy-agent/opa/v1/ast"
+)
+
+func evalWalk(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ input := operands[0]
+
+ if pathIsWildcard(operands) {
+ // When the path assignment is a wildcard: walk(input, [_, value])
+ // we may skip the path construction entirely, and simply return
+ // same pointer in each iteration. This is a *much* more efficient
+ // path when only the values are needed.
+ return walkNoPath(ast.ArrayTerm(ast.InternedEmptyArray, input), iter)
+ }
+
+ filter := getOutputPath(operands)
+ return walk(filter, nil, input, iter)
+}
+
+func walk(filter, path *ast.Array, input *ast.Term, iter func(*ast.Term) error) error {
+ if filter == nil || filter.Len() == 0 {
+ var pathCopy *ast.Array
+ if path == nil {
+ pathCopy = ast.InternedEmptyArrayValue
+ } else {
+ // Shallow copy, as while the array is modified, the elements are not
+ pathCopy = copyShallow(path)
+ }
+
+ // TODO(ae): I'd *really* like these terms to be retrieved from a sync.Pool, and
+ // returned after iter is called. However, all my atttempts to do this have failed
+ // as there seems to be something holding on to these references after the call,
+ // leading to modifications that entirely alter the results. Perhaps this is not
+ // possible to do, but if it is,it would be a huge performance win.
+ if err := iter(ast.ArrayTerm(ast.NewTerm(pathCopy), input)); err != nil {
+ return err
+ }
+ }
+
+ if filter != nil && filter.Len() > 0 {
+ key := filter.Elem(0)
+ filter = filter.Slice(1, -1)
+ if key.IsGround() {
+ if term := input.Get(key); term != nil {
+ return walk(filter, pathAppend(path, key), term, iter)
+ }
+ return nil
+ }
+ }
+
+ switch v := input.Value.(type) {
+ case *ast.Array:
+ for i := range v.Len() {
+ if err := walk(filter, pathAppend(path, ast.InternedTerm(i)), v.Elem(i), iter); err != nil {
+ return err
+ }
+ }
+ case ast.Object:
+ for _, k := range v.Keys() {
+ if err := walk(filter, pathAppend(path, k), v.Get(k), iter); err != nil {
+ return err
+ }
+ }
+ case ast.Set:
+ for _, elem := range v.Slice() {
+ if err := walk(filter, pathAppend(path, elem), elem, iter); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func walkNoPath(input *ast.Term, iter func(*ast.Term) error) error {
+ // Note: the path array is embedded in the input from the start here
+ // in order to avoid an extra allocation per iteration. This leads to
+ // a little convoluted code below in order to extract and set the value,
+ // but since walk is commonly used to traverse large data structures,
+ // the performance gain is worth it.
+ if err := iter(input); err != nil {
+ return err
+ }
+
+ inputArray := input.Value.(*ast.Array)
+ value := inputArray.Get(ast.InternedTerm(1)).Value
+
+ switch v := value.(type) {
+ case ast.Object:
+ for _, k := range v.Keys() {
+ inputArray.Set(1, v.Get(k))
+ if err := walkNoPath(input, iter); err != nil {
+ return err
+ }
+ }
+ case *ast.Array:
+ for i := range v.Len() {
+ inputArray.Set(1, v.Elem(i))
+ if err := walkNoPath(input, iter); err != nil {
+ return err
+ }
+ }
+ case ast.Set:
+ for _, elem := range v.Slice() {
+ inputArray.Set(1, elem)
+ if err := walkNoPath(input, iter); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func pathAppend(path *ast.Array, key *ast.Term) *ast.Array {
+ if path == nil {
+ return ast.NewArray(key)
+ }
+
+ return path.Append(key)
+}
+
+func getOutputPath(operands []*ast.Term) *ast.Array {
+ if len(operands) == 2 {
+ if arr, ok := operands[1].Value.(*ast.Array); ok && arr.Len() == 2 {
+ if path, ok := arr.Elem(0).Value.(*ast.Array); ok {
+ return path
+ }
+ }
+ }
+ return nil
+}
+
+func pathIsWildcard(operands []*ast.Term) bool {
+ if len(operands) == 2 {
+ if arr, ok := operands[1].Value.(*ast.Array); ok && arr.Len() == 2 {
+ if v, ok := arr.Elem(0).Value.(ast.Var); ok {
+ return v.IsWildcard()
+ }
+ }
+ }
+ return false
+}
+
+func copyShallow(arr *ast.Array) *ast.Array {
+ cpy := make([]*ast.Term, 0, arr.Len())
+
+ arr.Foreach(func(elem *ast.Term) {
+ cpy = append(cpy, elem)
+ })
+
+ return ast.NewArray(cpy...)
+}
+
+func init() {
+ RegisterBuiltinFunc(ast.WalkBuiltin.Name, evalWalk)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/tracing/tracing.go b/vendor/github.com/open-policy-agent/opa/v1/tracing/tracing.go
similarity index 96%
rename from vendor/github.com/open-policy-agent/opa/tracing/tracing.go
rename to vendor/github.com/open-policy-agent/opa/v1/tracing/tracing.go
index 2708b78e29..df2fb434a6 100644
--- a/vendor/github.com/open-policy-agent/opa/tracing/tracing.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/tracing/tracing.go
@@ -11,10 +11,10 @@ package tracing
import "net/http"
// Options are options for the HTTPTracingService, passed along as-is.
-type Options []interface{}
+type Options []any
// NewOptions is a helper method for constructing `tracing.Options`
-func NewOptions(opts ...interface{}) Options {
+func NewOptions(opts ...any) Options {
return opts
}
diff --git a/vendor/github.com/open-policy-agent/opa/types/decode.go b/vendor/github.com/open-policy-agent/opa/v1/types/decode.go
similarity index 96%
rename from vendor/github.com/open-policy-agent/opa/types/decode.go
rename to vendor/github.com/open-policy-agent/opa/v1/types/decode.go
index a6bd9ea030..367b64bffb 100644
--- a/vendor/github.com/open-policy-agent/opa/types/decode.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/types/decode.go
@@ -8,7 +8,7 @@ import (
"encoding/json"
"fmt"
- "github.com/open-policy-agent/opa/util"
+ "github.com/open-policy-agent/opa/v1/util"
)
const (
@@ -31,13 +31,13 @@ func Unmarshal(bs []byte) (result Type, err error) {
if err = util.UnmarshalJSON(bs, &hint); err == nil {
switch hint.Type {
case typeNull:
- result = NewNull()
+ result = Nl
case typeBoolean:
- result = NewBoolean()
+ result = B
case typeNumber:
- result = NewNumber()
+ result = N
case typeString:
- result = NewString()
+ result = S
case typeArray:
var arr rawarray
if err = util.UnmarshalJSON(bs, &arr); err == nil {
@@ -131,7 +131,7 @@ type rawobject struct {
}
type rawstaticproperty struct {
- Key interface{} `json:"key"`
+ Key any `json:"key"`
Value json.RawMessage `json:"value"`
}
diff --git a/vendor/github.com/open-policy-agent/opa/types/types.go b/vendor/github.com/open-policy-agent/opa/v1/types/types.go
similarity index 90%
rename from vendor/github.com/open-policy-agent/opa/types/types.go
rename to vendor/github.com/open-policy-agent/opa/v1/types/types.go
index 2a050927dd..366903f0cb 100644
--- a/vendor/github.com/open-policy-agent/opa/types/types.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/types/types.go
@@ -8,11 +8,29 @@ package types
import (
"encoding/json"
+ "errors"
"fmt"
+ "slices"
"sort"
"strings"
- "github.com/open-policy-agent/opa/util"
+ "github.com/open-policy-agent/opa/v1/util"
+)
+
+var (
+ // Nl represents an instance of the null type.
+ Nl Type = NewNull()
+ // B represents an instance of the boolean type.
+ B Type = NewBoolean()
+ // S represents an instance of the string type.
+ S Type = NewString()
+ // N represents an instance of the number type.
+ N Type = NewNumber()
+ // A represents the superset of all types.
+ A Type = NewAny()
+
+ // Boxed set types.
+ SetOfAny, SetOfStr, SetOfNum Type = NewSet(A), NewSet(S), NewSet(N)
)
// Sprint returns the string representation of the type.
@@ -58,12 +76,12 @@ type NamedType struct {
func (n *NamedType) typeMarker() string { return n.Type.typeMarker() }
func (n *NamedType) String() string { return n.Name + ": " + n.Type.String() }
func (n *NamedType) MarshalJSON() ([]byte, error) {
- var obj map[string]interface{}
+ var obj map[string]any
switch x := n.Type.(type) {
- case interface{ toMap() map[string]interface{} }:
+ case interface{ toMap() map[string]any }:
obj = x.toMap()
default:
- obj = map[string]interface{}{
+ obj = map[string]any{
"type": n.Type.typeMarker(),
}
}
@@ -91,7 +109,7 @@ func Named(name string, t Type) *NamedType {
// MarshalJSON returns the JSON encoding of t.
func (t Null) MarshalJSON() ([]byte, error) {
- return json.Marshal(map[string]interface{}{
+ return json.Marshal(map[string]any{
"type": t.typeMarker(),
})
}
@@ -105,16 +123,13 @@ func unwrap(t Type) Type {
}
}
-func (t Null) String() string {
+func (Null) String() string {
return typeNull
}
// Boolean represents the boolean type.
type Boolean struct{}
-// B represents an instance of the boolean type.
-var B = NewBoolean()
-
// NewBoolean returns a new Boolean type.
func NewBoolean() Boolean {
return Boolean{}
@@ -122,7 +137,7 @@ func NewBoolean() Boolean {
// MarshalJSON returns the JSON encoding of t.
func (t Boolean) MarshalJSON() ([]byte, error) {
- repr := map[string]interface{}{
+ repr := map[string]any{
"type": t.typeMarker(),
}
return json.Marshal(repr)
@@ -135,9 +150,6 @@ func (t Boolean) String() string {
// String represents the string type.
type String struct{}
-// S represents an instance of the string type.
-var S = NewString()
-
// NewString returns a new String type.
func NewString() String {
return String{}
@@ -145,7 +157,7 @@ func NewString() String {
// MarshalJSON returns the JSON encoding of t.
func (t String) MarshalJSON() ([]byte, error) {
- return json.Marshal(map[string]interface{}{
+ return json.Marshal(map[string]any{
"type": t.typeMarker(),
})
}
@@ -157,9 +169,6 @@ func (String) String() string {
// Number represents the number type.
type Number struct{}
-// N represents an instance of the number type.
-var N = NewNumber()
-
// NewNumber returns a new Number type.
func NewNumber() Number {
return Number{}
@@ -167,7 +176,7 @@ func NewNumber() Number {
// MarshalJSON returns the JSON encoding of t.
func (t Number) MarshalJSON() ([]byte, error) {
- return json.Marshal(map[string]interface{}{
+ return json.Marshal(map[string]any{
"type": t.typeMarker(),
})
}
@@ -195,8 +204,8 @@ func (t *Array) MarshalJSON() ([]byte, error) {
return json.Marshal(t.toMap())
}
-func (t *Array) toMap() map[string]interface{} {
- repr := map[string]interface{}{
+func (t *Array) toMap() map[string]any {
+ repr := map[string]any{
"type": t.typeMarker(),
}
if len(t.static) != 0 {
@@ -268,8 +277,8 @@ func (t *Set) MarshalJSON() ([]byte, error) {
return json.Marshal(t.toMap())
}
-func (t *Set) toMap() map[string]interface{} {
- repr := map[string]interface{}{
+func (t *Set) toMap() map[string]any {
+ repr := map[string]any{
"type": t.typeMarker(),
}
if t.of != nil {
@@ -285,12 +294,12 @@ func (t *Set) String() string {
// StaticProperty represents a static object property.
type StaticProperty struct {
- Key interface{}
+ Key any
Value Type
}
// NewStaticProperty returns a new StaticProperty object.
-func NewStaticProperty(key interface{}, value Type) *StaticProperty {
+func NewStaticProperty(key any, value Type) *StaticProperty {
return &StaticProperty{
Key: key,
Value: value,
@@ -299,7 +308,7 @@ func NewStaticProperty(key interface{}, value Type) *StaticProperty {
// MarshalJSON returns the JSON encoding of p.
func (p *StaticProperty) MarshalJSON() ([]byte, error) {
- return json.Marshal(map[string]interface{}{
+ return json.Marshal(map[string]any{
"key": p.Key,
"value": p.Value,
})
@@ -321,7 +330,7 @@ func NewDynamicProperty(key, value Type) *DynamicProperty {
// MarshalJSON returns the JSON encoding of p.
func (p *DynamicProperty) MarshalJSON() ([]byte, error) {
- return json.Marshal(map[string]interface{}{
+ return json.Marshal(map[string]any{
"key": p.Key,
"value": p.Value,
})
@@ -339,9 +348,8 @@ type Object struct {
// NewObject returns a new Object type.
func NewObject(static []*StaticProperty, dynamic *DynamicProperty) *Object {
- sort.Slice(static, func(i, j int) bool {
- cmp := util.Compare(static[i].Key, static[j].Key)
- return cmp == -1
+ slices.SortFunc(static, func(a, b *StaticProperty) int {
+ return util.Compare(a.Key, b.Key)
})
return &Object{
static: static,
@@ -384,8 +392,8 @@ func (t *Object) StaticProperties() []*StaticProperty {
}
// Keys returns the keys of the object's static elements.
-func (t *Object) Keys() []interface{} {
- sl := make([]interface{}, 0, len(t.static))
+func (t *Object) Keys() []any {
+ sl := make([]any, 0, len(t.static))
for _, p := range t.static {
sl = append(sl, p.Key)
}
@@ -397,8 +405,8 @@ func (t *Object) MarshalJSON() ([]byte, error) {
return json.Marshal(t.toMap())
}
-func (t *Object) toMap() map[string]interface{} {
- repr := map[string]interface{}{
+func (t *Object) toMap() map[string]any {
+ repr := map[string]any{
"type": t.typeMarker(),
}
if len(t.static) != 0 {
@@ -411,7 +419,7 @@ func (t *Object) toMap() map[string]interface{} {
}
// Select returns the type of the named property.
-func (t *Object) Select(name interface{}) Type {
+func (t *Object) Select(name any) Type {
pos := sort.Search(len(t.static), func(x int) bool {
return util.Compare(t.static[x].Key, name) >= 0
})
@@ -471,7 +479,7 @@ func mergeObjects(a, b *Object) *Object {
dynamicProps = b.dynamic
}
- staticPropsMap := make(map[interface{}]Type)
+ staticPropsMap := make(map[any]Type)
for _, sp := range a.static {
staticPropsMap[sp.Key] = sp.Value
@@ -503,9 +511,6 @@ func mergeObjects(a, b *Object) *Object {
// Any represents a dynamic type.
type Any []Type
-// A represents the superset of all types.
-var A = NewAny()
-
// NewAny returns a new Any type.
func NewAny(of ...Type) Any {
sl := make(Any, len(of))
@@ -536,8 +541,8 @@ func (t Any) MarshalJSON() ([]byte, error) {
return json.Marshal(t.toMap())
}
-func (t Any) toMap() map[string]interface{} {
- repr := map[string]interface{}{
+func (t Any) toMap() map[string]any {
+ repr := map[string]any{
"type": t.typeMarker(),
}
if len(t) != 0 {
@@ -578,10 +583,7 @@ func (t Any) Union(other Any) Any {
return other
}
// Prealloc the output list.
- maxLen := lenT
- if lenT < lenOther {
- maxLen = lenOther
- }
+ maxLen := max(lenT, lenOther)
merged := make(Any, 0, maxLen)
// Note(philipc): Create a merged slice, doing the minimum number of
// comparisons along the way. We treat this as a problem of merging two
@@ -675,7 +677,7 @@ func Arity(x Type) int {
if !ok {
return 0
}
- return len(f.FuncArgs().Args)
+ return f.Arity()
}
// NewFunction returns a new Function object of the given argument and result types.
@@ -723,6 +725,11 @@ func (t *Function) Args() []Type {
return cpy
}
+// Arity returns the number of arguments in the function signature.
+func (t *Function) Arity() int {
+ return len(t.args)
+}
+
// Result returns the function's result type.
func (t *Function) Result() Type {
return unwrap(t.result)
@@ -739,7 +746,7 @@ func (t *Function) String() string {
// MarshalJSON returns the JSON encoding of t.
func (t *Function) MarshalJSON() ([]byte, error) {
- repr := map[string]interface{}{
+ repr := map[string]any{
"type": t.typeMarker(),
}
if len(t.args) > 0 {
@@ -763,7 +770,7 @@ func (t *Function) UnmarshalJSON(bs []byte) error {
f, ok := tpe.(*Function)
if !ok {
- return fmt.Errorf("invalid type")
+ return errors.New("invalid type")
}
*t = *f
@@ -780,14 +787,15 @@ func (t *Function) Union(other *Function) *Function {
return other
}
- a := t.Args()
- b := other.Args()
- if len(a) != len(b) {
+ if t.Arity() != other.Arity() {
return nil
}
- aIsVariadic := t.FuncArgs().Variadic != nil
- bIsVariadic := other.FuncArgs().Variadic != nil
+ tfa := t.FuncArgs()
+ ofa := other.FuncArgs()
+
+ aIsVariadic := tfa.Variadic != nil
+ bIsVariadic := ofa.Variadic != nil
if aIsVariadic && !bIsVariadic {
return nil
@@ -795,13 +803,16 @@ func (t *Function) Union(other *Function) *Function {
return nil
}
+ a := t.Args()
+ b := other.Args()
+
args := make([]Type, len(a))
for i := range a {
args[i] = Or(a[i], b[i])
}
result := NewFunction(args, Or(t.Result(), other.Result()))
- result.variadic = Or(t.FuncArgs().Variadic, other.FuncArgs().Variadic)
+ result.variadic = Or(tfa.Variadic, ofa.Variadic)
return result
}
@@ -841,7 +852,7 @@ func Compare(a, b Type) int {
} else if x < y {
return -1
}
- switch a.(type) {
+ switch a.(type) { //nolint:gocritic
case nil, Null, Boolean, Number, String:
return 0
case *Array:
@@ -878,12 +889,9 @@ func Compare(a, b Type) int {
lenStaticA := len(objA.static)
lenStaticB := len(objB.static)
- minLen := lenStaticA
- if lenStaticB < minLen {
- minLen = lenStaticB
- }
+ minLen := min(lenStaticB, lenStaticA)
- for i := 0; i < minLen; i++ {
+ for i := range minLen {
if cmp := util.Compare(objA.static[i].Key, objB.static[i].Key); cmp != 0 {
return cmp
}
@@ -922,7 +930,7 @@ func Compare(a, b Type) int {
} else if len(fA.args) > len(fB.args) {
return 1
}
- for i := 0; i < len(fA.args); i++ {
+ for i := range len(fA.args) {
if cmp := Compare(fA.args[i], fB.args[i]); cmp != 0 {
return cmp
}
@@ -975,7 +983,7 @@ func Or(a, b Type) Type {
}
// Select returns a property or item of a.
-func Select(a Type, x interface{}) Type {
+func Select(a Type, x any) Type {
switch a := unwrap(a).(type) {
case *Array:
n, ok := x.(json.Number)
@@ -1086,17 +1094,13 @@ func Nil(a Type) bool {
case nil:
return true
case *Function:
- for i := range a.args {
- if Nil(a.args[i]) {
- return true
- }
+ if slices.ContainsFunc(a.args, Nil) {
+ return true
}
return Nil(a.result)
case *Array:
- for i := range a.static {
- if Nil(a.static[i]) {
- return true
- }
+ if slices.ContainsFunc(a.static, Nil) {
+ return true
}
if a.dynamic != nil {
return Nil(a.dynamic)
@@ -1117,32 +1121,32 @@ func Nil(a Type) bool {
}
// TypeOf returns the type of the Golang native value.
-func TypeOf(x interface{}) Type {
+func TypeOf(x any) Type {
switch x := x.(type) {
case nil:
- return NewNull()
+ return Nl
case bool:
return B
case string:
return S
case json.Number:
return N
- case map[string]interface{}:
- // The ast.ValueToInterface() function returns ast.Object values as map[string]interface{}
- // so map[string]interface{} must be handled here because the type checker uses the value
+ case map[string]any:
+ // The ast.ValueToInterface() function returns ast.Object values as map[string]any
+ // so map[string]any must be handled here because the type checker uses the value
// to interface conversion when inferring object types.
static := make([]*StaticProperty, 0, len(x))
for k, v := range x {
static = append(static, NewStaticProperty(k, TypeOf(v)))
}
return NewObject(static, nil)
- case map[interface{}]interface{}:
+ case map[any]any:
static := make([]*StaticProperty, 0, len(x))
for k, v := range x {
static = append(static, NewStaticProperty(k, TypeOf(v)))
}
return NewObject(static, nil)
- case []interface{}:
+ case []any:
static := make([]Type, len(x))
for i := range x {
static[i] = TypeOf(x[i])
@@ -1155,15 +1159,12 @@ func TypeOf(x interface{}) Type {
type typeSlice []Type
func (s typeSlice) Less(i, j int) bool { return Compare(s[i], s[j]) < 0 }
-func (s typeSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x }
+func (s typeSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s typeSlice) Len() int { return len(s) }
func typeSliceCompare(a, b []Type) int {
- minLen := len(a)
- if len(b) < minLen {
- minLen = len(b)
- }
- for i := 0; i < minLen; i++ {
+ minLen := min(len(b), len(a))
+ for i := range minLen {
if cmp := Compare(a[i], b[i]); cmp != 0 {
return cmp
}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/backoff.go b/vendor/github.com/open-policy-agent/opa/v1/util/backoff.go
new file mode 100644
index 0000000000..d58af616da
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/util/backoff.go
@@ -0,0 +1,44 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package util
+
+import (
+ "math/rand"
+ "time"
+)
+
+// DefaultBackoff returns a delay with an exponential backoff based on the
+// number of retries.
+func DefaultBackoff(base, maxNS float64, retries int) time.Duration {
+ return Backoff(base, maxNS, .2, 1.6, retries)
+}
+
+// Backoff returns a delay with an exponential backoff based on the number of
+// retries. Same algorithm used in gRPC.
+// Note that if maxNS is smaller than base, the backoff will still be capped at
+// maxNS.
+func Backoff(base, maxNS, jitter, factor float64, retries int) time.Duration {
+ if retries == 0 {
+ return 0
+ }
+
+ backoff, maxNS := base, maxNS
+ for backoff < maxNS && retries > 0 {
+ backoff *= factor
+ retries--
+ }
+ if backoff > maxNS {
+ backoff = maxNS
+ }
+
+ // Randomize backoff delays so that if a cluster of requests start at
+ // the same time, they won't operate in lockstep.
+ backoff *= 1 + jitter*(rand.Float64()*2-1)
+ if backoff < 0 {
+ return 0
+ }
+
+ return time.Duration(backoff)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/channel.go b/vendor/github.com/open-policy-agent/opa/v1/util/channel.go
new file mode 100644
index 0000000000..e2653ac7fd
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/util/channel.go
@@ -0,0 +1,32 @@
+package util
+
+import (
+ "github.com/open-policy-agent/opa/v1/metrics"
+)
+
+// This prevents getting blocked forever writing to a full buffer, in case another routine fills the last space.
+// Retrying maxEventRetry times to drop the oldest event. Dropping the incoming event if there still isn't room.
+const maxEventRetry = 1000
+
+// PushFIFO pushes data into a buffered channel without blocking when full, making room by dropping the oldest data.
+// An optional metric can be recorded when data is dropped.
+func PushFIFO[T any](buffer chan T, data T, metrics metrics.Metrics, metricName string) {
+
+ for range maxEventRetry {
+ // non-blocking send to the buffer, to prevent blocking if buffer is full so room can be made.
+ select {
+ case buffer <- data:
+ return
+ default:
+ }
+
+ // non-blocking drop from the buffer to make room for incoming event
+ select {
+ case <-buffer:
+ if metrics != nil && metricName != "" {
+ metrics.Counter(metricName).Incr()
+ }
+ default:
+ }
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/util/close.go b/vendor/github.com/open-policy-agent/opa/v1/util/close.go
similarity index 100%
rename from vendor/github.com/open-policy-agent/opa/util/close.go
rename to vendor/github.com/open-policy-agent/opa/v1/util/close.go
diff --git a/vendor/github.com/open-policy-agent/opa/util/compare.go b/vendor/github.com/open-policy-agent/opa/v1/util/compare.go
similarity index 79%
rename from vendor/github.com/open-policy-agent/opa/util/compare.go
rename to vendor/github.com/open-policy-agent/opa/v1/util/compare.go
index 8ae7753690..df78f64755 100644
--- a/vendor/github.com/open-policy-agent/opa/util/compare.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/util/compare.go
@@ -8,16 +8,15 @@ import (
"encoding/json"
"fmt"
"math/big"
- "sort"
)
// Compare returns 0 if a equals b, -1 if a is less than b, and 1 if b is than a.
//
// For comparison between values of different types, the following ordering is used:
-// nil < bool < int, float64 < string < []interface{} < map[string]interface{}. Slices and maps
+// nil < bool < int, float64 < string < []any < map[string]any. Slices and maps
// are compared recursively. If one slice or map is a subset of the other slice or map
// it is considered "less than". Nil is always equal to nil.
-func Compare(a, b interface{}) int {
+func Compare(a, b any) int {
aSortOrder := sortOrder(a)
bSortOrder := sortOrder(b)
if aSortOrder < bSortOrder {
@@ -74,16 +73,13 @@ func Compare(a, b interface{}) int {
}
return 1
}
- case []interface{}:
+ case []any:
switch b := b.(type) {
- case []interface{}:
+ case []any:
bLen := len(b)
aLen := len(a)
- minLen := aLen
- if bLen < minLen {
- minLen = bLen
- }
- for i := 0; i < minLen; i++ {
+ minLen := min(bLen, aLen)
+ for i := range minLen {
cmp := Compare(a[i], b[i])
if cmp != 0 {
return cmp
@@ -96,26 +92,15 @@ func Compare(a, b interface{}) int {
}
return 1
}
- case map[string]interface{}:
+ case map[string]any:
switch b := b.(type) {
- case map[string]interface{}:
- var aKeys []string
- for k := range a {
- aKeys = append(aKeys, k)
- }
- var bKeys []string
- for k := range b {
- bKeys = append(bKeys, k)
- }
- sort.Strings(aKeys)
- sort.Strings(bKeys)
+ case map[string]any:
+ aKeys := KeysSorted(a)
+ bKeys := KeysSorted(b)
aLen := len(aKeys)
bLen := len(bKeys)
- minLen := aLen
- if bLen < minLen {
- minLen = bLen
- }
- for i := 0; i < minLen; i++ {
+ minLen := min(bLen, aLen)
+ for i := range minLen {
if aKeys[i] < bKeys[i] {
return -1
} else if bKeys[i] < aKeys[i] {
@@ -161,7 +146,7 @@ func compareJSONNumber(a, b json.Number) int {
return bigA.Cmp(bigB)
}
-func sortOrder(v interface{}) int {
+func sortOrder(v any) int {
switch v.(type) {
case nil:
return nilSort
@@ -175,9 +160,9 @@ func sortOrder(v interface{}) int {
return numberSort
case string:
return stringSort
- case []interface{}:
+ case []any:
return arraySort
- case map[string]interface{}:
+ case map[string]any:
return objectSort
}
panic(fmt.Sprintf("illegal argument of type %T", v))
diff --git a/vendor/github.com/open-policy-agent/opa/util/decoding/context.go b/vendor/github.com/open-policy-agent/opa/v1/util/decoding/context.go
similarity index 100%
rename from vendor/github.com/open-policy-agent/opa/util/decoding/context.go
rename to vendor/github.com/open-policy-agent/opa/v1/util/decoding/context.go
diff --git a/vendor/github.com/open-policy-agent/opa/util/doc.go b/vendor/github.com/open-policy-agent/opa/v1/util/doc.go
similarity index 100%
rename from vendor/github.com/open-policy-agent/opa/util/doc.go
rename to vendor/github.com/open-policy-agent/opa/v1/util/doc.go
diff --git a/vendor/github.com/open-policy-agent/opa/util/enumflag.go b/vendor/github.com/open-policy-agent/opa/v1/util/enumflag.go
similarity index 100%
rename from vendor/github.com/open-policy-agent/opa/util/enumflag.go
rename to vendor/github.com/open-policy-agent/opa/v1/util/enumflag.go
diff --git a/vendor/github.com/open-policy-agent/opa/util/graph.go b/vendor/github.com/open-policy-agent/opa/v1/util/graph.go
similarity index 100%
rename from vendor/github.com/open-policy-agent/opa/util/graph.go
rename to vendor/github.com/open-policy-agent/opa/v1/util/graph.go
diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/hashmap.go b/vendor/github.com/open-policy-agent/opa/v1/util/hashmap.go
new file mode 100644
index 0000000000..69a90cbb53
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/util/hashmap.go
@@ -0,0 +1,271 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package util
+
+import (
+ "fmt"
+ "strings"
+)
+
+// T is a concise way to refer to T.
+type T any
+
+type Hasher interface {
+ Hash() int
+}
+
+type hashEntry[K any, V any] struct {
+ k K
+ v V
+ next *hashEntry[K, V]
+}
+
+// TypedHashMap represents a key/value map.
+type TypedHashMap[K any, V any] struct {
+ keq func(K, K) bool
+ veq func(V, V) bool
+ khash func(K) int
+ vhash func(V) int
+ def V
+ table map[int]*hashEntry[K, V]
+ size int
+}
+
+// NewTypedHashMap returns a new empty TypedHashMap.
+func NewTypedHashMap[K any, V any](keq func(K, K) bool, veq func(V, V) bool, khash func(K) int, vhash func(V) int, def V) *TypedHashMap[K, V] {
+ return &TypedHashMap[K, V]{
+ keq: keq,
+ veq: veq,
+ khash: khash,
+ vhash: vhash,
+ def: def,
+ table: make(map[int]*hashEntry[K, V]),
+ size: 0,
+ }
+}
+
+// HashMap represents a key/value map.
+type HashMap = TypedHashMap[T, T]
+
+// NewHashMap returns a new empty HashMap.
+func NewHashMap(eq func(T, T) bool, hash func(T) int) *HashMap {
+ return &HashMap{
+ keq: eq,
+ veq: eq,
+ khash: hash,
+ vhash: hash,
+ def: nil,
+ table: make(map[int]*hashEntry[T, T]),
+ size: 0,
+ }
+}
+
+// Copy returns a shallow copy of this HashMap.
+func (h *TypedHashMap[K, V]) Copy() *TypedHashMap[K, V] {
+ cpy := NewTypedHashMap(h.keq, h.veq, h.khash, h.vhash, h.def)
+ h.Iter(func(k K, v V) bool {
+ cpy.Put(k, v)
+ return false
+ })
+ return cpy
+}
+
+// Equal returns true if this HashMap equals the other HashMap.
+// Two hash maps are equal if they contain the same key/value pairs.
+func (h *TypedHashMap[K, V]) Equal(other *TypedHashMap[K, V]) bool {
+ if h.Len() != other.Len() {
+ return false
+ }
+ return !h.Iter(func(k K, v V) bool {
+ ov, ok := other.Get(k)
+ if !ok {
+ return true
+ }
+ return !h.veq(v, ov)
+ })
+}
+
+// Get returns the value for k.
+func (h *TypedHashMap[K, V]) Get(k K) (V, bool) {
+ hash := h.khash(k)
+ for entry := h.table[hash]; entry != nil; entry = entry.next {
+ if h.keq(entry.k, k) {
+ return entry.v, true
+ }
+ }
+ return h.def, false
+}
+
+// Delete removes the key k.
+func (h *TypedHashMap[K, V]) Delete(k K) {
+ hash := h.khash(k)
+ var prev *hashEntry[K, V]
+ for entry := h.table[hash]; entry != nil; entry = entry.next {
+ if h.keq(entry.k, k) {
+ if prev != nil {
+ prev.next = entry.next
+ } else {
+ h.table[hash] = entry.next
+ }
+ h.size--
+ return
+ }
+ prev = entry
+ }
+}
+
+// Hash returns the hash code for this hash map.
+func (h *TypedHashMap[K, V]) Hash() int {
+ var hash int
+ h.Iter(func(k K, v V) bool {
+ hash += h.khash(k) + h.vhash(v)
+ return false
+ })
+ return hash
+}
+
+// Iter invokes the iter function for each element in the HashMap.
+// If the iter function returns true, iteration stops and the return value is true.
+// If the iter function never returns true, iteration proceeds through all elements
+// and the return value is false.
+func (h *TypedHashMap[K, V]) Iter(iter func(K, V) bool) bool {
+ for _, entry := range h.table {
+ for ; entry != nil; entry = entry.next {
+ if iter(entry.k, entry.v) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// Len returns the current size of this HashMap.
+func (h *TypedHashMap[K, V]) Len() int {
+ return h.size
+}
+
+// Put inserts a key/value pair into this HashMap. If the key is already present, the existing
+// value is overwritten.
+func (h *TypedHashMap[K, V]) Put(k K, v V) {
+ hash := h.khash(k)
+ head := h.table[hash]
+ for entry := head; entry != nil; entry = entry.next {
+ if h.keq(entry.k, k) {
+ entry.v = v
+ return
+ }
+ }
+ h.table[hash] = &hashEntry[K, V]{k: k, v: v, next: head}
+ h.size++
+}
+
+func (h *TypedHashMap[K, V]) String() string {
+ var buf []string
+ h.Iter(func(k K, v V) bool {
+ buf = append(buf, fmt.Sprintf("%v: %v", k, v))
+ return false
+ })
+ return "{" + strings.Join(buf, ", ") + "}"
+}
+
+// Update returns a new HashMap with elements from the other HashMap put into this HashMap.
+// If the other HashMap contains elements with the same key as this HashMap, the value
+// from the other HashMap overwrites the value from this HashMap.
+func (h *TypedHashMap[K, V]) Update(other *TypedHashMap[K, V]) *TypedHashMap[K, V] {
+ updated := h.Copy()
+ other.Iter(func(k K, v V) bool {
+ updated.Put(k, v)
+ return false
+ })
+ return updated
+}
+
+type hasherEntry[K Hasher, V any] struct {
+ k K
+ v V
+ next *hasherEntry[K, V]
+}
+
+// HasherMap represents a simpler version of TypedHashMap that uses Hasher's
+// for keys, and requires only an equality function for keys. Ideally we'd have
+// and Equal method for all key types too, and we could get rid of that requirement.
+type HasherMap[K Hasher, V any] struct {
+ keq func(K, K) bool
+ table map[int]*hasherEntry[K, V]
+ size int
+}
+
+// NewHasherMap returns a new empty HasherMap.
+func NewHasherMap[K Hasher, V any](keq func(K, K) bool) *HasherMap[K, V] {
+ return &HasherMap[K, V]{
+ keq: keq,
+ table: make(map[int]*hasherEntry[K, V]),
+ size: 0,
+ }
+}
+
+// Get returns the value for k.
+func (h *HasherMap[K, V]) Get(k K) (V, bool) {
+ for entry := h.table[k.Hash()]; entry != nil; entry = entry.next {
+ if h.keq(entry.k, k) {
+ return entry.v, true
+ }
+ }
+ var zero V
+ return zero, false
+}
+
+// Put inserts a key/value pair into this HashMap. If the key is already present, the existing
+// value is overwritten.
+func (h *HasherMap[K, V]) Put(k K, v V) {
+ hash := k.Hash()
+ head := h.table[hash]
+ for entry := head; entry != nil; entry = entry.next {
+ if h.keq(entry.k, k) {
+ entry.v = v
+ return
+ }
+ }
+ h.table[hash] = &hasherEntry[K, V]{k: k, v: v, next: head}
+ h.size++
+}
+
+// Delete removes the key k.
+func (h *HasherMap[K, V]) Delete(k K) {
+ hash := k.Hash()
+ var prev *hasherEntry[K, V]
+ for entry := h.table[hash]; entry != nil; entry = entry.next {
+ if h.keq(entry.k, k) {
+ if prev != nil {
+ prev.next = entry.next
+ } else {
+ h.table[hash] = entry.next
+ }
+ h.size--
+ return
+ }
+ prev = entry
+ }
+}
+
+// Iter invokes the iter function for each element in the HasherMap.
+// If the iter function returns true, iteration stops and the return value is true.
+// If the iter function never returns true, iteration proceeds through all elements
+// and the return value is false.
+func (h *HasherMap[K, V]) Iter(iter func(K, V) bool) bool {
+ for _, entry := range h.table {
+ for ; entry != nil; entry = entry.next {
+ if iter(entry.k, entry.v) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// Len returns the current size of this HashMap.
+func (h *HasherMap[K, V]) Len() int {
+ return h.size
+}
diff --git a/vendor/github.com/open-policy-agent/opa/util/json.go b/vendor/github.com/open-policy-agent/opa/v1/util/json.go
similarity index 55%
rename from vendor/github.com/open-policy-agent/opa/util/json.go
rename to vendor/github.com/open-policy-agent/opa/v1/util/json.go
index 0b7fd2ed64..de95ed50bf 100644
--- a/vendor/github.com/open-policy-agent/opa/util/json.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/util/json.go
@@ -10,24 +10,24 @@ import (
"fmt"
"io"
"reflect"
+ "strconv"
"sigs.k8s.io/yaml"
- "github.com/open-policy-agent/opa/loader/extension"
+ "github.com/open-policy-agent/opa/v1/loader/extension"
)
// UnmarshalJSON parses the JSON encoded data and stores the result in the value
// pointed to by x.
//
-// This function is intended to be used in place of the standard json.Marshal
-// function when json.Number is required.
-func UnmarshalJSON(bs []byte, x interface{}) error {
+// This function is intended to be used in place of the standard [json.Marshal]
+// function when [json.Number] is required.
+func UnmarshalJSON(bs []byte, x any) error {
return unmarshalJSON(bs, x, true)
}
-func unmarshalJSON(bs []byte, x interface{}, ext bool) error {
- buf := bytes.NewBuffer(bs)
- decoder := NewJSONDecoder(buf)
+func unmarshalJSON(bs []byte, x any, ext bool) error {
+ decoder := NewJSONDecoder(bytes.NewBuffer(bs))
if err := decoder.Decode(x); err != nil {
if handler := extension.FindExtension(".json"); handler != nil && ext {
return handler(bs, x)
@@ -49,8 +49,8 @@ func unmarshalJSON(bs []byte, x interface{}, ext bool) error {
// NewJSONDecoder returns a new decoder that reads from r.
//
-// This function is intended to be used in place of the standard json.NewDecoder
-// when json.Number is required.
+// This function is intended to be used in place of the standard [json.NewDecoder]
+// when [json.Number] is required.
func NewJSONDecoder(r io.Reader) *json.Decoder {
decoder := json.NewDecoder(r)
decoder.UseNumber()
@@ -61,8 +61,8 @@ func NewJSONDecoder(r io.Reader) *json.Decoder {
//
// If the data cannot be decoded, this function will panic. This function is for
// test purposes.
-func MustUnmarshalJSON(bs []byte) interface{} {
- var x interface{}
+func MustUnmarshalJSON(bs []byte) any {
+ var x any
if err := UnmarshalJSON(bs, &x); err != nil {
panic(err)
}
@@ -73,7 +73,7 @@ func MustUnmarshalJSON(bs []byte) interface{} {
//
// If the data cannot be encoded, this function will panic. This function is for
// test purposes.
-func MustMarshalJSON(x interface{}) []byte {
+func MustMarshalJSON(x any) []byte {
bs, err := json.Marshal(x)
if err != nil {
panic(err)
@@ -86,7 +86,56 @@ func MustMarshalJSON(x interface{}) []byte {
// Thereby, it is converting its argument to the representation expected by
// rego.Input and inmem's Write operations. Works with both references and
// values.
-func RoundTrip(x *interface{}) error {
+func RoundTrip(x *any) error {
+ // Avoid round-tripping types that won't change as a result of
+ // marshalling/unmarshalling, as even for those values, round-tripping
+ // comes with a significant cost.
+ if x == nil || !NeedsRoundTrip(*x) {
+ return nil
+ }
+
+ // For number types, we can write the json.Number representation
+ // directly into x without marshalling to bytes and back.
+ a := *x
+ switch v := a.(type) {
+ case int:
+ *x = json.Number(strconv.Itoa(v))
+ return nil
+ case int8:
+ *x = json.Number(strconv.FormatInt(int64(v), 10))
+ return nil
+ case int16:
+ *x = json.Number(strconv.FormatInt(int64(v), 10))
+ return nil
+ case int32:
+ *x = json.Number(strconv.FormatInt(int64(v), 10))
+ return nil
+ case int64:
+ *x = json.Number(strconv.FormatInt(v, 10))
+ return nil
+ case uint:
+ *x = json.Number(strconv.FormatUint(uint64(v), 10))
+ return nil
+ case uint8:
+ *x = json.Number(strconv.FormatUint(uint64(v), 10))
+ return nil
+ case uint16:
+ *x = json.Number(strconv.FormatUint(uint64(v), 10))
+ return nil
+ case uint32:
+ *x = json.Number(strconv.FormatUint(uint64(v), 10))
+ return nil
+ case uint64:
+ *x = json.Number(strconv.FormatUint(v, 10))
+ return nil
+ case float32:
+ *x = json.Number(strconv.FormatFloat(float64(v), 'f', -1, 32))
+ return nil
+ case float64:
+ *x = json.Number(strconv.FormatFloat(v, 'f', -1, 64))
+ return nil
+ }
+
bs, err := json.Marshal(x)
if err != nil {
return err
@@ -94,15 +143,28 @@ func RoundTrip(x *interface{}) error {
return UnmarshalJSON(bs, x)
}
+// NeedsRoundTrip returns true if the value won't change as a result of
+// a marshalling/unmarshalling round-trip. Since [RoundTrip] itself calls
+// this you normally don't need to call this function directly, unless you
+// want to make decisions based on the round-tripability of a value without
+// actually doing the round-trip.
+func NeedsRoundTrip(x any) bool {
+ switch x.(type) {
+ case nil, bool, string, json.Number:
+ return false
+ }
+ return true
+}
+
// Reference returns a pointer to its argument unless the argument already is
// a pointer. If the argument is **t, or ***t, etc, it will return *t.
//
// Used for preparing Go types (including pointers to structs) into values to be
-// put through util.RoundTrip().
-func Reference(x interface{}) *interface{} {
- var y interface{}
+// put through [RoundTrip].
+func Reference(x any) *any {
+ var y any
rv := reflect.ValueOf(x)
- if rv.Kind() == reflect.Ptr {
+ if rv.Kind() == reflect.Pointer {
return Reference(rv.Elem().Interface())
}
if rv.Kind() != reflect.Invalid {
@@ -113,7 +175,7 @@ func Reference(x interface{}) *interface{} {
}
// Unmarshal decodes a YAML, JSON or JSON extension value into the specified type.
-func Unmarshal(bs []byte, v interface{}) error {
+func Unmarshal(bs []byte, v any) error {
if len(bs) > 2 && bs[0] == 0xef && bs[1] == 0xbb && bs[2] == 0xbf {
bs = bs[3:] // Strip UTF-8 BOM, see https://www.rfc-editor.org/rfc/rfc8259#section-8.1
}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/maps.go b/vendor/github.com/open-policy-agent/opa/v1/util/maps.go
new file mode 100644
index 0000000000..c56fbe98ac
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/util/maps.go
@@ -0,0 +1,34 @@
+package util
+
+import (
+ "cmp"
+ "slices"
+)
+
+// Keys returns a slice of keys from any map.
+func Keys[M ~map[K]V, K comparable, V any](m M) []K {
+ r := make([]K, 0, len(m))
+ for k := range m {
+ r = append(r, k)
+ }
+ return r
+}
+
+// KeysSorted returns a slice of keys from any map, sorted in ascending order.
+func KeysSorted[M ~map[K]V, K cmp.Ordered, V any](m M) []K {
+ r := make([]K, 0, len(m))
+ for k := range m {
+ r = append(r, k)
+ }
+ slices.Sort(r)
+ return r
+}
+
+// Values returns a slice of values from any map. Copied from golang.org/x/exp/maps.
+func Values[M ~map[K]V, K comparable, V any](m M) []V {
+ r := make([]V, 0, len(m))
+ for _, v := range m {
+ r = append(r, v)
+ }
+ return r
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/performance.go b/vendor/github.com/open-policy-agent/opa/v1/util/performance.go
new file mode 100644
index 0000000000..c7bd57ea04
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/util/performance.go
@@ -0,0 +1,133 @@
+package util
+
+import (
+ "math"
+ "slices"
+ "strings"
+ "sync"
+ "unsafe"
+)
+
+// NewPtrSlice returns a slice of pointers to T with length n,
+// with only 2 allocations performed no matter the size of n.
+// See:
+// https://gist.github.com/CAFxX/e96e8a5c3841d152f16d266a1fe7f8bd#slices-of-pointers
+func NewPtrSlice[T any](n int) []*T {
+ return GrowPtrSlice[T](nil, n)
+}
+
+// GrowPtrSlice appends n elements to the slice, each pointing to
+// a newly-allocated T. The resulting slice has length equal to len(s)+n.
+//
+// It performs at most 2 allocations, regardless of n.
+func GrowPtrSlice[T any](s []*T, n int) []*T {
+ s = slices.Grow(s, n)
+ p := make([]T, n)
+ for i := range n {
+ s = append(s, &p[i])
+ }
+ return s
+}
+
+// Allocation free conversion from []byte to string (unsafe)
+// Note that the byte slice must not be modified after conversion
+func ByteSliceToString(bs []byte) string {
+ return unsafe.String(unsafe.SliceData(bs), len(bs))
+}
+
+// Allocation free conversion from ~string to []byte (unsafe)
+// Note that the byte slice must not be modified after conversion
+func StringToByteSlice[T ~string](s T) []byte {
+ return unsafe.Slice(unsafe.StringData(string(s)), len(s))
+}
+
+// NumDigitsInt returns the number of digits in n.
+// This is useful for pre-allocating buffers for string conversion.
+func NumDigitsInt(n int) int {
+ if n == 0 {
+ return 1
+ }
+
+ if n < 0 {
+ n = -n
+ }
+
+ return int(math.Log10(float64(n))) + 1
+}
+
+// NumDigitsUint returns the number of digits in n.
+// This is useful for pre-allocating buffers for string conversion.
+func NumDigitsUint(n uint64) int {
+ if n == 0 {
+ return 1
+ }
+
+ return int(math.Log10(float64(n))) + 1
+}
+
+// KeysCount returns the number of keys in m that satisfy predicate p.
+func KeysCount[K comparable, V any](m map[K]V, p func(K) bool) int {
+ count := 0
+ for k := range m {
+ if p(k) {
+ count++
+ }
+ }
+ return count
+}
+
+// SplitMap calls fn for each delim-separated part of text and returns a slice of the results.
+// Cheaper than calling fn on strings.Split(text, delim), as it avoids allocating an intermediate slice of strings.
+func SplitMap[T any](text string, delim string, fn func(string) T) []T {
+ sl := make([]T, 0, strings.Count(text, delim)+1)
+ for s := range strings.SplitSeq(text, delim) {
+ sl = append(sl, fn(s))
+ }
+ return sl
+}
+
+// SlicePool is a pool for (pointers to) slices of type T.
+// It uses sync.Pool to pool the slices, and grows them as needed.
+type SlicePool[T any] struct {
+ pool sync.Pool
+}
+
+// NewSlicePool creates a new SlicePool for slices of type T with the given initial length.
+// This number is only a hint, as the slices will grow as needed. For best performance, store
+// slices of similar lengths in the same pool.
+func NewSlicePool[T any](length int) *SlicePool[T] {
+ return &SlicePool[T]{
+ pool: sync.Pool{
+ New: func() any {
+ s := make([]T, length)
+ return &s
+ },
+ },
+ }
+}
+
+// Get returns a pointer to a slice of type T with the given length
+// from the pool. The slice capacity will grow as needed to accommodate
+// the requested length. The returned slice will have all its elements
+// set to the zero value of T. Returns a pointer to avoid allocating.
+func (sp *SlicePool[T]) Get(length int) *[]T {
+ s := sp.pool.Get().(*[]T)
+ d := *s
+
+ if cap(d) < length {
+ d = slices.Grow(d, length)
+ }
+
+ d = d[:length] // reslice to requested length, while keeping capacity
+
+ clear(d)
+
+ *s = d
+
+ return s
+}
+
+// Put returns a pointer to a slice of type T to the pool.
+func (sp *SlicePool[T]) Put(s *[]T) {
+ sp.pool.Put(s)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/util/queue.go b/vendor/github.com/open-policy-agent/opa/v1/util/queue.go
similarity index 100%
rename from vendor/github.com/open-policy-agent/opa/util/queue.go
rename to vendor/github.com/open-policy-agent/opa/v1/util/queue.go
diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/read_gzip_body.go b/vendor/github.com/open-policy-agent/opa/v1/util/read_gzip_body.go
new file mode 100644
index 0000000000..92c0df8b08
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/util/read_gzip_body.go
@@ -0,0 +1,72 @@
+package util
+
+import (
+ "bytes"
+ "compress/gzip"
+ "encoding/binary"
+ "errors"
+ "io"
+ "net/http"
+ "strings"
+ "sync"
+
+ "github.com/open-policy-agent/opa/v1/util/decoding"
+)
+
+var gzipReaderPool = sync.Pool{
+ New: func() any {
+ reader := new(gzip.Reader)
+ return reader
+ },
+}
+
+// Note(philipc): Originally taken from server/server.go
+// The DecodingLimitHandler handles validating that the gzip payload is within the
+// allowed max size limit. Thus, in the event of a forged payload size trailer,
+// the worst that can happen is that we waste memory up to the allowed max gzip
+// payload size, but not an unbounded amount of memory, as was potentially
+// possible before.
+func ReadMaybeCompressedBody(r *http.Request) ([]byte, error) {
+ length := r.ContentLength
+ if maxLenConf, ok := decoding.GetServerDecodingMaxLen(r.Context()); ok {
+ length = maxLenConf
+ }
+
+ content, err := io.ReadAll(io.LimitReader(r.Body, length))
+ if err != nil {
+ return nil, err
+ }
+
+ if strings.Contains(r.Header.Get("Content-Encoding"), "gzip") {
+ gzipMaxLength, _ := decoding.GetServerDecodingGzipMaxLen(r.Context())
+
+ // Note(philipc): The last 4 bytes of a well-formed gzip blob will
+ // always be a little-endian uint32, representing the decompressed
+ // content size, modulo 2^32. We validate that the size is safe,
+ // earlier in DecodingLimitHandler.
+ sizeDecompressed := int64(binary.LittleEndian.Uint32(content[len(content)-4:]))
+ if sizeDecompressed > gzipMaxLength {
+ return nil, errors.New("gzip payload too large")
+ }
+
+ gzReader := gzipReaderPool.Get().(*gzip.Reader)
+ defer func() {
+ gzReader.Close()
+ gzipReaderPool.Put(gzReader)
+ }()
+
+ if err := gzReader.Reset(bytes.NewReader(content)); err != nil {
+ return nil, err
+ }
+
+ decompressed := bytes.NewBuffer(make([]byte, 0, sizeDecompressed))
+ if _, err = io.CopyN(decompressed, gzReader, sizeDecompressed); err != nil {
+ return nil, err
+ }
+
+ return decompressed.Bytes(), nil
+ }
+
+ // Request was not compressed; return the content bytes.
+ return content, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/util/time.go b/vendor/github.com/open-policy-agent/opa/v1/util/time.go
similarity index 100%
rename from vendor/github.com/open-policy-agent/opa/util/time.go
rename to vendor/github.com/open-policy-agent/opa/v1/util/time.go
diff --git a/vendor/github.com/open-policy-agent/opa/util/wait.go b/vendor/github.com/open-policy-agent/opa/v1/util/wait.go
similarity index 94%
rename from vendor/github.com/open-policy-agent/opa/util/wait.go
rename to vendor/github.com/open-policy-agent/opa/v1/util/wait.go
index b70ab6fcf9..b1ea84fd53 100644
--- a/vendor/github.com/open-policy-agent/opa/util/wait.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/util/wait.go
@@ -5,7 +5,7 @@
package util
import (
- "fmt"
+ "errors"
"time"
)
@@ -24,7 +24,7 @@ func WaitFunc(fun func() bool, interval, timeout time.Duration) error {
for {
select {
case <-timer.C:
- return fmt.Errorf("timeout")
+ return errors.New("timeout")
case <-ticker.C:
if fun() {
return nil
diff --git a/vendor/github.com/open-policy-agent/opa/version/version.go b/vendor/github.com/open-policy-agent/opa/v1/version/version.go
similarity index 79%
rename from vendor/github.com/open-policy-agent/opa/version/version.go
rename to vendor/github.com/open-policy-agent/opa/v1/version/version.go
index 862556bce0..2aef6b113f 100644
--- a/vendor/github.com/open-policy-agent/opa/version/version.go
+++ b/vendor/github.com/open-policy-agent/opa/v1/version/version.go
@@ -10,8 +10,7 @@ import (
"runtime/debug"
)
-// Version is the canonical version of OPA.
-var Version = "0.70.0"
+var Version = "1.10.1"
// GoVersion is the version of Go this was built with
var GoVersion = runtime.Version()
@@ -32,18 +31,28 @@ func init() {
if !ok {
return
}
- dirty := false
+ var dirty bool
+ var binTimestamp, binVcs string
+
for _, s := range bi.Settings {
switch s.Key {
case "vcs.time":
- Timestamp = s.Value
+ binTimestamp = s.Value
case "vcs.revision":
- Vcs = s.Value
+ binVcs = s.Value
case "vcs.modified":
dirty = s.Value == "true"
}
}
- if dirty {
- Vcs = Vcs + "-dirty"
+
+ if Timestamp == "" {
+ Timestamp = binTimestamp
+ }
+
+ if Vcs == "" {
+ Vcs = binVcs
+ if dirty {
+ Vcs += "-dirty"
+ }
}
}
diff --git a/vendor/github.com/open-policy-agent/opa/version/wasm.go b/vendor/github.com/open-policy-agent/opa/v1/version/wasm.go
similarity index 100%
rename from vendor/github.com/open-policy-agent/opa/version/wasm.go
rename to vendor/github.com/open-policy-agent/opa/v1/version/wasm.go
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
index ad347113c0..2331b8b4f3 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -95,7 +95,8 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const
help: help,
variableLabels: variableLabels.compile(),
}
- if !model.IsValidMetricName(model.LabelValue(fqName)) {
+ //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme.
+ if !model.NameValidationScheme.IsValidMetricName(fqName) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
return d
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
index 8b016355ad..7bac0da33d 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
@@ -453,7 +453,7 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
}
group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
}
- if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
+ if len(group) > 0 && (len(group) != 1 || group[0].Tag != 'e') {
groups = append(groups, group)
}
return groups
@@ -568,7 +568,7 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
buf := bufio.NewWriter(writer)
defer buf.Flush()
wf := func(format string, args ...interface{}) error {
- _, err := buf.WriteString(fmt.Sprintf(format, args...))
+ _, err := fmt.Fprintf(buf, format, args...)
return err
}
ws := func(s string) error {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
index f7f97ef926..d273b6640e 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
@@ -67,7 +67,7 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool)
}
// Our current conversion moves to legacy naming, so use legacy validation.
- valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name)
+ valid := model.LegacyValidation.IsValidMetricName(namespace + "_" + subsystem + "_" + name)
switch d.Kind {
case metrics.KindUint64:
case metrics.KindFloat64:
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
index c21911f292..5fe8d3b4d2 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -184,5 +184,6 @@ func validateLabelValues(vals []string, expectedNumberOfValues int) error {
}
func checkLabelName(l string) bool {
- return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
+ //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme.
+ return model.NameValidationScheme.IsValidLabelName(l) && !strings.HasPrefix(l, reservedLabelPrefix)
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
index 592eec3e24..76e59f1288 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -186,21 +186,31 @@ func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
case pb.Counter != nil:
pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1]
case pb.Histogram != nil:
+ h := pb.Histogram
for _, e := range m.exemplars {
- // pb.Histogram.Bucket are sorted by UpperBound.
- i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool {
- return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue()
+ if (h.GetZeroThreshold() != 0 || h.GetZeroCount() != 0 ||
+ len(h.PositiveSpan) != 0 || len(h.NegativeSpan) != 0) &&
+ e.GetTimestamp() != nil {
+ h.Exemplars = append(h.Exemplars, e)
+ if len(h.Bucket) == 0 {
+ // Don't proceed to classic buckets if there are none.
+ continue
+ }
+ }
+ // h.Bucket are sorted by UpperBound.
+ i := sort.Search(len(h.Bucket), func(i int) bool {
+ return h.Bucket[i].GetUpperBound() >= e.GetValue()
})
- if i < len(pb.Histogram.Bucket) {
- pb.Histogram.Bucket[i].Exemplar = e
+ if i < len(h.Bucket) {
+ h.Bucket[i].Exemplar = e
} else {
// The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
b := &dto.Bucket{
- CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()),
+ CumulativeCount: proto.Uint64(h.GetSampleCount()),
UpperBound: proto.Float64(math.Inf(1)),
Exemplar: e,
}
- pb.Histogram.Bucket = append(pb.Histogram.Bucket, b)
+ h.Bucket = append(h.Bucket, b)
}
}
default:
@@ -227,6 +237,7 @@ type Exemplar struct {
// Only last applicable exemplar is injected from the list.
// For example for Counter it means last exemplar is injected.
// For Histogram, it means last applicable exemplar for each bucket is injected.
+// For a Native Histogram, all valid exemplars are injected.
//
// NewMetricWithExemplars works best with MustNewConstMetric and
// MustNewConstHistogram, see example.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go
index 0a61b98461..b32c95fa3f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go
@@ -25,9 +25,9 @@ import (
"golang.org/x/sys/unix"
)
-// notImplementedErr is returned by stub functions that replace cgo functions, when cgo
+// errNotImplemented is returned by stub functions that replace cgo functions, when cgo
// isn't available.
-var notImplementedErr = errors.New("not implemented")
+var errNotImplemented = errors.New("not implemented")
type memoryInfo struct {
vsize uint64 // Virtual memory size in bytes
@@ -101,7 +101,7 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
if memInfo, err := getMemory(); err == nil {
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss))
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize))
- } else if !errors.Is(err, notImplementedErr) {
+ } else if !errors.Is(err, errNotImplemented) {
// Don't report an error when support is not compiled in.
c.reportError(ch, c.rss, err)
c.reportError(ch, c.vsize, err)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go
index 8ddb0995d6..378865129b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go
@@ -16,7 +16,7 @@
package prometheus
func getMemory() (*memoryInfo, error) {
- return nil, notImplementedErr
+ return nil, errNotImplemented
}
// describe returns all descriptions of the collector for Darwin.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go
index 9f4b130bef..8074f70f5d 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go
@@ -66,11 +66,11 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
if netstat, err := p.Netstat(); err == nil {
var inOctets, outOctets float64
- if netstat.IpExt.InOctets != nil {
- inOctets = *netstat.IpExt.InOctets
+ if netstat.InOctets != nil {
+ inOctets = *netstat.InOctets
}
- if netstat.IpExt.OutOctets != nil {
- outOctets = *netstat.IpExt.OutOctets
+ if netstat.OutOctets != nil {
+ outOctets = *netstat.OutOctets
}
ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets)
ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
index 2c808eece0..487b466563 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -79,7 +79,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
return false
}
- return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
+ return m.deleteByHashWithLabelValues(h, lvs, m.curry)
}
// Delete deletes the metric where the variable labels are the same as those
@@ -101,7 +101,7 @@ func (m *MetricVec) Delete(labels Labels) bool {
return false
}
- return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
+ return m.deleteByHashWithLabels(h, labels, m.curry)
}
// DeletePartialMatch deletes all metrics where the variable labels contain all of those
@@ -114,7 +114,7 @@ func (m *MetricVec) DeletePartialMatch(labels Labels) int {
labels, closer := constrainLabels(m.desc, labels)
defer closer()
- return m.metricMap.deleteByLabels(labels, m.curry)
+ return m.deleteByLabels(labels, m.curry)
}
// Without explicit forwarding of Describe, Collect, Reset, those methods won't
@@ -216,7 +216,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
return nil, err
}
- return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
+ return m.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
}
// GetMetricWith returns the Metric for the given Labels map (the label names
@@ -244,7 +244,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
return nil, err
}
- return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
+ return m.getOrCreateMetricWithLabels(h, labels, m.curry), nil
}
func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
index 25da157f15..2ed1285068 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
@@ -63,7 +63,7 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
// metric names that are standardized across applications, as that would break
// horizontal monitoring, for example the metrics provided by the Go collector
// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
-// fact, those metrics are already prefixed with “go_” or “process_”,
+// fact, those metrics are already prefixed with "go_" or "process_",
// respectively.)
//
// Conflicts between Collectors registered through the original Registerer with
@@ -78,6 +78,40 @@ func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
}
}
+// WrapCollectorWith returns a Collector wrapping the provided Collector. The
+// wrapped Collector will add the provided Labels to all Metrics it collects (as
+// ConstLabels). The Metrics collected by the unmodified Collector must not
+// duplicate any of those labels.
+//
+// WrapCollectorWith can be useful to work with multiple instances of a third
+// party library that does not expose enough flexibility on the lifecycle of its
+// registered metrics.
+// For example, let's say you have a foo.New(reg Registerer) constructor that
+// registers metrics but never unregisters them, and you want to create multiple
+// instances of foo.Foo with different labels.
+// The way to achieve that, is to create a new Registry, pass it to foo.New,
+// then use WrapCollectorWith to wrap that Registry with the desired labels and
+// register that as a collector in your main Registry.
+// Then you can un-register the wrapped collector effectively un-registering the
+// metrics registered by foo.New.
+func WrapCollectorWith(labels Labels, c Collector) Collector {
+ return &wrappingCollector{
+ wrappedCollector: c,
+ labels: labels,
+ }
+}
+
+// WrapCollectorWithPrefix returns a Collector wrapping the provided Collector. The
+// wrapped Collector will add the provided prefix to the name of all Metrics it collects.
+//
+// See the documentation of WrapCollectorWith for more details on the use case.
+func WrapCollectorWithPrefix(prefix string, c Collector) Collector {
+ return &wrappingCollector{
+ wrappedCollector: c,
+ prefix: prefix,
+ }
+}
+
type wrappingRegisterer struct {
wrappedRegisterer Registerer
prefix string
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
index 1448439b7f..7b762370e2 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -70,19 +70,34 @@ func ResponseFormat(h http.Header) Format {
return FmtUnknown
}
-// NewDecoder returns a new decoder based on the given input format.
-// If the input format does not imply otherwise, a text format decoder is returned.
+// NewDecoder returns a new decoder based on the given input format. Metric
+// names are validated based on the provided Format -- if the format requires
+// escaping, raditional Prometheues validity checking is used. Otherwise, names
+// are checked for UTF-8 validity. Supported formats include delimited protobuf
+// and Prometheus text format. For historical reasons, this decoder fallbacks
+// to classic text decoding for any other format. This decoder does not fully
+// support OpenMetrics although it may often succeed due to the similarities
+// between the formats. This decoder may not support the latest features of
+// Prometheus text format and is not intended for high-performance applications.
+// See: https://github.com/prometheus/common/issues/812
func NewDecoder(r io.Reader, format Format) Decoder {
+ scheme := model.LegacyValidation
+ if format.ToEscapingScheme() == model.NoEscaping {
+ scheme = model.UTF8Validation
+ }
switch format.FormatType() {
case TypeProtoDelim:
- return &protoDecoder{r: bufio.NewReader(r)}
+ return &protoDecoder{r: bufio.NewReader(r), s: scheme}
+ case TypeProtoText, TypeProtoCompact:
+ return &errDecoder{err: fmt.Errorf("format %s not supported for decoding", format)}
}
- return &textDecoder{r: r}
+ return &textDecoder{r: r, s: scheme}
}
// protoDecoder implements the Decoder interface for protocol buffers.
type protoDecoder struct {
r protodelim.Reader
+ s model.ValidationScheme
}
// Decode implements the Decoder interface.
@@ -93,7 +108,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
if err := opts.UnmarshalFrom(d.r, v); err != nil {
return err
}
- if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+ if !d.s.IsValidMetricName(v.GetName()) {
return fmt.Errorf("invalid metric name %q", v.GetName())
}
for _, m := range v.GetMetric() {
@@ -107,7 +122,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
if !model.LabelValue(l.GetValue()).IsValid() {
return fmt.Errorf("invalid label value %q", l.GetValue())
}
- if !model.LabelName(l.GetName()).IsValid() {
+ if !d.s.IsValidLabelName(l.GetName()) {
return fmt.Errorf("invalid label name %q", l.GetName())
}
}
@@ -115,10 +130,20 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
return nil
}
+// errDecoder is an error-state decoder that always returns the same error.
+type errDecoder struct {
+ err error
+}
+
+func (d *errDecoder) Decode(*dto.MetricFamily) error {
+ return d.err
+}
+
// textDecoder implements the Decoder interface for the text protocol.
type textDecoder struct {
r io.Reader
fams map[string]*dto.MetricFamily
+ s model.ValidationScheme
err error
}
@@ -126,7 +151,7 @@ type textDecoder struct {
func (d *textDecoder) Decode(v *dto.MetricFamily) error {
if d.err == nil {
// Read all metrics in one shot.
- var p TextParser
+ p := NewTextParser(d.s)
d.fams, d.err = p.TextToMetricFamilies(d.r)
// If we don't get an error, store io.EOF for the end.
if d.err == nil {
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
index d7f3d76f55..73c24dfbc9 100644
--- a/vendor/github.com/prometheus/common/expfmt/encode.go
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -18,14 +18,12 @@ import (
"io"
"net/http"
+ "github.com/munnerz/goautoneg"
+ dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/encoding/protodelim"
"google.golang.org/protobuf/encoding/prototext"
"github.com/prometheus/common/model"
-
- "github.com/munnerz/goautoneg"
-
- dto "github.com/prometheus/client_model/go"
)
// Encoder types encode metric families into an underlying wire protocol.
@@ -61,7 +59,7 @@ func (ec encoderCloser) Close() error {
// appropriate accepted type is found, FmtText is returned (which is the
// Prometheus text format). This function will never negotiate FmtOpenMetrics,
// as the support is still experimental. To include the option to negotiate
-// FmtOpenMetrics, use NegotiateOpenMetrics.
+// FmtOpenMetrics, use NegotiateIncludingOpenMetrics.
func Negotiate(h http.Header) Format {
escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
@@ -153,7 +151,7 @@ func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder {
case TypeProtoDelim:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
- _, err := protodelim.MarshalTo(w, v)
+ _, err := protodelim.MarshalTo(w, model.EscapeMetricFamily(v, escapingScheme))
return err
},
close: func() error { return nil },
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
index b26886560d..c34c7de432 100644
--- a/vendor/github.com/prometheus/common/expfmt/expfmt.go
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -36,9 +36,11 @@ const (
ProtoType = `application/vnd.google.protobuf`
ProtoProtocol = `io.prometheus.client.MetricFamily`
// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
- ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
- OpenMetricsType = `application/openmetrics-text`
+ ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+ OpenMetricsType = `application/openmetrics-text`
+ //nolint:revive // Allow for underscores.
OpenMetricsVersion_0_0_1 = "0.0.1"
+ //nolint:revive // Allow for underscores.
OpenMetricsVersion_1_0_0 = "1.0.0"
// The Content-Type values for the different wire protocols. Do not do direct
@@ -54,8 +56,10 @@ const (
// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
// Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
+ //nolint:revive // Allow for underscores.
FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
// Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
+ //nolint:revive // Allow for underscores.
FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
)
@@ -188,8 +192,8 @@ func (f Format) FormatType() FormatType {
// Format contains a escaping=allow-utf-8 term, it will select NoEscaping. If a valid
// "escaping" term exists, that will be used. Otherwise, the global default will
// be returned.
-func (format Format) ToEscapingScheme() model.EscapingScheme {
- for _, p := range strings.Split(string(format), ";") {
+func (f Format) ToEscapingScheme() model.EscapingScheme {
+ for _, p := range strings.Split(string(f), ";") {
toks := strings.Split(p, "=")
if len(toks) != 2 {
continue
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
index dfac962a4e..0290f6abc4 100644
--- a/vendor/github.com/prometheus/common/expfmt/fuzz.go
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -17,7 +17,11 @@
package expfmt
-import "bytes"
+import (
+ "bytes"
+
+ "github.com/prometheus/common/model"
+)
// Fuzz text metric parser with with github.com/dvyukov/go-fuzz:
//
@@ -26,9 +30,8 @@ import "bytes"
//
// Further input samples should go in the folder fuzz/corpus.
func Fuzz(in []byte) int {
- parser := TextParser{}
+ parser := NewTextParser(model.UTF8Validation)
_, err := parser.TextToMetricFamilies(bytes.NewReader(in))
-
if err != nil {
return 0
}
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
index a21ed4ec1f..8dbf6d04ed 100644
--- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
@@ -22,11 +22,10 @@ import (
"strconv"
"strings"
+ dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/prometheus/common/model"
-
- dto "github.com/prometheus/client_model/go"
)
type encoderOption struct {
@@ -249,7 +248,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E
// Finally the samples, one line for each.
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") {
- compliantName = compliantName + "_total"
+ compliantName += "_total"
}
for _, metric := range in.Metric {
switch metricType {
@@ -477,7 +476,7 @@ func writeOpenMetricsNameAndLabelPairs(
if name != "" {
// If the name does not pass the legacy validity check, we must put the
// metric name inside the braces, quoted.
- if !model.IsValidLegacyMetricName(name) {
+ if !model.LegacyValidation.IsValidMetricName(name) {
metricInsideBraces = true
err := w.WriteByte(separator)
written++
@@ -641,11 +640,11 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
if err != nil {
return written, err
}
- err = (*e).Timestamp.CheckValid()
+ err = e.Timestamp.CheckValid()
if err != nil {
return written, err
}
- ts := (*e).Timestamp.AsTime()
+ ts := e.Timestamp.AsTime()
// TODO(beorn7): Format this directly from components of ts to
// avoid overflow/underflow and precision issues of the float
// conversion.
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
index 4b86434b33..c4e9c1bbc3 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -22,9 +22,9 @@ import (
"strings"
"sync"
- "github.com/prometheus/common/model"
-
dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/common/model"
)
// enhancedWriter has all the enhanced write functions needed here. bufio.Writer
@@ -354,7 +354,7 @@ func writeNameAndLabelPairs(
if name != "" {
// If the name does not pass the legacy validity check, we must put the
// metric name inside the braces.
- if !model.IsValidLegacyMetricName(name) {
+ if !model.LegacyValidation.IsValidMetricName(name) {
metricInsideBraces = true
err := w.WriteByte(separator)
written++
@@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) {
// writeName writes a string as-is if it complies with the legacy naming
// scheme, or escapes it in double quotes if not.
func writeName(w enhancedWriter, name string) (int, error) {
- if model.IsValidLegacyMetricName(name) {
+ if model.LegacyValidation.IsValidMetricName(name) {
return w.WriteString(name)
}
var written int
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index b4607fe4d2..8f2edde324 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -78,6 +78,14 @@ type TextParser struct {
// These indicate if the metric name from the current line being parsed is inside
// braces and if that metric name was found respectively.
currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool
+ // scheme sets the desired ValidationScheme for names. Defaults to the invalid
+ // UnsetValidation.
+ scheme model.ValidationScheme
+}
+
+// NewTextParser returns a new TextParser with the provided nameValidationScheme.
+func NewTextParser(nameValidationScheme model.ValidationScheme) TextParser {
+ return TextParser{scheme: nameValidationScheme}
}
// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
@@ -126,6 +134,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF
func (p *TextParser) reset(in io.Reader) {
p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+ p.currentLabelPairs = nil
if p.buf == nil {
p.buf = bufio.NewReader(in)
} else {
@@ -216,6 +225,9 @@ func (p *TextParser) startComment() stateFn {
return nil
}
p.setOrCreateCurrentMF()
+ if p.err != nil {
+ return nil
+ }
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
@@ -244,6 +256,9 @@ func (p *TextParser) readingMetricName() stateFn {
return nil
}
p.setOrCreateCurrentMF()
+ if p.err != nil {
+ return nil
+ }
// Now is the time to fix the type if it hasn't happened yet.
if p.currentMF.Type == nil {
p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
@@ -311,6 +326,9 @@ func (p *TextParser) startLabelName() stateFn {
switch p.currentByte {
case ',':
p.setOrCreateCurrentMF()
+ if p.err != nil {
+ return nil
+ }
if p.currentMF.Type == nil {
p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
}
@@ -319,6 +337,10 @@ func (p *TextParser) startLabelName() stateFn {
return p.startLabelName
case '}':
p.setOrCreateCurrentMF()
+ if p.err != nil {
+ p.currentLabelPairs = nil
+ return nil
+ }
if p.currentMF.Type == nil {
p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
}
@@ -341,25 +363,30 @@ func (p *TextParser) startLabelName() stateFn {
p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
+ p.currentLabelPairs = nil
+ return nil
+ }
+ if !p.scheme.IsValidLabelName(p.currentLabelPair.GetName()) {
+ p.parseError(fmt.Sprintf("invalid label name %q", p.currentLabelPair.GetName()))
+ p.currentLabelPairs = nil
return nil
}
// Special summary/histogram treatment. Don't add 'quantile' and 'le'
// labels to 'real' labels.
- if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
- !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
+ if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) &&
+ (p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) {
p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
}
// Check for duplicate label names.
labels := make(map[string]struct{})
for _, l := range p.currentLabelPairs {
lName := l.GetName()
- if _, exists := labels[lName]; !exists {
- labels[lName] = struct{}{}
- } else {
+ if _, exists := labels[lName]; exists {
p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
p.currentLabelPairs = nil
return nil
}
+ labels[lName] = struct{}{}
}
return p.startLabelValue
}
@@ -440,7 +467,8 @@ func (p *TextParser) readingValue() stateFn {
// When we are here, we have read all the labels, so for the
// special case of a summary/histogram, we can finally find out
// if the metric already exists.
- if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ switch p.currentMF.GetType() {
+ case dto.MetricType_SUMMARY:
signature := model.LabelsToSignature(p.currentLabels)
if summary := p.summaries[signature]; summary != nil {
p.currentMetric = summary
@@ -448,7 +476,7 @@ func (p *TextParser) readingValue() stateFn {
p.summaries[signature] = p.currentMetric
p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
}
- } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ case dto.MetricType_HISTOGRAM:
signature := model.LabelsToSignature(p.currentLabels)
if histogram := p.histograms[signature]; histogram != nil {
p.currentMetric = histogram
@@ -456,7 +484,7 @@ func (p *TextParser) readingValue() stateFn {
p.histograms[signature] = p.currentMetric
p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
}
- } else {
+ default:
p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
}
if p.readTokenUntilWhitespace(); p.err != nil {
@@ -805,6 +833,10 @@ func (p *TextParser) setOrCreateCurrentMF() {
p.currentIsHistogramCount = false
p.currentIsHistogramSum = false
name := p.currentToken.String()
+ if !p.scheme.IsValidMetricName(name) {
+ p.parseError(fmt.Sprintf("invalid metric name %q", name))
+ return
+ }
if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
return
}
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
index bd3a39e3e1..460f554f29 100644
--- a/vendor/github.com/prometheus/common/model/alert.go
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -65,7 +65,7 @@ func (a *Alert) Resolved() bool {
return a.ResolvedAt(time.Now())
}
-// ResolvedAt returns true off the activity interval ended before
+// ResolvedAt returns true iff the activity interval ended before
// the given timestamp.
func (a *Alert) ResolvedAt(ts time.Time) bool {
if a.EndsAt.IsZero() {
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
index 73b7aa3e60..dfeb34be5f 100644
--- a/vendor/github.com/prometheus/common/model/labels.go
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -22,7 +22,7 @@ import (
)
const (
- // AlertNameLabel is the name of the label containing the an alert's name.
+ // AlertNameLabel is the name of the label containing the alert's name.
AlertNameLabel = "alertname"
// ExportedLabelPrefix is the prefix to prepend to the label names present in
@@ -32,6 +32,12 @@ const (
// MetricNameLabel is the label name indicating the metric name of a
// timeseries.
MetricNameLabel = "__name__"
+ // MetricTypeLabel is the label name indicating the metric type of
+ // timeseries as per the PROM-39 proposal.
+ MetricTypeLabel = "__type__"
+ // MetricUnitLabel is the label name indicating the metric unit of
+ // timeseries as per the PROM-39 proposal.
+ MetricUnitLabel = "__unit__"
// SchemeLabel is the name of the label that holds the scheme on which to
// scrape a target.
@@ -100,33 +106,21 @@ type LabelName string
// IsValid returns true iff the name matches the pattern of LabelNameRE when
// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if
// NameValidationScheme is set to UTF8Validation.
+//
+// Deprecated: This method should not be used and may be removed in the future.
+// Use [ValidationScheme.IsValidLabelName] instead.
func (ln LabelName) IsValid() bool {
- if len(ln) == 0 {
- return false
- }
- switch NameValidationScheme {
- case LegacyValidation:
- return ln.IsValidLegacy()
- case UTF8Validation:
- return utf8.ValidString(string(ln))
- default:
- panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
- }
+ return NameValidationScheme.IsValidLabelName(string(ln))
}
// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for
// legacy names. It does not use LabelNameRE for the check but a much faster
// hardcoded implementation.
+//
+// Deprecated: This method should not be used and may be removed in the future.
+// Use [LegacyValidation.IsValidLabelName] instead.
func (ln LabelName) IsValidLegacy() bool {
- if len(ln) == 0 {
- return false
- }
- for i, b := range ln {
- if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
- return false
- }
- }
- return true
+ return LegacyValidation.IsValidLabelName(string(ln))
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
index d0ad88da33..9de47b2568 100644
--- a/vendor/github.com/prometheus/common/model/labelset.go
+++ b/vendor/github.com/prometheus/common/model/labelset.go
@@ -114,10 +114,10 @@ func (ls LabelSet) Clone() LabelSet {
}
// Merge is a helper function to non-destructively merge two label sets.
-func (l LabelSet) Merge(other LabelSet) LabelSet {
- result := make(LabelSet, len(l))
+func (ls LabelSet) Merge(other LabelSet) LabelSet {
+ result := make(LabelSet, len(ls))
- for k, v := range l {
+ for k, v := range ls {
result[k] = v
}
@@ -140,7 +140,7 @@ func (ls LabelSet) FastFingerprint() Fingerprint {
}
// UnmarshalJSON implements the json.Unmarshaler interface.
-func (l *LabelSet) UnmarshalJSON(b []byte) error {
+func (ls *LabelSet) UnmarshalJSON(b []byte) error {
var m map[LabelName]LabelValue
if err := json.Unmarshal(b, &m); err != nil {
return err
@@ -153,6 +153,6 @@ func (l *LabelSet) UnmarshalJSON(b []byte) error {
return fmt.Errorf("%q is not a valid label name", ln)
}
}
- *l = LabelSet(m)
+ *ls = LabelSet(m)
return nil
}
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
index 5766107cf9..3feebf328a 100644
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -14,6 +14,7 @@
package model
import (
+ "encoding/json"
"errors"
"fmt"
"regexp"
@@ -23,17 +24,30 @@ import (
"unicode/utf8"
dto "github.com/prometheus/client_model/go"
+ "go.yaml.in/yaml/v2"
"google.golang.org/protobuf/proto"
)
var (
- // NameValidationScheme determines the method of name validation to be used by
- // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8
- // mode in isolation from other components that don't support UTF-8 may result
- // in bugs or other undefined behavior. This value can be set to
- // LegacyValidation during startup if a binary is not UTF-8-aware binaries. To
- // avoid need for locking, this value should be set once, ideally in an
- // init(), before multiple goroutines are started.
+ // NameValidationScheme determines the global default method of the name
+ // validation to be used by all calls to IsValidMetricName() and LabelName
+ // IsValid().
+ //
+ // Deprecated: This variable should not be used and might be removed in the
+ // far future. If you wish to stick to the legacy name validation use
+ // `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods
+ // instead. This variable is here as an escape hatch for emergency cases,
+ // given the recent change from `LegacyValidation` to `UTF8Validation`, e.g.,
+ // to delay UTF-8 migrations in time or aid in debugging unforeseen results of
+ // the change. In such a case, a temporary assignment to `LegacyValidation`
+ // value in the `init()` function in your main.go or so, could be considered.
+ //
+ // Historically we opted for a global variable for feature gating different
+ // validation schemes in operations that were not otherwise easily adjustable
+ // (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate
+ // Labels structure or package might have been a better choice. Given the
+ // change was made and many upgraded the common already, we live this as-is
+ // with this warning and learning for the future.
NameValidationScheme = UTF8Validation
// NameEscapingScheme defines the default way that names will be escaped when
@@ -50,16 +64,151 @@ var (
type ValidationScheme int
const (
- // LegacyValidation is a setting that requirets that metric and label names
+ // UnsetValidation represents an undefined ValidationScheme.
+ // Should not be used in practice.
+ UnsetValidation ValidationScheme = iota
+
+ // LegacyValidation is a setting that requires that all metric and label names
// conform to the original Prometheus character requirements described by
// MetricNameRE and LabelNameRE.
- LegacyValidation ValidationScheme = iota
+ LegacyValidation
// UTF8Validation only requires that metric and label names be valid UTF-8
// strings.
UTF8Validation
)
+var _ interface {
+ yaml.Marshaler
+ yaml.Unmarshaler
+ json.Marshaler
+ json.Unmarshaler
+ fmt.Stringer
+} = new(ValidationScheme)
+
+// String returns the string representation of s.
+func (s ValidationScheme) String() string {
+ switch s {
+ case UnsetValidation:
+ return "unset"
+ case LegacyValidation:
+ return "legacy"
+ case UTF8Validation:
+ return "utf8"
+ default:
+ panic(fmt.Errorf("unhandled ValidationScheme: %d", s))
+ }
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (s ValidationScheme) MarshalYAML() (any, error) {
+ switch s {
+ case UnsetValidation:
+ return "", nil
+ case LegacyValidation, UTF8Validation:
+ return s.String(), nil
+ default:
+ panic(fmt.Errorf("unhandled ValidationScheme: %d", s))
+ }
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (s *ValidationScheme) UnmarshalYAML(unmarshal func(any) error) error {
+ var scheme string
+ if err := unmarshal(&scheme); err != nil {
+ return err
+ }
+ return s.Set(scheme)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s ValidationScheme) MarshalJSON() ([]byte, error) {
+ switch s {
+ case UnsetValidation:
+ return json.Marshal("")
+ case UTF8Validation, LegacyValidation:
+ return json.Marshal(s.String())
+ default:
+ return nil, fmt.Errorf("unhandled ValidationScheme: %d", s)
+ }
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (s *ValidationScheme) UnmarshalJSON(bytes []byte) error {
+ var repr string
+ if err := json.Unmarshal(bytes, &repr); err != nil {
+ return err
+ }
+ return s.Set(repr)
+}
+
+// Set implements the pflag.Value interface.
+func (s *ValidationScheme) Set(text string) error {
+ switch text {
+ case "":
+ // Don't change the value.
+ case LegacyValidation.String():
+ *s = LegacyValidation
+ case UTF8Validation.String():
+ *s = UTF8Validation
+ default:
+ return fmt.Errorf("unrecognized ValidationScheme: %q", text)
+ }
+ return nil
+}
+
+// IsValidMetricName returns whether metricName is valid according to s.
+func (s ValidationScheme) IsValidMetricName(metricName string) bool {
+ switch s {
+ case LegacyValidation:
+ if len(metricName) == 0 {
+ return false
+ }
+ for i, b := range metricName {
+ if !isValidLegacyRune(b, i) {
+ return false
+ }
+ }
+ return true
+ case UTF8Validation:
+ if len(metricName) == 0 {
+ return false
+ }
+ return utf8.ValidString(metricName)
+ default:
+ panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s.String()))
+ }
+}
+
+// IsValidLabelName returns whether labelName is valid according to s.
+func (s ValidationScheme) IsValidLabelName(labelName string) bool {
+ switch s {
+ case LegacyValidation:
+ if len(labelName) == 0 {
+ return false
+ }
+ for i, b := range labelName {
+ // TODO: Apply De Morgan's law. Make sure there are tests for this.
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck
+ return false
+ }
+ }
+ return true
+ case UTF8Validation:
+ if len(labelName) == 0 {
+ return false
+ }
+ return utf8.ValidString(labelName)
+ default:
+ panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s))
+ }
+}
+
+// Type implements the pflag.Value interface.
+func (ValidationScheme) Type() string {
+ return "validationScheme"
+}
+
type EscapingScheme int
const (
@@ -89,7 +238,7 @@ const (
// Accept header, the default NameEscapingScheme will be used.
EscapingKey = "escaping"
- // Possible values for Escaping Key:
+ // Possible values for Escaping Key.
AllowUTF8 = "allow-utf-8" // No escaping required.
EscapeUnderscores = "underscores"
EscapeDots = "dots"
@@ -163,34 +312,22 @@ func (m Metric) FastFingerprint() Fingerprint {
// IsValidMetricName returns true iff name matches the pattern of MetricNameRE
// for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is
// selected.
+//
+// Deprecated: This function should not be used and might be removed in the future.
+// Use [ValidationScheme.IsValidMetricName] instead.
func IsValidMetricName(n LabelValue) bool {
- switch NameValidationScheme {
- case LegacyValidation:
- return IsValidLegacyMetricName(string(n))
- case UTF8Validation:
- if len(n) == 0 {
- return false
- }
- return utf8.ValidString(string(n))
- default:
- panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
- }
+ return NameValidationScheme.IsValidMetricName(string(n))
}
// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the
// legacy validation scheme regardless of the value of NameValidationScheme.
// This function, however, does not use MetricNameRE for the check but a much
// faster hardcoded implementation.
+//
+// Deprecated: This function should not be used and might be removed in the future.
+// Use [LegacyValidation.IsValidMetricName] instead.
func IsValidLegacyMetricName(n string) bool {
- if len(n) == 0 {
- return false
- }
- for i, b := range n {
- if !isValidLegacyRune(b, i) {
- return false
- }
- }
- return true
+ return LegacyValidation.IsValidMetricName(n)
}
// EscapeMetricFamily escapes the given metric names and labels with the given
@@ -298,13 +435,14 @@ func EscapeName(name string, scheme EscapingScheme) string {
case DotsEscaping:
// Do not early return for legacy valid names, we still escape underscores.
for i, b := range name {
- if b == '_' {
+ switch {
+ case b == '_':
escaped.WriteString("__")
- } else if b == '.' {
+ case b == '.':
escaped.WriteString("_dot_")
- } else if isValidLegacyRune(b, i) {
+ case isValidLegacyRune(b, i):
escaped.WriteRune(b)
- } else {
+ default:
escaped.WriteString("__")
}
}
@@ -315,13 +453,14 @@ func EscapeName(name string, scheme EscapingScheme) string {
}
escaped.WriteString("U__")
for i, b := range name {
- if b == '_' {
+ switch {
+ case b == '_':
escaped.WriteString("__")
- } else if isValidLegacyRune(b, i) {
+ case isValidLegacyRune(b, i):
escaped.WriteRune(b)
- } else if !utf8.ValidRune(b) {
+ case !utf8.ValidRune(b):
escaped.WriteString("_FFFD_")
- } else {
+ default:
escaped.WriteRune('_')
escaped.WriteString(strconv.FormatInt(int64(b), 16))
escaped.WriteRune('_')
@@ -333,7 +472,7 @@ func EscapeName(name string, scheme EscapingScheme) string {
}
}
-// lower function taken from strconv.atoi
+// lower function taken from strconv.atoi.
func lower(c byte) byte {
return c | ('x' - 'X')
}
@@ -397,11 +536,12 @@ func UnescapeName(name string, scheme EscapingScheme) string {
}
r := lower(escapedName[i])
utf8Val *= 16
- if r >= '0' && r <= '9' {
+ switch {
+ case r >= '0' && r <= '9':
utf8Val += uint(r) - '0'
- } else if r >= 'a' && r <= 'f' {
+ case r >= 'a' && r <= 'f':
utf8Val += uint(r) - 'a' + 10
- } else {
+ default:
return name
}
i++
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
index 5727452c1e..1730b0fdc1 100644
--- a/vendor/github.com/prometheus/common/model/time.go
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -126,14 +126,14 @@ func (t *Time) UnmarshalJSON(b []byte) error {
p := strings.Split(string(b), ".")
switch len(p) {
case 1:
- v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ v, err := strconv.ParseInt(p[0], 10, 64)
if err != nil {
return err
}
*t = Time(v * second)
case 2:
- v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ v, err := strconv.ParseInt(p[0], 10, 64)
if err != nil {
return err
}
@@ -143,7 +143,7 @@ func (t *Time) UnmarshalJSON(b []byte) error {
if prec < 0 {
p[1] = p[1][:dotPrecision]
} else if prec > 0 {
- p[1] = p[1] + strings.Repeat("0", prec)
+ p[1] += strings.Repeat("0", prec)
}
va, err := strconv.ParseInt(p[1], 10, 32)
@@ -170,15 +170,15 @@ func (t *Time) UnmarshalJSON(b []byte) error {
// This type should not propagate beyond the scope of input/output processing.
type Duration time.Duration
-// Set implements pflag/flag.Value
+// Set implements pflag/flag.Value.
func (d *Duration) Set(s string) error {
var err error
*d, err = ParseDuration(s)
return err
}
-// Type implements pflag.Value
-func (d *Duration) Type() string {
+// Type implements pflag.Value.
+func (*Duration) Type() string {
return "duration"
}
@@ -201,6 +201,7 @@ var unitMap = map[string]struct {
// ParseDuration parses a string into a time.Duration, assuming that a year
// always has 365d, a week always has 7d, and a day always has 24h.
+// Negative durations are not supported.
func ParseDuration(s string) (Duration, error) {
switch s {
case "0":
@@ -253,18 +254,36 @@ func ParseDuration(s string) (Duration, error) {
return 0, errors.New("duration out of range")
}
}
+
return Duration(dur), nil
}
+// ParseDurationAllowNegative is like ParseDuration but also accepts negative durations.
+func ParseDurationAllowNegative(s string) (Duration, error) {
+ if s == "" || s[0] != '-' {
+ return ParseDuration(s)
+ }
+
+ d, err := ParseDuration(s[1:])
+
+ return -d, err
+}
+
func (d Duration) String() string {
var (
- ms = int64(time.Duration(d) / time.Millisecond)
- r = ""
+ ms = int64(time.Duration(d) / time.Millisecond)
+ r = ""
+ sign = ""
)
+
if ms == 0 {
return "0s"
}
+ if ms < 0 {
+ sign, ms = "-", -ms
+ }
+
f := func(unit string, mult int64, exact bool) {
if exact && ms%mult != 0 {
return
@@ -286,7 +305,7 @@ func (d Duration) String() string {
f("s", 1000, false)
f("ms", 1, false)
- return r
+ return sign + r
}
// MarshalJSON implements the json.Marshaler interface.
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
index 8050637d82..a9995a37ee 100644
--- a/vendor/github.com/prometheus/common/model/value.go
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -191,7 +191,8 @@ func (ss SampleStream) String() string {
}
func (ss SampleStream) MarshalJSON() ([]byte, error) {
- if len(ss.Histograms) > 0 && len(ss.Values) > 0 {
+ switch {
+ case len(ss.Histograms) > 0 && len(ss.Values) > 0:
v := struct {
Metric Metric `json:"metric"`
Values []SamplePair `json:"values"`
@@ -202,7 +203,7 @@ func (ss SampleStream) MarshalJSON() ([]byte, error) {
Histograms: ss.Histograms,
}
return json.Marshal(&v)
- } else if len(ss.Histograms) > 0 {
+ case len(ss.Histograms) > 0:
v := struct {
Metric Metric `json:"metric"`
Histograms []SampleHistogramPair `json:"histograms"`
@@ -211,7 +212,7 @@ func (ss SampleStream) MarshalJSON() ([]byte, error) {
Histograms: ss.Histograms,
}
return json.Marshal(&v)
- } else {
+ default:
v := struct {
Metric Metric `json:"metric"`
Values []SamplePair `json:"values"`
@@ -258,7 +259,7 @@ func (s Scalar) String() string {
// MarshalJSON implements json.Marshaler.
func (s Scalar) MarshalJSON() ([]byte, error) {
v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
- return json.Marshal([...]interface{}{s.Timestamp, string(v)})
+ return json.Marshal([...]interface{}{s.Timestamp, v})
}
// UnmarshalJSON implements json.Unmarshaler.
@@ -349,9 +350,9 @@ func (m Matrix) Len() int { return len(m) }
func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
-func (mat Matrix) String() string {
- matCp := make(Matrix, len(mat))
- copy(matCp, mat)
+func (m Matrix) String() string {
+ matCp := make(Matrix, len(m))
+ copy(matCp, m)
sort.Sort(matCp)
strs := make([]string, len(matCp))
diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go
index 895e6a3e83..91ce5b7a45 100644
--- a/vendor/github.com/prometheus/common/model/value_histogram.go
+++ b/vendor/github.com/prometheus/common/model/value_histogram.go
@@ -86,22 +86,22 @@ func (s *HistogramBucket) Equal(o *HistogramBucket) bool {
return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count)
}
-func (b HistogramBucket) String() string {
+func (s HistogramBucket) String() string {
var sb strings.Builder
- lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3
- upperInclusive := b.Boundaries == 0 || b.Boundaries == 3
+ lowerInclusive := s.Boundaries == 1 || s.Boundaries == 3
+ upperInclusive := s.Boundaries == 0 || s.Boundaries == 3
if lowerInclusive {
sb.WriteRune('[')
} else {
sb.WriteRune('(')
}
- fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper)
+ fmt.Fprintf(&sb, "%g,%g", s.Lower, s.Upper)
if upperInclusive {
sb.WriteRune(']')
} else {
sb.WriteRune(')')
}
- fmt.Fprintf(&sb, ":%v", b.Count)
+ fmt.Fprintf(&sb, ":%v", s.Count)
return sb.String()
}
diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go
index 726c50ee63..078910f46b 100644
--- a/vendor/github.com/prometheus/common/model/value_type.go
+++ b/vendor/github.com/prometheus/common/model/value_type.go
@@ -66,8 +66,8 @@ func (et *ValueType) UnmarshalJSON(b []byte) error {
return nil
}
-func (e ValueType) String() string {
- switch e {
+func (et ValueType) String() string {
+ switch et {
case ValNone:
return ""
case ValScalar:
diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml
index 126df9e67a..3c3bf910fd 100644
--- a/vendor/github.com/prometheus/procfs/.golangci.yml
+++ b/vendor/github.com/prometheus/procfs/.golangci.yml
@@ -1,22 +1,45 @@
----
+version: "2"
linters:
enable:
- - errcheck
- - godot
- - gosimple
- - govet
- - ineffassign
- - misspell
- - revive
- - staticcheck
- - testifylint
- - unused
-
-linter-settings:
- godot:
- capital: true
- exclude:
- # Ignore "See: URL"
- - 'See:'
- misspell:
- locale: US
+ - forbidigo
+ - godot
+ - misspell
+ - revive
+ - testifylint
+ settings:
+ forbidigo:
+ forbid:
+ - pattern: ^fmt\.Print.*$
+ msg: Do not commit print statements.
+ godot:
+ exclude:
+ # Ignore "See: URL".
+ - 'See:'
+ capital: true
+ misspell:
+ locale: US
+ exclusions:
+ generated: lax
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+formatters:
+ enable:
+ - gofmt
+ - goimports
+ settings:
+ goimports:
+ local-prefixes:
+ - github.com/prometheus/procfs
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
index 1617292350..4de21512ff 100644
--- a/vendor/github.com/prometheus/procfs/Makefile.common
+++ b/vendor/github.com/prometheus/procfs/Makefile.common
@@ -61,7 +61,8 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.59.0
+GOLANGCI_LINT_VERSION ?= v2.1.5
+GOLANGCI_FMT_OPTS ?=
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -156,9 +157,13 @@ $(GOTEST_DIR):
@mkdir -p $@
.PHONY: common-format
-common-format:
+common-format: $(GOLANGCI_LINT)
@echo ">> formatting code"
$(GO) fmt $(pkgs)
+ifdef GOLANGCI_LINT
+ @echo ">> formatting code with golangci-lint"
+ $(GOLANGCI_LINT) fmt $(GOLANGCI_FMT_OPTS)
+endif
.PHONY: common-vet
common-vet:
@@ -248,8 +253,8 @@ $(PROMU):
cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
rm -r $(PROMU_TMP)
-.PHONY: proto
-proto:
+.PHONY: common-proto
+common-proto:
@echo ">> generating code from proto files"
@./scripts/genproto.sh
@@ -275,3 +280,9 @@ $(1)_precheck:
exit 1; \
fi
endef
+
+govulncheck: install-govulncheck
+ govulncheck ./...
+
+install-govulncheck:
+ command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
index 1224816c2a..0718239cf1 100644
--- a/vendor/github.com/prometheus/procfs/README.md
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -47,15 +47,15 @@ However, most of the API includes unit tests which can be run with `make test`.
The procfs library includes a set of test fixtures which include many example files from
the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
which is extracted automatically during testing. To add/update the test fixtures, first
-ensure the `fixtures` directory is up to date by removing the existing directory and then
-extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
+ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then
+extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`.
```bash
rm -rf testdata/fixtures
make test
```
-Next, make the required changes to the extracted files in the `fixtures` directory. When
+Next, make the required changes to the extracted files in the `testdata/fixtures` directory. When
the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
based on the updated `fixtures` directory. And finally, verify the changes using
`git diff testdata/fixtures.ttar`.
diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go
index cdcc8a7ccc..2e53344151 100644
--- a/vendor/github.com/prometheus/procfs/arp.go
+++ b/vendor/github.com/prometheus/procfs/arp.go
@@ -23,9 +23,9 @@ import (
// Learned from include/uapi/linux/if_arp.h.
const (
- // completed entry (ha valid).
+ // Completed entry (ha valid).
ATFComplete = 0x02
- // permanent entry.
+ // Permanent entry.
ATFPermanent = 0x04
// Publish entry.
ATFPublish = 0x08
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
index 4980c875bf..9bdaccc7c8 100644
--- a/vendor/github.com/prometheus/procfs/fs.go
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -24,8 +24,14 @@ type FS struct {
isReal bool
}
-// DefaultMountPoint is the common mount point of the proc filesystem.
-const DefaultMountPoint = fs.DefaultProcMountPoint
+const (
+ // DefaultMountPoint is the common mount point of the proc filesystem.
+ DefaultMountPoint = fs.DefaultProcMountPoint
+
+ // SectorSize represents the size of a sector in bytes.
+ // It is specific to Linux block I/O operations.
+ SectorSize = 512
+)
// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
// It will error if the mount point directory can't be read or is a file.
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
index 134767d69a..1b5bdbdf84 100644
--- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
+++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
@@ -17,7 +17,7 @@
package procfs
// isRealProc returns true on architectures that don't have a Type argument
-// in their Statfs_t struct
-func isRealProc(mountPoint string) (bool, error) {
+// in their Statfs_t struct.
+func isRealProc(_ string) (bool, error) {
return true, nil
}
diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go
index cf2e3eaa03..7db8633077 100644
--- a/vendor/github.com/prometheus/procfs/fscache.go
+++ b/vendor/github.com/prometheus/procfs/fscache.go
@@ -162,7 +162,7 @@ type Fscacheinfo struct {
ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
// Number of release reqs ignored due to in-progress store
ReleaseRequestsIgnoredDueToInProgressStore uint64
- // Number of page stores cancelled due to release req
+ // Number of page stores canceled due to release req
PageStoresCancelledByReleaseRequests uint64
VmscanWaiting uint64
// Number of times async ops added to pending queues
@@ -171,11 +171,11 @@ type Fscacheinfo struct {
OpsRunning uint64
// Number of times async ops queued for processing
OpsEnqueued uint64
- // Number of async ops cancelled
+ // Number of async ops canceled
OpsCancelled uint64
// Number of async ops rejected due to object lookup/create failure
OpsRejected uint64
- // Number of async ops initialised
+ // Number of async ops initialized
OpsInitialised uint64
// Number of async ops queued for deferred release
OpsDeferred uint64
diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
index 3c18c7610e..3a43e83915 100644
--- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go
+++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
@@ -28,6 +28,9 @@ const (
// DefaultConfigfsMountPoint is the common mount point of the configfs.
DefaultConfigfsMountPoint = "/sys/kernel/config"
+
+ // DefaultSelinuxMountPoint is the common mount point of the selinuxfs.
+ DefaultSelinuxMountPoint = "/sys/fs/selinux"
)
// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
index 14272dc788..5a7d2df06a 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/parse.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -14,6 +14,7 @@
package util
import (
+ "errors"
"os"
"strconv"
"strings"
@@ -110,3 +111,16 @@ func ParseBool(b string) *bool {
}
return &truth
}
+
+// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX.
+func ReadHexFromFile(path string) (uint64, error) {
+ data, err := os.ReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ hexString := strings.TrimSpace(string(data))
+ if !strings.HasPrefix(hexString, "0x") {
+ return 0, errors.New("invalid format: hex string does not start with '0x'")
+ }
+ return strconv.ParseUint(hexString[2:], 16, 64)
+}
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
index 1ab875ceec..d5404a6d72 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
@@ -20,6 +20,8 @@ package util
import (
"bytes"
"os"
+ "strconv"
+ "strings"
"syscall"
)
@@ -48,3 +50,21 @@ func SysReadFile(file string) (string, error) {
return string(bytes.TrimSpace(b[:n])), nil
}
+
+// SysReadUintFromFile reads a file using SysReadFile and attempts to parse a uint64 from it.
+func SysReadUintFromFile(path string) (uint64, error) {
+ data, err := SysReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
+
+// SysReadIntFromFile reads a file using SysReadFile and attempts to parse a int64 from it.
+func SysReadIntFromFile(path string) (int64, error) {
+ data, err := SysReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
+}
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
index 67a9d2b448..1fd4381b22 100644
--- a/vendor/github.com/prometheus/procfs/mdstat.go
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -123,13 +123,16 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
finish := float64(0)
pct := float64(0)
recovering := strings.Contains(lines[syncLineIdx], "recovery")
+ reshaping := strings.Contains(lines[syncLineIdx], "reshape")
resyncing := strings.Contains(lines[syncLineIdx], "resync")
checking := strings.Contains(lines[syncLineIdx], "check")
// Append recovery and resyncing state info.
- if recovering || resyncing || checking {
+ if recovering || resyncing || checking || reshaping {
if recovering {
state = "recovering"
+ } else if reshaping {
+ state = "reshaping"
} else if checking {
state = "checking"
} else {
diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go
index 4b2c4050a3..937e1f9606 100644
--- a/vendor/github.com/prometheus/procfs/meminfo.go
+++ b/vendor/github.com/prometheus/procfs/meminfo.go
@@ -66,6 +66,10 @@ type Meminfo struct {
// Memory which has been evicted from RAM, and is temporarily
// on the disk
SwapFree *uint64
+ // Memory consumed by the zswap backend (compressed size)
+ Zswap *uint64
+ // Amount of anonymous memory stored in zswap (original size)
+ Zswapped *uint64
// Memory which is waiting to get written back to the disk
Dirty *uint64
// Memory which is actively being written back to the disk
@@ -85,6 +89,8 @@ type Meminfo struct {
// amount of memory dedicated to the lowest level of page
// tables.
PageTables *uint64
+ // secondary page tables.
+ SecPageTables *uint64
// NFS pages sent to the server, but not yet committed to
// stable storage
NFSUnstable *uint64
@@ -129,15 +135,18 @@ type Meminfo struct {
Percpu *uint64
HardwareCorrupted *uint64
AnonHugePages *uint64
+ FileHugePages *uint64
ShmemHugePages *uint64
ShmemPmdMapped *uint64
CmaTotal *uint64
CmaFree *uint64
+ Unaccepted *uint64
HugePagesTotal *uint64
HugePagesFree *uint64
HugePagesRsvd *uint64
HugePagesSurp *uint64
Hugepagesize *uint64
+ Hugetlb *uint64
DirectMap4k *uint64
DirectMap2M *uint64
DirectMap1G *uint64
@@ -161,6 +170,8 @@ type Meminfo struct {
MlockedBytes *uint64
SwapTotalBytes *uint64
SwapFreeBytes *uint64
+ ZswapBytes *uint64
+ ZswappedBytes *uint64
DirtyBytes *uint64
WritebackBytes *uint64
AnonPagesBytes *uint64
@@ -171,6 +182,7 @@ type Meminfo struct {
SUnreclaimBytes *uint64
KernelStackBytes *uint64
PageTablesBytes *uint64
+ SecPageTablesBytes *uint64
NFSUnstableBytes *uint64
BounceBytes *uint64
WritebackTmpBytes *uint64
@@ -182,11 +194,14 @@ type Meminfo struct {
PercpuBytes *uint64
HardwareCorruptedBytes *uint64
AnonHugePagesBytes *uint64
+ FileHugePagesBytes *uint64
ShmemHugePagesBytes *uint64
ShmemPmdMappedBytes *uint64
CmaTotalBytes *uint64
CmaFreeBytes *uint64
+ UnacceptedBytes *uint64
HugepagesizeBytes *uint64
+ HugetlbBytes *uint64
DirectMap4kBytes *uint64
DirectMap2MBytes *uint64
DirectMap1GBytes *uint64
@@ -287,6 +302,12 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
case "SwapFree:":
m.SwapFree = &val
m.SwapFreeBytes = &valBytes
+ case "Zswap:":
+ m.Zswap = &val
+ m.ZswapBytes = &valBytes
+ case "Zswapped:":
+ m.Zswapped = &val
+ m.ZswapBytes = &valBytes
case "Dirty:":
m.Dirty = &val
m.DirtyBytes = &valBytes
@@ -317,6 +338,9 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
case "PageTables:":
m.PageTables = &val
m.PageTablesBytes = &valBytes
+ case "SecPageTables:":
+ m.SecPageTables = &val
+ m.SecPageTablesBytes = &valBytes
case "NFS_Unstable:":
m.NFSUnstable = &val
m.NFSUnstableBytes = &valBytes
@@ -350,6 +374,9 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
case "AnonHugePages:":
m.AnonHugePages = &val
m.AnonHugePagesBytes = &valBytes
+ case "FileHugePages:":
+ m.FileHugePages = &val
+ m.FileHugePagesBytes = &valBytes
case "ShmemHugePages:":
m.ShmemHugePages = &val
m.ShmemHugePagesBytes = &valBytes
@@ -362,6 +389,9 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
case "CmaFree:":
m.CmaFree = &val
m.CmaFreeBytes = &valBytes
+ case "Unaccepted:":
+ m.Unaccepted = &val
+ m.UnacceptedBytes = &valBytes
case "HugePages_Total:":
m.HugePagesTotal = &val
case "HugePages_Free:":
@@ -373,6 +403,9 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
case "Hugepagesize:":
m.Hugepagesize = &val
m.HugepagesizeBytes = &valBytes
+ case "Hugetlb:":
+ m.Hugetlb = &val
+ m.HugetlbBytes = &valBytes
case "DirectMap4k:":
m.DirectMap4k = &val
m.DirectMap4kBytes = &valBytes
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
index 75a3b6c810..50caa73274 100644
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -45,11 +45,11 @@ const (
fieldTransport11TCPLen = 13
fieldTransport11UDPLen = 10
- // kernel version >= 4.14 MaxLen
+ // Kernel version >= 4.14 MaxLen
// See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393
fieldTransport11RDMAMaxLen = 28
- // kernel version <= 4.2 MinLen
+ // Kernel version <= 4.2 MinLen
// See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331
fieldTransport11RDMAMinLen = 20
)
@@ -601,11 +601,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
switch statVersion {
case statVersion10:
var expectedLength int
- if protocol == "tcp" {
+ switch protocol {
+ case "tcp":
expectedLength = fieldTransport10TCPLen
- } else if protocol == "udp" {
+ case "udp":
expectedLength = fieldTransport10UDPLen
- } else {
+ default:
return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss)
}
if len(ss) != expectedLength {
@@ -613,13 +614,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
}
case statVersion11:
var expectedLength int
- if protocol == "tcp" {
+ switch protocol {
+ case "tcp":
expectedLength = fieldTransport11TCPLen
- } else if protocol == "udp" {
+ case "udp":
expectedLength = fieldTransport11UDPLen
- } else if protocol == "rdma" {
+ case "rdma":
expectedLength = fieldTransport11RDMAMinLen
- } else {
+ default:
return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss)
}
if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) ||
@@ -655,11 +657,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
// For the udp RPC transport there is no connection count, connect idle time,
// or idle time (fields #3, #4, and #5); all other fields are the same. So
// we set them to 0 here.
- if protocol == "udp" {
+ switch protocol {
+ case "udp":
ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
- } else if protocol == "tcp" {
+ case "tcp":
ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...)
- } else if protocol == "rdma" {
+ case "rdma":
ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
}
diff --git a/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go
new file mode 100644
index 0000000000..f50b38e352
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go
@@ -0,0 +1,96 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "errors"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc//net/dev_snmp6/.
+// The outer map's keys are interface names and the inner map's keys are stat names.
+//
+// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type.
+type NetDevSNMP6 map[string]map[string]uint64
+
+// Returns kernel/system statistics read from interface files within the /proc/net/dev_snmp6/
+// directory.
+func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) {
+ return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6"))
+}
+
+// Returns kernel/system statistics read from interface files within the /proc//net/dev_snmp6/
+// directory.
+func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) {
+ return newNetDevSNMP6(p.path("net/dev_snmp6"))
+}
+
+// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory.
+func newNetDevSNMP6(dir string) (NetDevSNMP6, error) {
+ netDevSNMP6 := make(NetDevSNMP6)
+
+ // The net/dev_snmp6 folders contain one file per interface
+ ifaceFiles, err := os.ReadDir(dir)
+ if err != nil {
+ // On systems with IPv6 disabled, this directory won't exist.
+ // Do nothing.
+ if errors.Is(err, os.ErrNotExist) {
+ return netDevSNMP6, err
+ }
+ return netDevSNMP6, err
+ }
+
+ for _, iFaceFile := range ifaceFiles {
+ f, err := os.Open(dir + "/" + iFaceFile.Name())
+ if err != nil {
+ return netDevSNMP6, err
+ }
+ defer f.Close()
+
+ netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f)
+ if err != nil {
+ return netDevSNMP6, err
+ }
+ }
+
+ return netDevSNMP6, nil
+}
+
+func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) {
+ m := make(map[string]uint64)
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ stat := strings.Fields(scanner.Text())
+ if len(stat) < 2 {
+ continue
+ }
+ key, val := stat[0], stat[1]
+
+ // Expect stat name to contain "6" or be "ifIndex"
+ if strings.Contains(key, "6") || key == "ifIndex" {
+ v, err := strconv.ParseUint(val, 10, 64)
+ if err != nil {
+ return m, err
+ }
+
+ m[key] = v
+ }
+ }
+ return m, scanner.Err()
+}
diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go
index b70f1fc7a4..19e3378f72 100644
--- a/vendor/github.com/prometheus/procfs/net_ip_socket.go
+++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go
@@ -25,7 +25,7 @@ import (
)
const (
- // readLimit is used by io.LimitReader while reading the content of the
+ // Maximum size limit used by io.LimitReader while reading the content of the
// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
// as each line represents a single used socket.
// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
@@ -50,12 +50,12 @@ type (
// UsedSockets shows the total number of parsed lines representing the
// number of used sockets.
UsedSockets uint64
- // Drops shows the total number of dropped packets of all UPD sockets.
+ // Drops shows the total number of dropped packets of all UDP sockets.
Drops *uint64
}
- // netIPSocketLine represents the fields parsed from a single line
- // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
+ // A single line parser for fields from /proc/net/{t,u}dp{,6}.
+ // Fields which are not used by IPSocket are skipped.
// Drops is non-nil for udp{,6}, but nil for tcp{,6}.
// For the proc file format details, see https://linux.die.net/man/5/proc.
netIPSocketLine struct {
diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go
index b6c77b709f..8d4b1ac05b 100644
--- a/vendor/github.com/prometheus/procfs/net_protocols.go
+++ b/vendor/github.com/prometheus/procfs/net_protocols.go
@@ -115,22 +115,24 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro
if err != nil {
return nil, err
}
- if fields[4] == enabled {
+ switch fields[4] {
+ case enabled:
line.Pressure = 1
- } else if fields[4] == disabled {
+ case disabled:
line.Pressure = 0
- } else {
+ default:
line.Pressure = -1
}
line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64)
if err != nil {
return nil, err
}
- if fields[6] == enabled {
+ switch fields[6] {
+ case enabled:
line.Slab = true
- } else if fields[6] == disabled {
+ case disabled:
line.Slab = false
- } else {
+ default:
return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name)
}
line.ModuleName = fields[7]
@@ -168,11 +170,12 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro
}
for i := 0; i < len(capabilities); i++ {
- if capabilities[i] == "y" {
+ switch capabilities[i] {
+ case "y":
*capabilityFields[i] = true
- } else if capabilities[i] == "n" {
+ case "n":
*capabilityFields[i] = false
- } else {
+ default:
return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i)
}
}
diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go
index 5277629557..0396d72015 100644
--- a/vendor/github.com/prometheus/procfs/net_tcp.go
+++ b/vendor/github.com/prometheus/procfs/net_tcp.go
@@ -25,24 +25,28 @@ type (
// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams
// read from /proc/net/tcp.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
func (fs FS) NetTCP() (NetTCP, error) {
return newNetTCP(fs.proc.Path("net/tcp"))
}
// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams
// read from /proc/net/tcp6.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
func (fs FS) NetTCP6() (NetTCP, error) {
return newNetTCP(fs.proc.Path("net/tcp6"))
}
// NetTCPSummary returns already computed statistics like the total queue lengths
// for TCP datagrams read from /proc/net/tcp.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
return newNetTCPSummary(fs.proc.Path("net/tcp"))
}
// NetTCP6Summary returns already computed statistics like the total queue lengths
// for TCP datagrams read from /proc/net/tcp6.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
return newNetTCPSummary(fs.proc.Path("net/tcp6"))
}
diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go
index d868cebdaa..d7e0cacb4c 100644
--- a/vendor/github.com/prometheus/procfs/net_unix.go
+++ b/vendor/github.com/prometheus/procfs/net_unix.go
@@ -121,12 +121,12 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
return &nu, nil
}
-func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
+func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) {
fields := strings.Fields(line)
l := len(fields)
- if l < min {
- return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l)
+ if l < minFields {
+ return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l)
}
// Field offsets are as follows:
@@ -172,7 +172,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
}
// Path field is optional.
- if l > min {
+ if l > minFields {
// Path occurs at either index 6 or 7 depending on whether inode is
// already present.
pathIdx := 7
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
index 142796368f..368187fa88 100644
--- a/vendor/github.com/prometheus/procfs/proc.go
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -37,9 +37,9 @@ type Proc struct {
type Procs []Proc
var (
- ErrFileParse = errors.New("Error Parsing File")
- ErrFileRead = errors.New("Error Reading File")
- ErrMountPoint = errors.New("Error Accessing Mount point")
+ ErrFileParse = errors.New("error parsing file")
+ ErrFileRead = errors.New("error reading file")
+ ErrMountPoint = errors.New("error accessing mount point")
)
func (p Procs) Len() int { return len(p) }
@@ -79,7 +79,7 @@ func (fs FS) Self() (Proc, error) {
if err != nil {
return Proc{}, err
}
- pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1))
+ pid, err := strconv.Atoi(strings.ReplaceAll(p, string(fs.proc), ""))
if err != nil {
return Proc{}, err
}
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go
index daeed7f571..4a64347c03 100644
--- a/vendor/github.com/prometheus/procfs/proc_cgroup.go
+++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go
@@ -24,7 +24,7 @@ import (
)
// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
-// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
+// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. The v1 has one hierarchy per available resource
// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
index 776f349717..d15b66ddb6 100644
--- a/vendor/github.com/prometheus/procfs/proc_io.go
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -50,7 +50,7 @@ func (p Proc) IO() (ProcIO, error) {
ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
"read_bytes: %d\nwrite_bytes: %d\n" +
- "cancelled_write_bytes: %d\n"
+ "cancelled_write_bytes: %d\n" //nolint:misspell
_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go
index 8e3ff4d794..4248c1716e 100644
--- a/vendor/github.com/prometheus/procfs/proc_netstat.go
+++ b/vendor/github.com/prometheus/procfs/proc_netstat.go
@@ -209,232 +209,232 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
case "TcpExt":
switch key {
case "SyncookiesSent":
- procNetstat.TcpExt.SyncookiesSent = &value
+ procNetstat.SyncookiesSent = &value
case "SyncookiesRecv":
- procNetstat.TcpExt.SyncookiesRecv = &value
+ procNetstat.SyncookiesRecv = &value
case "SyncookiesFailed":
- procNetstat.TcpExt.SyncookiesFailed = &value
+ procNetstat.SyncookiesFailed = &value
case "EmbryonicRsts":
- procNetstat.TcpExt.EmbryonicRsts = &value
+ procNetstat.EmbryonicRsts = &value
case "PruneCalled":
- procNetstat.TcpExt.PruneCalled = &value
+ procNetstat.PruneCalled = &value
case "RcvPruned":
- procNetstat.TcpExt.RcvPruned = &value
+ procNetstat.RcvPruned = &value
case "OfoPruned":
- procNetstat.TcpExt.OfoPruned = &value
+ procNetstat.OfoPruned = &value
case "OutOfWindowIcmps":
- procNetstat.TcpExt.OutOfWindowIcmps = &value
+ procNetstat.OutOfWindowIcmps = &value
case "LockDroppedIcmps":
- procNetstat.TcpExt.LockDroppedIcmps = &value
+ procNetstat.LockDroppedIcmps = &value
case "ArpFilter":
- procNetstat.TcpExt.ArpFilter = &value
+ procNetstat.ArpFilter = &value
case "TW":
- procNetstat.TcpExt.TW = &value
+ procNetstat.TW = &value
case "TWRecycled":
- procNetstat.TcpExt.TWRecycled = &value
+ procNetstat.TWRecycled = &value
case "TWKilled":
- procNetstat.TcpExt.TWKilled = &value
+ procNetstat.TWKilled = &value
case "PAWSActive":
- procNetstat.TcpExt.PAWSActive = &value
+ procNetstat.PAWSActive = &value
case "PAWSEstab":
- procNetstat.TcpExt.PAWSEstab = &value
+ procNetstat.PAWSEstab = &value
case "DelayedACKs":
- procNetstat.TcpExt.DelayedACKs = &value
+ procNetstat.DelayedACKs = &value
case "DelayedACKLocked":
- procNetstat.TcpExt.DelayedACKLocked = &value
+ procNetstat.DelayedACKLocked = &value
case "DelayedACKLost":
- procNetstat.TcpExt.DelayedACKLost = &value
+ procNetstat.DelayedACKLost = &value
case "ListenOverflows":
- procNetstat.TcpExt.ListenOverflows = &value
+ procNetstat.ListenOverflows = &value
case "ListenDrops":
- procNetstat.TcpExt.ListenDrops = &value
+ procNetstat.ListenDrops = &value
case "TCPHPHits":
- procNetstat.TcpExt.TCPHPHits = &value
+ procNetstat.TCPHPHits = &value
case "TCPPureAcks":
- procNetstat.TcpExt.TCPPureAcks = &value
+ procNetstat.TCPPureAcks = &value
case "TCPHPAcks":
- procNetstat.TcpExt.TCPHPAcks = &value
+ procNetstat.TCPHPAcks = &value
case "TCPRenoRecovery":
- procNetstat.TcpExt.TCPRenoRecovery = &value
+ procNetstat.TCPRenoRecovery = &value
case "TCPSackRecovery":
- procNetstat.TcpExt.TCPSackRecovery = &value
+ procNetstat.TCPSackRecovery = &value
case "TCPSACKReneging":
- procNetstat.TcpExt.TCPSACKReneging = &value
+ procNetstat.TCPSACKReneging = &value
case "TCPSACKReorder":
- procNetstat.TcpExt.TCPSACKReorder = &value
+ procNetstat.TCPSACKReorder = &value
case "TCPRenoReorder":
- procNetstat.TcpExt.TCPRenoReorder = &value
+ procNetstat.TCPRenoReorder = &value
case "TCPTSReorder":
- procNetstat.TcpExt.TCPTSReorder = &value
+ procNetstat.TCPTSReorder = &value
case "TCPFullUndo":
- procNetstat.TcpExt.TCPFullUndo = &value
+ procNetstat.TCPFullUndo = &value
case "TCPPartialUndo":
- procNetstat.TcpExt.TCPPartialUndo = &value
+ procNetstat.TCPPartialUndo = &value
case "TCPDSACKUndo":
- procNetstat.TcpExt.TCPDSACKUndo = &value
+ procNetstat.TCPDSACKUndo = &value
case "TCPLossUndo":
- procNetstat.TcpExt.TCPLossUndo = &value
+ procNetstat.TCPLossUndo = &value
case "TCPLostRetransmit":
- procNetstat.TcpExt.TCPLostRetransmit = &value
+ procNetstat.TCPLostRetransmit = &value
case "TCPRenoFailures":
- procNetstat.TcpExt.TCPRenoFailures = &value
+ procNetstat.TCPRenoFailures = &value
case "TCPSackFailures":
- procNetstat.TcpExt.TCPSackFailures = &value
+ procNetstat.TCPSackFailures = &value
case "TCPLossFailures":
- procNetstat.TcpExt.TCPLossFailures = &value
+ procNetstat.TCPLossFailures = &value
case "TCPFastRetrans":
- procNetstat.TcpExt.TCPFastRetrans = &value
+ procNetstat.TCPFastRetrans = &value
case "TCPSlowStartRetrans":
- procNetstat.TcpExt.TCPSlowStartRetrans = &value
+ procNetstat.TCPSlowStartRetrans = &value
case "TCPTimeouts":
- procNetstat.TcpExt.TCPTimeouts = &value
+ procNetstat.TCPTimeouts = &value
case "TCPLossProbes":
- procNetstat.TcpExt.TCPLossProbes = &value
+ procNetstat.TCPLossProbes = &value
case "TCPLossProbeRecovery":
- procNetstat.TcpExt.TCPLossProbeRecovery = &value
+ procNetstat.TCPLossProbeRecovery = &value
case "TCPRenoRecoveryFail":
- procNetstat.TcpExt.TCPRenoRecoveryFail = &value
+ procNetstat.TCPRenoRecoveryFail = &value
case "TCPSackRecoveryFail":
- procNetstat.TcpExt.TCPSackRecoveryFail = &value
+ procNetstat.TCPSackRecoveryFail = &value
case "TCPRcvCollapsed":
- procNetstat.TcpExt.TCPRcvCollapsed = &value
+ procNetstat.TCPRcvCollapsed = &value
case "TCPDSACKOldSent":
- procNetstat.TcpExt.TCPDSACKOldSent = &value
+ procNetstat.TCPDSACKOldSent = &value
case "TCPDSACKOfoSent":
- procNetstat.TcpExt.TCPDSACKOfoSent = &value
+ procNetstat.TCPDSACKOfoSent = &value
case "TCPDSACKRecv":
- procNetstat.TcpExt.TCPDSACKRecv = &value
+ procNetstat.TCPDSACKRecv = &value
case "TCPDSACKOfoRecv":
- procNetstat.TcpExt.TCPDSACKOfoRecv = &value
+ procNetstat.TCPDSACKOfoRecv = &value
case "TCPAbortOnData":
- procNetstat.TcpExt.TCPAbortOnData = &value
+ procNetstat.TCPAbortOnData = &value
case "TCPAbortOnClose":
- procNetstat.TcpExt.TCPAbortOnClose = &value
+ procNetstat.TCPAbortOnClose = &value
case "TCPDeferAcceptDrop":
- procNetstat.TcpExt.TCPDeferAcceptDrop = &value
+ procNetstat.TCPDeferAcceptDrop = &value
case "IPReversePathFilter":
- procNetstat.TcpExt.IPReversePathFilter = &value
+ procNetstat.IPReversePathFilter = &value
case "TCPTimeWaitOverflow":
- procNetstat.TcpExt.TCPTimeWaitOverflow = &value
+ procNetstat.TCPTimeWaitOverflow = &value
case "TCPReqQFullDoCookies":
- procNetstat.TcpExt.TCPReqQFullDoCookies = &value
+ procNetstat.TCPReqQFullDoCookies = &value
case "TCPReqQFullDrop":
- procNetstat.TcpExt.TCPReqQFullDrop = &value
+ procNetstat.TCPReqQFullDrop = &value
case "TCPRetransFail":
- procNetstat.TcpExt.TCPRetransFail = &value
+ procNetstat.TCPRetransFail = &value
case "TCPRcvCoalesce":
- procNetstat.TcpExt.TCPRcvCoalesce = &value
+ procNetstat.TCPRcvCoalesce = &value
case "TCPRcvQDrop":
- procNetstat.TcpExt.TCPRcvQDrop = &value
+ procNetstat.TCPRcvQDrop = &value
case "TCPOFOQueue":
- procNetstat.TcpExt.TCPOFOQueue = &value
+ procNetstat.TCPOFOQueue = &value
case "TCPOFODrop":
- procNetstat.TcpExt.TCPOFODrop = &value
+ procNetstat.TCPOFODrop = &value
case "TCPOFOMerge":
- procNetstat.TcpExt.TCPOFOMerge = &value
+ procNetstat.TCPOFOMerge = &value
case "TCPChallengeACK":
- procNetstat.TcpExt.TCPChallengeACK = &value
+ procNetstat.TCPChallengeACK = &value
case "TCPSYNChallenge":
- procNetstat.TcpExt.TCPSYNChallenge = &value
+ procNetstat.TCPSYNChallenge = &value
case "TCPFastOpenActive":
- procNetstat.TcpExt.TCPFastOpenActive = &value
+ procNetstat.TCPFastOpenActive = &value
case "TCPFastOpenActiveFail":
- procNetstat.TcpExt.TCPFastOpenActiveFail = &value
+ procNetstat.TCPFastOpenActiveFail = &value
case "TCPFastOpenPassive":
- procNetstat.TcpExt.TCPFastOpenPassive = &value
+ procNetstat.TCPFastOpenPassive = &value
case "TCPFastOpenPassiveFail":
- procNetstat.TcpExt.TCPFastOpenPassiveFail = &value
+ procNetstat.TCPFastOpenPassiveFail = &value
case "TCPFastOpenListenOverflow":
- procNetstat.TcpExt.TCPFastOpenListenOverflow = &value
+ procNetstat.TCPFastOpenListenOverflow = &value
case "TCPFastOpenCookieReqd":
- procNetstat.TcpExt.TCPFastOpenCookieReqd = &value
+ procNetstat.TCPFastOpenCookieReqd = &value
case "TCPFastOpenBlackhole":
- procNetstat.TcpExt.TCPFastOpenBlackhole = &value
+ procNetstat.TCPFastOpenBlackhole = &value
case "TCPSpuriousRtxHostQueues":
- procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value
+ procNetstat.TCPSpuriousRtxHostQueues = &value
case "BusyPollRxPackets":
- procNetstat.TcpExt.BusyPollRxPackets = &value
+ procNetstat.BusyPollRxPackets = &value
case "TCPAutoCorking":
- procNetstat.TcpExt.TCPAutoCorking = &value
+ procNetstat.TCPAutoCorking = &value
case "TCPFromZeroWindowAdv":
- procNetstat.TcpExt.TCPFromZeroWindowAdv = &value
+ procNetstat.TCPFromZeroWindowAdv = &value
case "TCPToZeroWindowAdv":
- procNetstat.TcpExt.TCPToZeroWindowAdv = &value
+ procNetstat.TCPToZeroWindowAdv = &value
case "TCPWantZeroWindowAdv":
- procNetstat.TcpExt.TCPWantZeroWindowAdv = &value
+ procNetstat.TCPWantZeroWindowAdv = &value
case "TCPSynRetrans":
- procNetstat.TcpExt.TCPSynRetrans = &value
+ procNetstat.TCPSynRetrans = &value
case "TCPOrigDataSent":
- procNetstat.TcpExt.TCPOrigDataSent = &value
+ procNetstat.TCPOrigDataSent = &value
case "TCPHystartTrainDetect":
- procNetstat.TcpExt.TCPHystartTrainDetect = &value
+ procNetstat.TCPHystartTrainDetect = &value
case "TCPHystartTrainCwnd":
- procNetstat.TcpExt.TCPHystartTrainCwnd = &value
+ procNetstat.TCPHystartTrainCwnd = &value
case "TCPHystartDelayDetect":
- procNetstat.TcpExt.TCPHystartDelayDetect = &value
+ procNetstat.TCPHystartDelayDetect = &value
case "TCPHystartDelayCwnd":
- procNetstat.TcpExt.TCPHystartDelayCwnd = &value
+ procNetstat.TCPHystartDelayCwnd = &value
case "TCPACKSkippedSynRecv":
- procNetstat.TcpExt.TCPACKSkippedSynRecv = &value
+ procNetstat.TCPACKSkippedSynRecv = &value
case "TCPACKSkippedPAWS":
- procNetstat.TcpExt.TCPACKSkippedPAWS = &value
+ procNetstat.TCPACKSkippedPAWS = &value
case "TCPACKSkippedSeq":
- procNetstat.TcpExt.TCPACKSkippedSeq = &value
+ procNetstat.TCPACKSkippedSeq = &value
case "TCPACKSkippedFinWait2":
- procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value
+ procNetstat.TCPACKSkippedFinWait2 = &value
case "TCPACKSkippedTimeWait":
- procNetstat.TcpExt.TCPACKSkippedTimeWait = &value
+ procNetstat.TCPACKSkippedTimeWait = &value
case "TCPACKSkippedChallenge":
- procNetstat.TcpExt.TCPACKSkippedChallenge = &value
+ procNetstat.TCPACKSkippedChallenge = &value
case "TCPWinProbe":
- procNetstat.TcpExt.TCPWinProbe = &value
+ procNetstat.TCPWinProbe = &value
case "TCPKeepAlive":
- procNetstat.TcpExt.TCPKeepAlive = &value
+ procNetstat.TCPKeepAlive = &value
case "TCPMTUPFail":
- procNetstat.TcpExt.TCPMTUPFail = &value
+ procNetstat.TCPMTUPFail = &value
case "TCPMTUPSuccess":
- procNetstat.TcpExt.TCPMTUPSuccess = &value
+ procNetstat.TCPMTUPSuccess = &value
case "TCPWqueueTooBig":
- procNetstat.TcpExt.TCPWqueueTooBig = &value
+ procNetstat.TCPWqueueTooBig = &value
}
case "IpExt":
switch key {
case "InNoRoutes":
- procNetstat.IpExt.InNoRoutes = &value
+ procNetstat.InNoRoutes = &value
case "InTruncatedPkts":
- procNetstat.IpExt.InTruncatedPkts = &value
+ procNetstat.InTruncatedPkts = &value
case "InMcastPkts":
- procNetstat.IpExt.InMcastPkts = &value
+ procNetstat.InMcastPkts = &value
case "OutMcastPkts":
- procNetstat.IpExt.OutMcastPkts = &value
+ procNetstat.OutMcastPkts = &value
case "InBcastPkts":
- procNetstat.IpExt.InBcastPkts = &value
+ procNetstat.InBcastPkts = &value
case "OutBcastPkts":
- procNetstat.IpExt.OutBcastPkts = &value
+ procNetstat.OutBcastPkts = &value
case "InOctets":
- procNetstat.IpExt.InOctets = &value
+ procNetstat.InOctets = &value
case "OutOctets":
- procNetstat.IpExt.OutOctets = &value
+ procNetstat.OutOctets = &value
case "InMcastOctets":
- procNetstat.IpExt.InMcastOctets = &value
+ procNetstat.InMcastOctets = &value
case "OutMcastOctets":
- procNetstat.IpExt.OutMcastOctets = &value
+ procNetstat.OutMcastOctets = &value
case "InBcastOctets":
- procNetstat.IpExt.InBcastOctets = &value
+ procNetstat.InBcastOctets = &value
case "OutBcastOctets":
- procNetstat.IpExt.OutBcastOctets = &value
+ procNetstat.OutBcastOctets = &value
case "InCsumErrors":
- procNetstat.IpExt.InCsumErrors = &value
+ procNetstat.InCsumErrors = &value
case "InNoECTPkts":
- procNetstat.IpExt.InNoECTPkts = &value
+ procNetstat.InNoECTPkts = &value
case "InECT1Pkts":
- procNetstat.IpExt.InECT1Pkts = &value
+ procNetstat.InECT1Pkts = &value
case "InECT0Pkts":
- procNetstat.IpExt.InECT0Pkts = &value
+ procNetstat.InECT0Pkts = &value
case "InCEPkts":
- procNetstat.IpExt.InCEPkts = &value
+ procNetstat.InCEPkts = &value
case "ReasmOverlaps":
- procNetstat.IpExt.ReasmOverlaps = &value
+ procNetstat.ReasmOverlaps = &value
}
}
}
diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go
index 09060e8208..9a297afcf8 100644
--- a/vendor/github.com/prometheus/procfs/proc_smaps.go
+++ b/vendor/github.com/prometheus/procfs/proc_smaps.go
@@ -19,7 +19,6 @@ package procfs
import (
"bufio"
"errors"
- "fmt"
"os"
"regexp"
"strconv"
@@ -29,7 +28,7 @@ import (
)
var (
- // match the header line before each mapped zone in `/proc/pid/smaps`.
+ // Match the header line before each mapped zone in `/proc/pid/smaps`.
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
)
@@ -117,7 +116,6 @@ func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
func (s *ProcSMapsRollup) parseLine(line string) error {
kv := strings.SplitN(line, ":", 2)
if len(kv) != 2 {
- fmt.Println(line)
return errors.New("invalid net/dev line, missing colon")
}
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go
index b9d2cf642a..4bdc90b07e 100644
--- a/vendor/github.com/prometheus/procfs/proc_snmp.go
+++ b/vendor/github.com/prometheus/procfs/proc_snmp.go
@@ -173,138 +173,138 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
case "Ip":
switch key {
case "Forwarding":
- procSnmp.Ip.Forwarding = &value
+ procSnmp.Forwarding = &value
case "DefaultTTL":
- procSnmp.Ip.DefaultTTL = &value
+ procSnmp.DefaultTTL = &value
case "InReceives":
- procSnmp.Ip.InReceives = &value
+ procSnmp.InReceives = &value
case "InHdrErrors":
- procSnmp.Ip.InHdrErrors = &value
+ procSnmp.InHdrErrors = &value
case "InAddrErrors":
- procSnmp.Ip.InAddrErrors = &value
+ procSnmp.InAddrErrors = &value
case "ForwDatagrams":
- procSnmp.Ip.ForwDatagrams = &value
+ procSnmp.ForwDatagrams = &value
case "InUnknownProtos":
- procSnmp.Ip.InUnknownProtos = &value
+ procSnmp.InUnknownProtos = &value
case "InDiscards":
- procSnmp.Ip.InDiscards = &value
+ procSnmp.InDiscards = &value
case "InDelivers":
- procSnmp.Ip.InDelivers = &value
+ procSnmp.InDelivers = &value
case "OutRequests":
- procSnmp.Ip.OutRequests = &value
+ procSnmp.OutRequests = &value
case "OutDiscards":
- procSnmp.Ip.OutDiscards = &value
+ procSnmp.OutDiscards = &value
case "OutNoRoutes":
- procSnmp.Ip.OutNoRoutes = &value
+ procSnmp.OutNoRoutes = &value
case "ReasmTimeout":
- procSnmp.Ip.ReasmTimeout = &value
+ procSnmp.ReasmTimeout = &value
case "ReasmReqds":
- procSnmp.Ip.ReasmReqds = &value
+ procSnmp.ReasmReqds = &value
case "ReasmOKs":
- procSnmp.Ip.ReasmOKs = &value
+ procSnmp.ReasmOKs = &value
case "ReasmFails":
- procSnmp.Ip.ReasmFails = &value
+ procSnmp.ReasmFails = &value
case "FragOKs":
- procSnmp.Ip.FragOKs = &value
+ procSnmp.FragOKs = &value
case "FragFails":
- procSnmp.Ip.FragFails = &value
+ procSnmp.FragFails = &value
case "FragCreates":
- procSnmp.Ip.FragCreates = &value
+ procSnmp.FragCreates = &value
}
case "Icmp":
switch key {
case "InMsgs":
- procSnmp.Icmp.InMsgs = &value
+ procSnmp.InMsgs = &value
case "InErrors":
procSnmp.Icmp.InErrors = &value
case "InCsumErrors":
procSnmp.Icmp.InCsumErrors = &value
case "InDestUnreachs":
- procSnmp.Icmp.InDestUnreachs = &value
+ procSnmp.InDestUnreachs = &value
case "InTimeExcds":
- procSnmp.Icmp.InTimeExcds = &value
+ procSnmp.InTimeExcds = &value
case "InParmProbs":
- procSnmp.Icmp.InParmProbs = &value
+ procSnmp.InParmProbs = &value
case "InSrcQuenchs":
- procSnmp.Icmp.InSrcQuenchs = &value
+ procSnmp.InSrcQuenchs = &value
case "InRedirects":
- procSnmp.Icmp.InRedirects = &value
+ procSnmp.InRedirects = &value
case "InEchos":
- procSnmp.Icmp.InEchos = &value
+ procSnmp.InEchos = &value
case "InEchoReps":
- procSnmp.Icmp.InEchoReps = &value
+ procSnmp.InEchoReps = &value
case "InTimestamps":
- procSnmp.Icmp.InTimestamps = &value
+ procSnmp.InTimestamps = &value
case "InTimestampReps":
- procSnmp.Icmp.InTimestampReps = &value
+ procSnmp.InTimestampReps = &value
case "InAddrMasks":
- procSnmp.Icmp.InAddrMasks = &value
+ procSnmp.InAddrMasks = &value
case "InAddrMaskReps":
- procSnmp.Icmp.InAddrMaskReps = &value
+ procSnmp.InAddrMaskReps = &value
case "OutMsgs":
- procSnmp.Icmp.OutMsgs = &value
+ procSnmp.OutMsgs = &value
case "OutErrors":
- procSnmp.Icmp.OutErrors = &value
+ procSnmp.OutErrors = &value
case "OutDestUnreachs":
- procSnmp.Icmp.OutDestUnreachs = &value
+ procSnmp.OutDestUnreachs = &value
case "OutTimeExcds":
- procSnmp.Icmp.OutTimeExcds = &value
+ procSnmp.OutTimeExcds = &value
case "OutParmProbs":
- procSnmp.Icmp.OutParmProbs = &value
+ procSnmp.OutParmProbs = &value
case "OutSrcQuenchs":
- procSnmp.Icmp.OutSrcQuenchs = &value
+ procSnmp.OutSrcQuenchs = &value
case "OutRedirects":
- procSnmp.Icmp.OutRedirects = &value
+ procSnmp.OutRedirects = &value
case "OutEchos":
- procSnmp.Icmp.OutEchos = &value
+ procSnmp.OutEchos = &value
case "OutEchoReps":
- procSnmp.Icmp.OutEchoReps = &value
+ procSnmp.OutEchoReps = &value
case "OutTimestamps":
- procSnmp.Icmp.OutTimestamps = &value
+ procSnmp.OutTimestamps = &value
case "OutTimestampReps":
- procSnmp.Icmp.OutTimestampReps = &value
+ procSnmp.OutTimestampReps = &value
case "OutAddrMasks":
- procSnmp.Icmp.OutAddrMasks = &value
+ procSnmp.OutAddrMasks = &value
case "OutAddrMaskReps":
- procSnmp.Icmp.OutAddrMaskReps = &value
+ procSnmp.OutAddrMaskReps = &value
}
case "IcmpMsg":
switch key {
case "InType3":
- procSnmp.IcmpMsg.InType3 = &value
+ procSnmp.InType3 = &value
case "OutType3":
- procSnmp.IcmpMsg.OutType3 = &value
+ procSnmp.OutType3 = &value
}
case "Tcp":
switch key {
case "RtoAlgorithm":
- procSnmp.Tcp.RtoAlgorithm = &value
+ procSnmp.RtoAlgorithm = &value
case "RtoMin":
- procSnmp.Tcp.RtoMin = &value
+ procSnmp.RtoMin = &value
case "RtoMax":
- procSnmp.Tcp.RtoMax = &value
+ procSnmp.RtoMax = &value
case "MaxConn":
- procSnmp.Tcp.MaxConn = &value
+ procSnmp.MaxConn = &value
case "ActiveOpens":
- procSnmp.Tcp.ActiveOpens = &value
+ procSnmp.ActiveOpens = &value
case "PassiveOpens":
- procSnmp.Tcp.PassiveOpens = &value
+ procSnmp.PassiveOpens = &value
case "AttemptFails":
- procSnmp.Tcp.AttemptFails = &value
+ procSnmp.AttemptFails = &value
case "EstabResets":
- procSnmp.Tcp.EstabResets = &value
+ procSnmp.EstabResets = &value
case "CurrEstab":
- procSnmp.Tcp.CurrEstab = &value
+ procSnmp.CurrEstab = &value
case "InSegs":
- procSnmp.Tcp.InSegs = &value
+ procSnmp.InSegs = &value
case "OutSegs":
- procSnmp.Tcp.OutSegs = &value
+ procSnmp.OutSegs = &value
case "RetransSegs":
- procSnmp.Tcp.RetransSegs = &value
+ procSnmp.RetransSegs = &value
case "InErrs":
- procSnmp.Tcp.InErrs = &value
+ procSnmp.InErrs = &value
case "OutRsts":
- procSnmp.Tcp.OutRsts = &value
+ procSnmp.OutRsts = &value
case "InCsumErrors":
procSnmp.Tcp.InCsumErrors = &value
}
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go
index 3059cc6a13..fb7fd3995b 100644
--- a/vendor/github.com/prometheus/procfs/proc_snmp6.go
+++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go
@@ -182,161 +182,161 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
case "Ip6":
switch key {
case "InReceives":
- procSnmp6.Ip6.InReceives = &value
+ procSnmp6.InReceives = &value
case "InHdrErrors":
- procSnmp6.Ip6.InHdrErrors = &value
+ procSnmp6.InHdrErrors = &value
case "InTooBigErrors":
- procSnmp6.Ip6.InTooBigErrors = &value
+ procSnmp6.InTooBigErrors = &value
case "InNoRoutes":
- procSnmp6.Ip6.InNoRoutes = &value
+ procSnmp6.InNoRoutes = &value
case "InAddrErrors":
- procSnmp6.Ip6.InAddrErrors = &value
+ procSnmp6.InAddrErrors = &value
case "InUnknownProtos":
- procSnmp6.Ip6.InUnknownProtos = &value
+ procSnmp6.InUnknownProtos = &value
case "InTruncatedPkts":
- procSnmp6.Ip6.InTruncatedPkts = &value
+ procSnmp6.InTruncatedPkts = &value
case "InDiscards":
- procSnmp6.Ip6.InDiscards = &value
+ procSnmp6.InDiscards = &value
case "InDelivers":
- procSnmp6.Ip6.InDelivers = &value
+ procSnmp6.InDelivers = &value
case "OutForwDatagrams":
- procSnmp6.Ip6.OutForwDatagrams = &value
+ procSnmp6.OutForwDatagrams = &value
case "OutRequests":
- procSnmp6.Ip6.OutRequests = &value
+ procSnmp6.OutRequests = &value
case "OutDiscards":
- procSnmp6.Ip6.OutDiscards = &value
+ procSnmp6.OutDiscards = &value
case "OutNoRoutes":
- procSnmp6.Ip6.OutNoRoutes = &value
+ procSnmp6.OutNoRoutes = &value
case "ReasmTimeout":
- procSnmp6.Ip6.ReasmTimeout = &value
+ procSnmp6.ReasmTimeout = &value
case "ReasmReqds":
- procSnmp6.Ip6.ReasmReqds = &value
+ procSnmp6.ReasmReqds = &value
case "ReasmOKs":
- procSnmp6.Ip6.ReasmOKs = &value
+ procSnmp6.ReasmOKs = &value
case "ReasmFails":
- procSnmp6.Ip6.ReasmFails = &value
+ procSnmp6.ReasmFails = &value
case "FragOKs":
- procSnmp6.Ip6.FragOKs = &value
+ procSnmp6.FragOKs = &value
case "FragFails":
- procSnmp6.Ip6.FragFails = &value
+ procSnmp6.FragFails = &value
case "FragCreates":
- procSnmp6.Ip6.FragCreates = &value
+ procSnmp6.FragCreates = &value
case "InMcastPkts":
- procSnmp6.Ip6.InMcastPkts = &value
+ procSnmp6.InMcastPkts = &value
case "OutMcastPkts":
- procSnmp6.Ip6.OutMcastPkts = &value
+ procSnmp6.OutMcastPkts = &value
case "InOctets":
- procSnmp6.Ip6.InOctets = &value
+ procSnmp6.InOctets = &value
case "OutOctets":
- procSnmp6.Ip6.OutOctets = &value
+ procSnmp6.OutOctets = &value
case "InMcastOctets":
- procSnmp6.Ip6.InMcastOctets = &value
+ procSnmp6.InMcastOctets = &value
case "OutMcastOctets":
- procSnmp6.Ip6.OutMcastOctets = &value
+ procSnmp6.OutMcastOctets = &value
case "InBcastOctets":
- procSnmp6.Ip6.InBcastOctets = &value
+ procSnmp6.InBcastOctets = &value
case "OutBcastOctets":
- procSnmp6.Ip6.OutBcastOctets = &value
+ procSnmp6.OutBcastOctets = &value
case "InNoECTPkts":
- procSnmp6.Ip6.InNoECTPkts = &value
+ procSnmp6.InNoECTPkts = &value
case "InECT1Pkts":
- procSnmp6.Ip6.InECT1Pkts = &value
+ procSnmp6.InECT1Pkts = &value
case "InECT0Pkts":
- procSnmp6.Ip6.InECT0Pkts = &value
+ procSnmp6.InECT0Pkts = &value
case "InCEPkts":
- procSnmp6.Ip6.InCEPkts = &value
+ procSnmp6.InCEPkts = &value
}
case "Icmp6":
switch key {
case "InMsgs":
- procSnmp6.Icmp6.InMsgs = &value
+ procSnmp6.InMsgs = &value
case "InErrors":
procSnmp6.Icmp6.InErrors = &value
case "OutMsgs":
- procSnmp6.Icmp6.OutMsgs = &value
+ procSnmp6.OutMsgs = &value
case "OutErrors":
- procSnmp6.Icmp6.OutErrors = &value
+ procSnmp6.OutErrors = &value
case "InCsumErrors":
procSnmp6.Icmp6.InCsumErrors = &value
case "InDestUnreachs":
- procSnmp6.Icmp6.InDestUnreachs = &value
+ procSnmp6.InDestUnreachs = &value
case "InPktTooBigs":
- procSnmp6.Icmp6.InPktTooBigs = &value
+ procSnmp6.InPktTooBigs = &value
case "InTimeExcds":
- procSnmp6.Icmp6.InTimeExcds = &value
+ procSnmp6.InTimeExcds = &value
case "InParmProblems":
- procSnmp6.Icmp6.InParmProblems = &value
+ procSnmp6.InParmProblems = &value
case "InEchos":
- procSnmp6.Icmp6.InEchos = &value
+ procSnmp6.InEchos = &value
case "InEchoReplies":
- procSnmp6.Icmp6.InEchoReplies = &value
+ procSnmp6.InEchoReplies = &value
case "InGroupMembQueries":
- procSnmp6.Icmp6.InGroupMembQueries = &value
+ procSnmp6.InGroupMembQueries = &value
case "InGroupMembResponses":
- procSnmp6.Icmp6.InGroupMembResponses = &value
+ procSnmp6.InGroupMembResponses = &value
case "InGroupMembReductions":
- procSnmp6.Icmp6.InGroupMembReductions = &value
+ procSnmp6.InGroupMembReductions = &value
case "InRouterSolicits":
- procSnmp6.Icmp6.InRouterSolicits = &value
+ procSnmp6.InRouterSolicits = &value
case "InRouterAdvertisements":
- procSnmp6.Icmp6.InRouterAdvertisements = &value
+ procSnmp6.InRouterAdvertisements = &value
case "InNeighborSolicits":
- procSnmp6.Icmp6.InNeighborSolicits = &value
+ procSnmp6.InNeighborSolicits = &value
case "InNeighborAdvertisements":
- procSnmp6.Icmp6.InNeighborAdvertisements = &value
+ procSnmp6.InNeighborAdvertisements = &value
case "InRedirects":
- procSnmp6.Icmp6.InRedirects = &value
+ procSnmp6.InRedirects = &value
case "InMLDv2Reports":
- procSnmp6.Icmp6.InMLDv2Reports = &value
+ procSnmp6.InMLDv2Reports = &value
case "OutDestUnreachs":
- procSnmp6.Icmp6.OutDestUnreachs = &value
+ procSnmp6.OutDestUnreachs = &value
case "OutPktTooBigs":
- procSnmp6.Icmp6.OutPktTooBigs = &value
+ procSnmp6.OutPktTooBigs = &value
case "OutTimeExcds":
- procSnmp6.Icmp6.OutTimeExcds = &value
+ procSnmp6.OutTimeExcds = &value
case "OutParmProblems":
- procSnmp6.Icmp6.OutParmProblems = &value
+ procSnmp6.OutParmProblems = &value
case "OutEchos":
- procSnmp6.Icmp6.OutEchos = &value
+ procSnmp6.OutEchos = &value
case "OutEchoReplies":
- procSnmp6.Icmp6.OutEchoReplies = &value
+ procSnmp6.OutEchoReplies = &value
case "OutGroupMembQueries":
- procSnmp6.Icmp6.OutGroupMembQueries = &value
+ procSnmp6.OutGroupMembQueries = &value
case "OutGroupMembResponses":
- procSnmp6.Icmp6.OutGroupMembResponses = &value
+ procSnmp6.OutGroupMembResponses = &value
case "OutGroupMembReductions":
- procSnmp6.Icmp6.OutGroupMembReductions = &value
+ procSnmp6.OutGroupMembReductions = &value
case "OutRouterSolicits":
- procSnmp6.Icmp6.OutRouterSolicits = &value
+ procSnmp6.OutRouterSolicits = &value
case "OutRouterAdvertisements":
- procSnmp6.Icmp6.OutRouterAdvertisements = &value
+ procSnmp6.OutRouterAdvertisements = &value
case "OutNeighborSolicits":
- procSnmp6.Icmp6.OutNeighborSolicits = &value
+ procSnmp6.OutNeighborSolicits = &value
case "OutNeighborAdvertisements":
- procSnmp6.Icmp6.OutNeighborAdvertisements = &value
+ procSnmp6.OutNeighborAdvertisements = &value
case "OutRedirects":
- procSnmp6.Icmp6.OutRedirects = &value
+ procSnmp6.OutRedirects = &value
case "OutMLDv2Reports":
- procSnmp6.Icmp6.OutMLDv2Reports = &value
+ procSnmp6.OutMLDv2Reports = &value
case "InType1":
- procSnmp6.Icmp6.InType1 = &value
+ procSnmp6.InType1 = &value
case "InType134":
- procSnmp6.Icmp6.InType134 = &value
+ procSnmp6.InType134 = &value
case "InType135":
- procSnmp6.Icmp6.InType135 = &value
+ procSnmp6.InType135 = &value
case "InType136":
- procSnmp6.Icmp6.InType136 = &value
+ procSnmp6.InType136 = &value
case "InType143":
- procSnmp6.Icmp6.InType143 = &value
+ procSnmp6.InType143 = &value
case "OutType133":
- procSnmp6.Icmp6.OutType133 = &value
+ procSnmp6.OutType133 = &value
case "OutType135":
- procSnmp6.Icmp6.OutType135 = &value
+ procSnmp6.OutType135 = &value
case "OutType136":
- procSnmp6.Icmp6.OutType136 = &value
+ procSnmp6.OutType136 = &value
case "OutType143":
- procSnmp6.Icmp6.OutType143 = &value
+ procSnmp6.OutType143 = &value
}
case "Udp6":
switch key {
@@ -355,7 +355,7 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
case "InCsumErrors":
procSnmp6.Udp6.InCsumErrors = &value
case "IgnoredMulti":
- procSnmp6.Udp6.IgnoredMulti = &value
+ procSnmp6.IgnoredMulti = &value
}
case "UdpLite6":
switch key {
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
index 06a8d931c9..3328556bdc 100644
--- a/vendor/github.com/prometheus/procfs/proc_stat.go
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -101,6 +101,12 @@ type ProcStat struct {
RSS int
// Soft limit in bytes on the rss of the process.
RSSLimit uint64
+ // The address above which program text can run.
+ StartCode uint64
+ // The address below which program text can run.
+ EndCode uint64
+ // The address of the start (i.e., bottom) of the stack.
+ StartStack uint64
// CPU number last executed on.
Processor uint
// Real-time scheduling priority, a number in the range 1 to 99 for processes
@@ -177,9 +183,9 @@ func (p Proc) Stat() (ProcStat, error) {
&s.VSize,
&s.RSS,
&s.RSSLimit,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreUint64,
+ &s.StartCode,
+ &s.EndCode,
+ &s.StartStack,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
diff --git a/vendor/github.com/prometheus/procfs/proc_statm.go b/vendor/github.com/prometheus/procfs/proc_statm.go
new file mode 100644
index 0000000000..ed57984243
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_statm.go
@@ -0,0 +1,116 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// - https://man7.org/linux/man-pages/man5/proc_pid_statm.5.html
+
+// ProcStatm Provides memory usage information for a process, measured in memory pages.
+// Read from /proc/[pid]/statm.
+type ProcStatm struct {
+ // The process ID.
+ PID int
+ // total program size (same as VmSize in status)
+ Size uint64
+ // resident set size (same as VmRSS in status)
+ Resident uint64
+ // number of resident shared pages (i.e., backed by a file)
+ Shared uint64
+ // text (code)
+ Text uint64
+ // library (unused since Linux 2.6; always 0)
+ Lib uint64
+ // data + stack
+ Data uint64
+ // dirty pages (unused since Linux 2.6; always 0)
+ Dt uint64
+}
+
+// NewStatm returns the current status information of the process.
+// Deprecated: Use p.Statm() instead.
+func (p Proc) NewStatm() (ProcStatm, error) {
+ return p.Statm()
+}
+
+// Statm returns the current memory usage information of the process.
+func (p Proc) Statm() (ProcStatm, error) {
+ data, err := util.ReadFileNoStat(p.path("statm"))
+ if err != nil {
+ return ProcStatm{}, err
+ }
+
+ statmSlice, err := parseStatm(data)
+ if err != nil {
+ return ProcStatm{}, err
+ }
+
+ procStatm := ProcStatm{
+ PID: p.PID,
+ Size: statmSlice[0],
+ Resident: statmSlice[1],
+ Shared: statmSlice[2],
+ Text: statmSlice[3],
+ Lib: statmSlice[4],
+ Data: statmSlice[5],
+ Dt: statmSlice[6],
+ }
+
+ return procStatm, nil
+}
+
+// parseStatm return /proc/[pid]/statm data to uint64 slice.
+func parseStatm(data []byte) ([]uint64, error) {
+ var statmSlice []uint64
+ statmItems := strings.Fields(string(data))
+ for i := 0; i < len(statmItems); i++ {
+ statmItem, err := strconv.ParseUint(statmItems[i], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ statmSlice = append(statmSlice, statmItem)
+ }
+ return statmSlice, nil
+}
+
+// SizeBytes returns the process of total program size in bytes.
+func (s ProcStatm) SizeBytes() uint64 {
+ return s.Size * uint64(os.Getpagesize())
+}
+
+// ResidentBytes returns the process of resident set size in bytes.
+func (s ProcStatm) ResidentBytes() uint64 {
+ return s.Resident * uint64(os.Getpagesize())
+}
+
+// SHRBytes returns the process of share memory size in bytes.
+func (s ProcStatm) SHRBytes() uint64 {
+ return s.Shared * uint64(os.Getpagesize())
+}
+
+// TextBytes returns the process of text (code) size in bytes.
+func (s ProcStatm) TextBytes() uint64 {
+ return s.Text * uint64(os.Getpagesize())
+}
+
+// DataBytes returns the process of data + stack size in bytes.
+func (s ProcStatm) DataBytes() uint64 {
+ return s.Data * uint64(os.Getpagesize())
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
index a055197c63..dd8aa56885 100644
--- a/vendor/github.com/prometheus/procfs/proc_status.go
+++ b/vendor/github.com/prometheus/procfs/proc_status.go
@@ -146,7 +146,11 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
}
}
case "NSpid":
- s.NSpids = calcNSPidsList(vString)
+ nspids, err := calcNSPidsList(vString)
+ if err != nil {
+ return err
+ }
+ s.NSpids = nspids
case "VmPeak":
s.VmPeak = vUintBytes
case "VmSize":
@@ -222,17 +226,17 @@ func calcCpusAllowedList(cpuString string) []uint64 {
return g
}
-func calcNSPidsList(nspidsString string) []uint64 {
- s := strings.Split(nspidsString, " ")
+func calcNSPidsList(nspidsString string) ([]uint64, error) {
+ s := strings.Split(nspidsString, "\t")
var nspids []uint64
for _, nspid := range s {
- nspid, _ := strconv.ParseUint(nspid, 10, 64)
- if nspid == 0 {
- continue
+ nspid, err := strconv.ParseUint(nspid, 10, 64)
+ if err != nil {
+ return nil, err
}
nspids = append(nspids, nspid)
}
- return nspids
+ return nspids, nil
}
diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go
index 5eefbe2ef8..3810d1ac99 100644
--- a/vendor/github.com/prometheus/procfs/proc_sys.go
+++ b/vendor/github.com/prometheus/procfs/proc_sys.go
@@ -21,7 +21,7 @@ import (
)
func sysctlToPath(sysctl string) string {
- return strings.Replace(sysctl, ".", "/", -1)
+ return strings.ReplaceAll(sysctl, ".", "/")
}
func (fs FS) SysctlStrings(sysctl string) ([]string, error) {
diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go
index 28708e0745..403e6ae708 100644
--- a/vendor/github.com/prometheus/procfs/softirqs.go
+++ b/vendor/github.com/prometheus/procfs/softirqs.go
@@ -68,8 +68,8 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
if len(parts) < 2 {
continue
}
- switch {
- case parts[0] == "HI:":
+ switch parts[0] {
+ case "HI:":
perCPU := parts[1:]
softirqs.Hi = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -77,7 +77,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "TIMER:":
+ case "TIMER:":
perCPU := parts[1:]
softirqs.Timer = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -85,7 +85,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "NET_TX:":
+ case "NET_TX:":
perCPU := parts[1:]
softirqs.NetTx = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -93,7 +93,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "NET_RX:":
+ case "NET_RX:":
perCPU := parts[1:]
softirqs.NetRx = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -101,7 +101,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "BLOCK:":
+ case "BLOCK:":
perCPU := parts[1:]
softirqs.Block = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -109,7 +109,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "IRQ_POLL:":
+ case "IRQ_POLL:":
perCPU := parts[1:]
softirqs.IRQPoll = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -117,7 +117,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "TASKLET:":
+ case "TASKLET:":
perCPU := parts[1:]
softirqs.Tasklet = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -125,7 +125,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "SCHED:":
+ case "SCHED:":
perCPU := parts[1:]
softirqs.Sched = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -133,7 +133,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "HRTIMER:":
+ case "HRTIMER:":
perCPU := parts[1:]
softirqs.HRTimer = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -141,7 +141,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "RCU:":
+ case "RCU:":
perCPU := parts[1:]
softirqs.RCU = make([]uint64, len(perCPU))
for i, count := range perCPU {
diff --git a/vendor/github.com/rcrowley/go-metrics/.travis.yml b/vendor/github.com/rcrowley/go-metrics/.travis.yml
index 409a5b631c..ce9afeaeea 100644
--- a/vendor/github.com/rcrowley/go-metrics/.travis.yml
+++ b/vendor/github.com/rcrowley/go-metrics/.travis.yml
@@ -13,6 +13,7 @@ go:
- "1.12"
- "1.13"
- "1.14"
+ - "1.15"
script:
- ./validate.sh
diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md
index 27ddfee8b8..6492bfe851 100644
--- a/vendor/github.com/rcrowley/go-metrics/README.md
+++ b/vendor/github.com/rcrowley/go-metrics/README.md
@@ -7,6 +7,15 @@ Go port of Coda Hale's Metrics library: .
Documentation: .
+Archived as of April 1 2025
+-----
+This repository is no longer maintained. The authors recommend you explore the
+following newer, more widely adopted libraries for your Go instrumentation
+needs:
+
+* [OpenTelemetry Go SDK](https://opentelemetry.io/docs/languages/go/instrumentation/#metrics)
+* [Prometheus Go Client Library](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus)
+
Usage
-----
diff --git a/vendor/github.com/segmentio/asm/LICENSE b/vendor/github.com/segmentio/asm/LICENSE
new file mode 100644
index 0000000000..29e1ab6b05
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Segment
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/segmentio/asm/base64/base64.go b/vendor/github.com/segmentio/asm/base64/base64.go
new file mode 100644
index 0000000000..dd2128d4a9
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/base64.go
@@ -0,0 +1,67 @@
+package base64
+
+import (
+ "encoding/base64"
+)
+
+const (
+ StdPadding rune = base64.StdPadding
+ NoPadding rune = base64.NoPadding
+
+ encodeStd = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
+ encodeURL = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"
+ encodeIMAP = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,"
+
+ letterRange = int8('Z' - 'A' + 1)
+)
+
+// StdEncoding is the standard base64 encoding, as defined in RFC 4648.
+var StdEncoding = NewEncoding(encodeStd)
+
+// URLEncoding is the alternate base64 encoding defined in RFC 4648.
+// It is typically used in URLs and file names.
+var URLEncoding = NewEncoding(encodeURL)
+
+// RawStdEncoding is the standard unpadded base64 encoding defined in RFC 4648 section 3.2.
+// This is the same as StdEncoding but omits padding characters.
+var RawStdEncoding = StdEncoding.WithPadding(NoPadding)
+
+// RawURLEncoding is the unpadded alternate base64 encoding defined in RFC 4648.
+// This is the same as URLEncoding but omits padding characters.
+var RawURLEncoding = URLEncoding.WithPadding(NoPadding)
+
+// NewEncoding returns a new padded Encoding defined by the given alphabet,
+// which must be a 64-byte string that does not contain the padding character
+// or CR / LF ('\r', '\n'). Unlike the standard library, the encoding alphabet
+// cannot be abitrary, and it must follow one of the know standard encoding
+// variants.
+//
+// Required alphabet values:
+// * [0,26): characters 'A'..'Z'
+// * [26,52): characters 'a'..'z'
+// * [52,62): characters '0'..'9'
+// Flexible alphabet value options:
+// * RFC 4648, RFC 1421, RFC 2045, RFC 2152, RFC 4880: '+' and '/'
+// * RFC 4648 URI: '-' and '_'
+// * RFC 3501: '+' and ','
+//
+// The resulting Encoding uses the default padding character ('='), which may
+// be changed or disabled via WithPadding. The padding characters is urestricted,
+// but it must be a character outside of the encoder alphabet.
+func NewEncoding(encoder string) *Encoding {
+ if len(encoder) != 64 {
+ panic("encoding alphabet is not 64-bytes long")
+ }
+
+ if _, ok := allowedEncoding[encoder]; !ok {
+ panic("non-standard encoding alphabets are not supported")
+ }
+
+ return newEncoding(encoder)
+}
+
+var allowedEncoding = map[string]struct{}{
+ encodeStd: {},
+ encodeURL: {},
+ encodeIMAP: {},
+}
diff --git a/vendor/github.com/segmentio/asm/base64/base64_amd64.go b/vendor/github.com/segmentio/asm/base64/base64_amd64.go
new file mode 100644
index 0000000000..4136098eaa
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/base64_amd64.go
@@ -0,0 +1,78 @@
+//go:build amd64 && !purego
+// +build amd64,!purego
+
+package base64
+
+import (
+ "encoding/base64"
+
+ "github.com/segmentio/asm/cpu"
+ "github.com/segmentio/asm/cpu/x86"
+)
+
+const (
+ encLutSize = 32
+ decLutSize = 48
+ minEncodeLen = 28
+ minDecodeLen = 45
+)
+
+func newEncoding(encoder string) *Encoding {
+ e := &Encoding{base: base64.NewEncoding(encoder)}
+ if cpu.X86.Has(x86.AVX2) {
+ e.enableEncodeAVX2(encoder)
+ e.enableDecodeAVX2(encoder)
+ }
+ return e
+}
+
+func (e *Encoding) enableEncodeAVX2(encoder string) {
+ // Translate values 0..63 to the Base64 alphabet. There are five sets:
+ //
+ // From To Add Index Example
+ // [0..25] [65..90] +65 0 ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ // [26..51] [97..122] +71 1 abcdefghijklmnopqrstuvwxyz
+ // [52..61] [48..57] -4 [2..11] 0123456789
+ // [62] [43] -19 12 +
+ // [63] [47] -16 13 /
+ tab := [encLutSize]int8{int8(encoder[0]), int8(encoder[letterRange]) - letterRange}
+ for i, ch := range encoder[2*letterRange:] {
+ tab[2+i] = int8(ch) - 2*letterRange - int8(i)
+ }
+
+ e.enc = encodeAVX2
+ e.enclut = tab
+}
+
+func (e *Encoding) enableDecodeAVX2(encoder string) {
+ c62, c63 := int8(encoder[62]), int8(encoder[63])
+ url := c63 == '_'
+ if url {
+ c63 = '/'
+ }
+
+ // Translate values from the Base64 alphabet using five sets. Values outside
+ // of these ranges are considered invalid:
+ //
+ // From To Add Index Example
+ // [47] [63] +16 1 /
+ // [43] [62] +19 2 +
+ // [48..57] [52..61] +4 3 0123456789
+ // [65..90] [0..25] -65 4,5 ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ // [97..122] [26..51] -71 6,7 abcdefghijklmnopqrstuvwxyz
+ tab := [decLutSize]int8{
+ 0, 63 - c63, 62 - c62, 4, -65, -65, -71, -71,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x15, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x13, 0x1B, 0x1B, 0x1B, 0x1B, 0x1B,
+ }
+ tab[(c62&15)+16] = 0x1A
+ tab[(c63&15)+16] = 0x1A
+
+ if url {
+ e.dec = decodeAVX2URI
+ } else {
+ e.dec = decodeAVX2
+ }
+ e.declut = tab
+}
diff --git a/vendor/github.com/segmentio/asm/base64/base64_arm64.go b/vendor/github.com/segmentio/asm/base64/base64_arm64.go
new file mode 100644
index 0000000000..276f300287
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/base64_arm64.go
@@ -0,0 +1,42 @@
+//go:build arm64 && !purego
+// +build arm64,!purego
+
+package base64
+
+import (
+ "encoding/base64"
+)
+
+const (
+ encLutSize = 16
+ decLutSize = 2
+ minEncodeLen = 16 * 3
+ minDecodeLen = 8 * 4
+)
+
+func newEncoding(encoder string) *Encoding {
+ e := &Encoding{base: base64.NewEncoding(encoder)}
+ e.enableEncodeARM64(encoder)
+ e.enableDecodeARM64(encoder)
+ return e
+}
+
+func (e *Encoding) enableEncodeARM64(encoder string) {
+ c62, c63 := int8(encoder[62]), int8(encoder[63])
+ tab := [encLutSize]int8{
+ 'a' - 26, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52,
+ '0' - 52, '0' - 52, '0' - 52, c62 - 62, c63 - 63, 'A', 0, 0,
+ }
+
+ e.enc = encodeARM64
+ e.enclut = tab
+}
+
+func (e *Encoding) enableDecodeARM64(encoder string) {
+ if encoder == encodeStd {
+ e.dec = decodeStdARM64
+ } else {
+ e.dec = decodeARM64
+ }
+ e.declut = [decLutSize]int8{int8(encoder[62]), int8(encoder[63])}
+}
diff --git a/vendor/github.com/segmentio/asm/base64/base64_asm.go b/vendor/github.com/segmentio/asm/base64/base64_asm.go
new file mode 100644
index 0000000000..f9afadd7f2
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/base64_asm.go
@@ -0,0 +1,94 @@
+//go:build (amd64 || arm64) && !purego
+// +build amd64 arm64
+// +build !purego
+
+package base64
+
+import (
+ "encoding/base64"
+
+ "github.com/segmentio/asm/internal/unsafebytes"
+)
+
+// An Encoding is a radix 64 encoding/decoding scheme, defined by a
+// 64-character alphabet.
+type Encoding struct {
+ enc func(dst []byte, src []byte, lut *int8) (int, int)
+ enclut [encLutSize]int8
+
+ dec func(dst []byte, src []byte, lut *int8) (int, int)
+ declut [decLutSize]int8
+
+ base *base64.Encoding
+}
+
+// WithPadding creates a duplicate Encoding updated with a specified padding
+// character, or NoPadding to disable padding. The padding character must not
+// be contained in the encoding alphabet, must not be '\r' or '\n', and must
+// be no greater than '\xFF'.
+func (enc Encoding) WithPadding(padding rune) *Encoding {
+ enc.base = enc.base.WithPadding(padding)
+ return &enc
+}
+
+// Strict creates a duplicate encoding updated with strict decoding enabled.
+// This requires that trailing padding bits are zero.
+func (enc Encoding) Strict() *Encoding {
+ enc.base = enc.base.Strict()
+ return &enc
+}
+
+// Encode encodes src using the defined encoding alphabet.
+// This will write EncodedLen(len(src)) bytes to dst.
+func (enc *Encoding) Encode(dst, src []byte) {
+ if len(src) >= minEncodeLen && enc.enc != nil {
+ d, s := enc.enc(dst, src, &enc.enclut[0])
+ dst = dst[d:]
+ src = src[s:]
+ }
+ enc.base.Encode(dst, src)
+}
+
+// Encode encodes src using the encoding enc, writing
+// EncodedLen(len(src)) bytes to dst.
+func (enc *Encoding) EncodeToString(src []byte) string {
+ buf := make([]byte, enc.base.EncodedLen(len(src)))
+ enc.Encode(buf, src)
+ return string(buf)
+}
+
+// EncodedLen calculates the base64-encoded byte length for a message
+// of length n.
+func (enc *Encoding) EncodedLen(n int) int {
+ return enc.base.EncodedLen(n)
+}
+
+// Decode decodes src using the defined encoding alphabet.
+// This will write DecodedLen(len(src)) bytes to dst and return the number of
+// bytes written.
+func (enc *Encoding) Decode(dst, src []byte) (n int, err error) {
+ var d, s int
+ if len(src) >= minDecodeLen && enc.dec != nil {
+ d, s = enc.dec(dst, src, &enc.declut[0])
+ dst = dst[d:]
+ src = src[s:]
+ }
+ n, err = enc.base.Decode(dst, src)
+ n += d
+ return
+}
+
+// DecodeString decodes the base64 encoded string s, returns the decoded
+// value as bytes.
+func (enc *Encoding) DecodeString(s string) ([]byte, error) {
+ src := unsafebytes.BytesOf(s)
+ dst := make([]byte, enc.base.DecodedLen(len(s)))
+ n, err := enc.Decode(dst, src)
+ return dst[:n], err
+}
+
+// DecodedLen calculates the decoded byte length for a base64-encoded message
+// of length n.
+func (enc *Encoding) DecodedLen(n int) int {
+ return enc.base.DecodedLen(n)
+}
diff --git a/vendor/github.com/segmentio/asm/base64/base64_default.go b/vendor/github.com/segmentio/asm/base64/base64_default.go
new file mode 100644
index 0000000000..1720da5ca7
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/base64_default.go
@@ -0,0 +1,14 @@
+//go:build purego || !(amd64 || arm64)
+// +build purego !amd64,!arm64
+
+package base64
+
+import "encoding/base64"
+
+// An Encoding is a radix 64 encoding/decoding scheme, defined by a
+// 64-character alphabet.
+type Encoding = base64.Encoding
+
+func newEncoding(encoder string) *Encoding {
+ return base64.NewEncoding(encoder)
+}
diff --git a/vendor/github.com/segmentio/asm/base64/decode_amd64.go b/vendor/github.com/segmentio/asm/base64/decode_amd64.go
new file mode 100644
index 0000000000..e85bf6a925
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/decode_amd64.go
@@ -0,0 +1,9 @@
+// Code generated by command: go run decode_asm.go -pkg base64 -out ../base64/decode_amd64.s -stubs ../base64/decode_amd64.go. DO NOT EDIT.
+
+//go:build !purego
+
+package base64
+
+func decodeAVX2(dst []byte, src []byte, lut *int8) (int, int)
+
+func decodeAVX2URI(dst []byte, src []byte, lut *int8) (int, int)
diff --git a/vendor/github.com/segmentio/asm/base64/decode_amd64.s b/vendor/github.com/segmentio/asm/base64/decode_amd64.s
new file mode 100644
index 0000000000..ade5442c3b
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/decode_amd64.s
@@ -0,0 +1,143 @@
+// Code generated by command: go run decode_asm.go -pkg base64 -out ../base64/decode_amd64.s -stubs ../base64/decode_amd64.go. DO NOT EDIT.
+
+//go:build !purego
+
+#include "textflag.h"
+
+DATA b64_dec_lut_hi<>+0(SB)/8, $0x0804080402011010
+DATA b64_dec_lut_hi<>+8(SB)/8, $0x1010101010101010
+DATA b64_dec_lut_hi<>+16(SB)/8, $0x0804080402011010
+DATA b64_dec_lut_hi<>+24(SB)/8, $0x1010101010101010
+GLOBL b64_dec_lut_hi<>(SB), RODATA|NOPTR, $32
+
+DATA b64_dec_madd1<>+0(SB)/8, $0x0140014001400140
+DATA b64_dec_madd1<>+8(SB)/8, $0x0140014001400140
+DATA b64_dec_madd1<>+16(SB)/8, $0x0140014001400140
+DATA b64_dec_madd1<>+24(SB)/8, $0x0140014001400140
+GLOBL b64_dec_madd1<>(SB), RODATA|NOPTR, $32
+
+DATA b64_dec_madd2<>+0(SB)/8, $0x0001100000011000
+DATA b64_dec_madd2<>+8(SB)/8, $0x0001100000011000
+DATA b64_dec_madd2<>+16(SB)/8, $0x0001100000011000
+DATA b64_dec_madd2<>+24(SB)/8, $0x0001100000011000
+GLOBL b64_dec_madd2<>(SB), RODATA|NOPTR, $32
+
+DATA b64_dec_shuf_lo<>+0(SB)/8, $0x0000000000000000
+DATA b64_dec_shuf_lo<>+8(SB)/8, $0x0600010200000000
+GLOBL b64_dec_shuf_lo<>(SB), RODATA|NOPTR, $16
+
+DATA b64_dec_shuf<>+0(SB)/8, $0x090a040506000102
+DATA b64_dec_shuf<>+8(SB)/8, $0x000000000c0d0e08
+DATA b64_dec_shuf<>+16(SB)/8, $0x0c0d0e08090a0405
+DATA b64_dec_shuf<>+24(SB)/8, $0x0000000000000000
+GLOBL b64_dec_shuf<>(SB), RODATA|NOPTR, $32
+
+// func decodeAVX2(dst []byte, src []byte, lut *int8) (int, int)
+// Requires: AVX, AVX2, SSE4.1
+TEXT ·decodeAVX2(SB), NOSPLIT, $0-72
+ MOVQ dst_base+0(FP), AX
+ MOVQ src_base+24(FP), DX
+ MOVQ lut+48(FP), SI
+ MOVQ src_len+32(FP), DI
+ MOVB $0x2f, CL
+ PINSRB $0x00, CX, X8
+ VPBROADCASTB X8, Y8
+ XORQ CX, CX
+ XORQ BX, BX
+ VPXOR Y7, Y7, Y7
+ VPERMQ $0x44, (SI), Y6
+ VPERMQ $0x44, 16(SI), Y4
+ VMOVDQA b64_dec_lut_hi<>+0(SB), Y5
+
+loop:
+ VMOVDQU (DX)(BX*1), Y0
+ VPSRLD $0x04, Y0, Y2
+ VPAND Y8, Y0, Y3
+ VPSHUFB Y3, Y4, Y3
+ VPAND Y8, Y2, Y2
+ VPSHUFB Y2, Y5, Y9
+ VPTEST Y9, Y3
+ JNE done
+ VPCMPEQB Y8, Y0, Y3
+ VPADDB Y3, Y2, Y2
+ VPSHUFB Y2, Y6, Y2
+ VPADDB Y0, Y2, Y0
+ VPMADDUBSW b64_dec_madd1<>+0(SB), Y0, Y0
+ VPMADDWD b64_dec_madd2<>+0(SB), Y0, Y0
+ VEXTRACTI128 $0x01, Y0, X1
+ VPSHUFB b64_dec_shuf_lo<>+0(SB), X1, X1
+ VPSHUFB b64_dec_shuf<>+0(SB), Y0, Y0
+ VPBLENDD $0x08, Y1, Y0, Y1
+ VPBLENDD $0xc0, Y7, Y1, Y1
+ VMOVDQU Y1, (AX)(CX*1)
+ ADDQ $0x18, CX
+ ADDQ $0x20, BX
+ SUBQ $0x20, DI
+ CMPQ DI, $0x2d
+ JB done
+ JMP loop
+
+done:
+ MOVQ CX, ret+56(FP)
+ MOVQ BX, ret1+64(FP)
+ VZEROUPPER
+ RET
+
+// func decodeAVX2URI(dst []byte, src []byte, lut *int8) (int, int)
+// Requires: AVX, AVX2, SSE4.1
+TEXT ·decodeAVX2URI(SB), NOSPLIT, $0-72
+ MOVB $0x2f, AL
+ PINSRB $0x00, AX, X0
+ VPBROADCASTB X0, Y0
+ MOVB $0x5f, AL
+ PINSRB $0x00, AX, X1
+ VPBROADCASTB X1, Y1
+ MOVQ dst_base+0(FP), AX
+ MOVQ src_base+24(FP), DX
+ MOVQ lut+48(FP), SI
+ MOVQ src_len+32(FP), DI
+ MOVB $0x2f, CL
+ PINSRB $0x00, CX, X10
+ VPBROADCASTB X10, Y10
+ XORQ CX, CX
+ XORQ BX, BX
+ VPXOR Y9, Y9, Y9
+ VPERMQ $0x44, (SI), Y8
+ VPERMQ $0x44, 16(SI), Y6
+ VMOVDQA b64_dec_lut_hi<>+0(SB), Y7
+
+loop:
+ VMOVDQU (DX)(BX*1), Y2
+ VPCMPEQB Y2, Y1, Y4
+ VPBLENDVB Y4, Y0, Y2, Y2
+ VPSRLD $0x04, Y2, Y4
+ VPAND Y10, Y2, Y5
+ VPSHUFB Y5, Y6, Y5
+ VPAND Y10, Y4, Y4
+ VPSHUFB Y4, Y7, Y11
+ VPTEST Y11, Y5
+ JNE done
+ VPCMPEQB Y10, Y2, Y5
+ VPADDB Y5, Y4, Y4
+ VPSHUFB Y4, Y8, Y4
+ VPADDB Y2, Y4, Y2
+ VPMADDUBSW b64_dec_madd1<>+0(SB), Y2, Y2
+ VPMADDWD b64_dec_madd2<>+0(SB), Y2, Y2
+ VEXTRACTI128 $0x01, Y2, X3
+ VPSHUFB b64_dec_shuf_lo<>+0(SB), X3, X3
+ VPSHUFB b64_dec_shuf<>+0(SB), Y2, Y2
+ VPBLENDD $0x08, Y3, Y2, Y3
+ VPBLENDD $0xc0, Y9, Y3, Y3
+ VMOVDQU Y3, (AX)(CX*1)
+ ADDQ $0x18, CX
+ ADDQ $0x20, BX
+ SUBQ $0x20, DI
+ CMPQ DI, $0x2d
+ JB done
+ JMP loop
+
+done:
+ MOVQ CX, ret+56(FP)
+ MOVQ BX, ret1+64(FP)
+ VZEROUPPER
+ RET
diff --git a/vendor/github.com/segmentio/asm/base64/decode_arm64.go b/vendor/github.com/segmentio/asm/base64/decode_arm64.go
new file mode 100644
index 0000000000..d44baa1dc5
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/decode_arm64.go
@@ -0,0 +1,7 @@
+//go:build !purego
+// +build !purego
+
+package base64
+
+func decodeARM64(dst []byte, src []byte, lut *int8) (int, int)
+func decodeStdARM64(dst []byte, src []byte, lut *int8) (int, int)
diff --git a/vendor/github.com/segmentio/asm/base64/decode_arm64.s b/vendor/github.com/segmentio/asm/base64/decode_arm64.s
new file mode 100644
index 0000000000..4374d5ce17
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/decode_arm64.s
@@ -0,0 +1,203 @@
+#include "textflag.h"
+
+#define LOAD_ARGS() \
+ MOVD dst_base+0(FP), R0; \
+ MOVD R0, R3; \
+ MOVD src_base+24(FP), R1; \
+ MOVD R1, R4; \
+ MOVD src_len+32(FP), R2; \
+ BIC $31, R2, R2; \
+ ADD R1, R2, R2
+
+#define LOAD_ARG_LUT() \
+ MOVD lut+48(FP), R5; \
+ VLD2R (R5), [V0.B16, V1.B16]
+
+#define LOAD_CONST_LUT() \
+ MOVD $·mask_lut(SB), R6; \
+ MOVD $·bpos_lut(SB), R7; \
+ MOVD $·shft_lut(SB), R8; \
+ VLD1 (R6), [V2.B16]; \
+ VLD1 (R7), [V3.B16]; \
+ VLD1 (R8), [V4.B16]; \
+ VMOVI $43, V5.B8; \
+ VMOVI $47, V6.B8; \
+ VMOVI $15, V7.B8; \
+ VMOVI $16, V8.B8; \
+
+#define LOAD_INPUT() \
+ VLD4 (R4), [V10.B8, V11.B8, V12.B8, V13.B8]
+
+#define COMPARE_INPUT(v) \
+ VCMEQ V10.B8, v.B8, V14.B8; \
+ VCMEQ V11.B8, v.B8, V15.B8; \
+ VCMEQ V12.B8, v.B8, V16.B8; \
+ VCMEQ V13.B8, v.B8, V17.B8
+
+#define UPDATE_INPUT(v) \
+ VBIT V14.B8, v.B8, V10.B8; \
+ VBIT V15.B8, v.B8, V11.B8; \
+ VBIT V16.B8, v.B8, V12.B8; \
+ VBIT V17.B8, v.B8, V13.B8
+
+#define DECODE_INPUT(goto_err) \
+ /* Create hi/lo nibles */ \
+ VUSHR $4, V10.B8, V18.B8; \
+ VUSHR $4, V11.B8, V19.B8; \
+ VUSHR $4, V12.B8, V20.B8; \
+ VUSHR $4, V13.B8, V21.B8; \
+ VAND V7.B8, V10.B8, V22.B8; \
+ VAND V7.B8, V11.B8, V23.B8; \
+ VAND V7.B8, V12.B8, V24.B8; \
+ VAND V7.B8, V13.B8, V25.B8; \
+ /* Detect invalid input characters */ \
+ VTBL V22.B8, [V2.B8], V22.B8; \
+ VTBL V23.B8, [V2.B8], V23.B8; \
+ VTBL V24.B8, [V2.B8], V24.B8; \
+ VTBL V25.B8, [V2.B8], V25.B8; \
+ VTBL V18.B8, [V3.B8], V26.B8; \
+ VTBL V19.B8, [V3.B8], V27.B8; \
+ VTBL V20.B8, [V3.B8], V28.B8; \
+ VTBL V21.B8, [V3.B8], V29.B8; \
+ VAND V22.B8, V26.B8, V26.B8; \
+ VAND V23.B8, V27.B8, V27.B8; \
+ VAND V24.B8, V28.B8, V28.B8; \
+ VAND V25.B8, V29.B8, V29.B8; \
+ WORD $0x0e209b5a /* VCMEQ $0, V26.B8, V26.B8 */; \
+ WORD $0x0e209b7b /* VCMEQ $0, V27.B8, V27.B8 */; \
+ WORD $0x0e209b9c /* VCMEQ $0, V28.B8, V28.B8 */; \
+ WORD $0x0e209bbd /* VCMEQ $0, V29.B8, V29.B8 */; \
+ VORR V26.B8, V27.B8, V26.B8; \
+ VORR V28.B8, V29.B8, V28.B8; \
+ VORR V26.B8, V28.B8, V26.B8; \
+ VMOV V26.D[0], R5; \
+ VMOV V26.D[1], R6; \
+ ORR R6, R5; \
+ CBNZ R5, goto_err; \
+ /* Shift hi nibles */ \
+ VTBL V18.B8, [V4.B8], V18.B8; \
+ VTBL V19.B8, [V4.B8], V19.B8; \
+ VTBL V20.B8, [V4.B8], V20.B8; \
+ VTBL V21.B8, [V4.B8], V21.B8; \
+ VBIT V14.B8, V8.B8, V18.B8; \
+ VBIT V15.B8, V8.B8, V19.B8; \
+ VBIT V16.B8, V8.B8, V20.B8; \
+ VBIT V17.B8, V8.B8, V21.B8; \
+ /* Combine results */ \
+ VADD V18.B8, V10.B8, V10.B8; \
+ VADD V19.B8, V11.B8, V11.B8; \
+ VADD V20.B8, V12.B8, V12.B8; \
+ VADD V21.B8, V13.B8, V13.B8; \
+ VUSHR $4, V11.B8, V14.B8; \
+ VUSHR $2, V12.B8, V15.B8; \
+ VSHL $2, V10.B8, V10.B8; \
+ VSHL $4, V11.B8, V11.B8; \
+ VSHL $6, V12.B8, V12.B8; \
+ VORR V10.B8, V14.B8, V16.B8; \
+ VORR V11.B8, V15.B8, V17.B8; \
+ VORR V12.B8, V13.B8, V18.B8
+
+#define ADVANCE_LOOP(goto_loop) \
+ VST3.P [V16.B8, V17.B8, V18.B8], 24(R3); \
+ ADD $32, R4; \
+ CMP R4, R2; \
+ BGT goto_loop
+
+#define RETURN() \
+ SUB R0, R3; \
+ SUB R1, R4; \
+ MOVD R3, ret+56(FP); \
+ MOVD R4, ret1+64(FP); \
+ RET
+
+
+// func decodeARM64(dst []byte, src []byte, lut *int8) (int, int)
+TEXT ·decodeARM64(SB),NOSPLIT,$0-72
+ LOAD_ARGS()
+ LOAD_ARG_LUT()
+ LOAD_CONST_LUT()
+
+loop:
+ LOAD_INPUT()
+
+ // Compare and normalize the 63rd and 64th characters
+ COMPARE_INPUT(V0)
+ UPDATE_INPUT(V5)
+ COMPARE_INPUT(V1)
+ UPDATE_INPUT(V6)
+
+ DECODE_INPUT(done) // Detect invalid input characters
+ ADVANCE_LOOP(loop) // Store results and continue
+
+done:
+ RETURN()
+
+
+// func decodeStdARM64(dst []byte, src []byte, lut *int8) (int, int)
+TEXT ·decodeStdARM64(SB),NOSPLIT,$0-72
+ LOAD_ARGS()
+ LOAD_CONST_LUT()
+
+loop:
+ LOAD_INPUT()
+ COMPARE_INPUT(V6) // Compare to '+'
+ DECODE_INPUT(done) // Detect invalid input characters
+ ADVANCE_LOOP(loop) // Store results and continue
+
+done:
+ RETURN()
+
+
+DATA ·mask_lut+0x00(SB)/1, $0xa8
+DATA ·mask_lut+0x01(SB)/1, $0xf8
+DATA ·mask_lut+0x02(SB)/1, $0xf8
+DATA ·mask_lut+0x03(SB)/1, $0xf8
+DATA ·mask_lut+0x04(SB)/1, $0xf8
+DATA ·mask_lut+0x05(SB)/1, $0xf8
+DATA ·mask_lut+0x06(SB)/1, $0xf8
+DATA ·mask_lut+0x07(SB)/1, $0xf8
+DATA ·mask_lut+0x08(SB)/1, $0xf8
+DATA ·mask_lut+0x09(SB)/1, $0xf8
+DATA ·mask_lut+0x0a(SB)/1, $0xf0
+DATA ·mask_lut+0x0b(SB)/1, $0x54
+DATA ·mask_lut+0x0c(SB)/1, $0x50
+DATA ·mask_lut+0x0d(SB)/1, $0x50
+DATA ·mask_lut+0x0e(SB)/1, $0x50
+DATA ·mask_lut+0x0f(SB)/1, $0x54
+GLOBL ·mask_lut(SB), NOPTR|RODATA, $16
+
+DATA ·bpos_lut+0x00(SB)/1, $0x01
+DATA ·bpos_lut+0x01(SB)/1, $0x02
+DATA ·bpos_lut+0x02(SB)/1, $0x04
+DATA ·bpos_lut+0x03(SB)/1, $0x08
+DATA ·bpos_lut+0x04(SB)/1, $0x10
+DATA ·bpos_lut+0x05(SB)/1, $0x20
+DATA ·bpos_lut+0x06(SB)/1, $0x40
+DATA ·bpos_lut+0x07(SB)/1, $0x80
+DATA ·bpos_lut+0x08(SB)/1, $0x00
+DATA ·bpos_lut+0x09(SB)/1, $0x00
+DATA ·bpos_lut+0x0a(SB)/1, $0x00
+DATA ·bpos_lut+0x0b(SB)/1, $0x00
+DATA ·bpos_lut+0x0c(SB)/1, $0x00
+DATA ·bpos_lut+0x0d(SB)/1, $0x00
+DATA ·bpos_lut+0x0e(SB)/1, $0x00
+DATA ·bpos_lut+0x0f(SB)/1, $0x00
+GLOBL ·bpos_lut(SB), NOPTR|RODATA, $16
+
+DATA ·shft_lut+0x00(SB)/1, $0x00
+DATA ·shft_lut+0x01(SB)/1, $0x00
+DATA ·shft_lut+0x02(SB)/1, $0x13
+DATA ·shft_lut+0x03(SB)/1, $0x04
+DATA ·shft_lut+0x04(SB)/1, $0xbf
+DATA ·shft_lut+0x05(SB)/1, $0xbf
+DATA ·shft_lut+0x06(SB)/1, $0xb9
+DATA ·shft_lut+0x07(SB)/1, $0xb9
+DATA ·shft_lut+0x08(SB)/1, $0x00
+DATA ·shft_lut+0x09(SB)/1, $0x00
+DATA ·shft_lut+0x0a(SB)/1, $0x00
+DATA ·shft_lut+0x0b(SB)/1, $0x00
+DATA ·shft_lut+0x0c(SB)/1, $0x00
+DATA ·shft_lut+0x0d(SB)/1, $0x00
+DATA ·shft_lut+0x0e(SB)/1, $0x00
+DATA ·shft_lut+0x0f(SB)/1, $0x00
+GLOBL ·shft_lut(SB), NOPTR|RODATA, $16
diff --git a/vendor/github.com/segmentio/asm/base64/encode_amd64.go b/vendor/github.com/segmentio/asm/base64/encode_amd64.go
new file mode 100644
index 0000000000..a83c81f157
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/encode_amd64.go
@@ -0,0 +1,7 @@
+// Code generated by command: go run encode_asm.go -pkg base64 -out ../base64/encode_amd64.s -stubs ../base64/encode_amd64.go. DO NOT EDIT.
+
+//go:build !purego
+
+package base64
+
+func encodeAVX2(dst []byte, src []byte, lut *int8) (int, int)
diff --git a/vendor/github.com/segmentio/asm/base64/encode_amd64.s b/vendor/github.com/segmentio/asm/base64/encode_amd64.s
new file mode 100644
index 0000000000..6797c977e8
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/encode_amd64.s
@@ -0,0 +1,87 @@
+// Code generated by command: go run encode_asm.go -pkg base64 -out ../base64/encode_amd64.s -stubs ../base64/encode_amd64.go. DO NOT EDIT.
+
+//go:build !purego
+
+#include "textflag.h"
+
+// func encodeAVX2(dst []byte, src []byte, lut *int8) (int, int)
+// Requires: AVX, AVX2, SSE4.1
+TEXT ·encodeAVX2(SB), NOSPLIT, $0-72
+ MOVQ dst_base+0(FP), AX
+ MOVQ src_base+24(FP), DX
+ MOVQ lut+48(FP), SI
+ MOVQ src_len+32(FP), DI
+ MOVB $0x33, CL
+ PINSRB $0x00, CX, X4
+ VPBROADCASTB X4, Y4
+ MOVB $0x19, CL
+ PINSRB $0x00, CX, X5
+ VPBROADCASTB X5, Y5
+ XORQ CX, CX
+ XORQ BX, BX
+
+ // Load the 16-byte LUT into both lanes of the register
+ VPERMQ $0x44, (SI), Y3
+
+ // Load the first block using a mask to avoid potential fault
+ VMOVDQU b64_enc_load<>+0(SB), Y0
+ VPMASKMOVD -4(DX)(BX*1), Y0, Y0
+
+loop:
+ VPSHUFB b64_enc_shuf<>+0(SB), Y0, Y0
+ VPAND b64_enc_mask1<>+0(SB), Y0, Y1
+ VPSLLW $0x08, Y1, Y2
+ VPSLLW $0x04, Y1, Y1
+ VPBLENDW $0xaa, Y2, Y1, Y2
+ VPAND b64_enc_mask2<>+0(SB), Y0, Y1
+ VPMULHUW b64_enc_mult<>+0(SB), Y1, Y0
+ VPOR Y0, Y2, Y0
+ VPSUBUSB Y4, Y0, Y1
+ VPCMPGTB Y5, Y0, Y2
+ VPSUBB Y2, Y1, Y1
+ VPSHUFB Y1, Y3, Y1
+ VPADDB Y0, Y1, Y0
+ VMOVDQU Y0, (AX)(CX*1)
+ ADDQ $0x20, CX
+ ADDQ $0x18, BX
+ SUBQ $0x18, DI
+ CMPQ DI, $0x20
+ JB done
+ VMOVDQU -4(DX)(BX*1), Y0
+ JMP loop
+
+done:
+ MOVQ CX, ret+56(FP)
+ MOVQ BX, ret1+64(FP)
+ VZEROUPPER
+ RET
+
+DATA b64_enc_load<>+0(SB)/8, $0x8000000000000000
+DATA b64_enc_load<>+8(SB)/8, $0x8000000080000000
+DATA b64_enc_load<>+16(SB)/8, $0x8000000080000000
+DATA b64_enc_load<>+24(SB)/8, $0x8000000080000000
+GLOBL b64_enc_load<>(SB), RODATA|NOPTR, $32
+
+DATA b64_enc_shuf<>+0(SB)/8, $0x0809070805060405
+DATA b64_enc_shuf<>+8(SB)/8, $0x0e0f0d0e0b0c0a0b
+DATA b64_enc_shuf<>+16(SB)/8, $0x0405030401020001
+DATA b64_enc_shuf<>+24(SB)/8, $0x0a0b090a07080607
+GLOBL b64_enc_shuf<>(SB), RODATA|NOPTR, $32
+
+DATA b64_enc_mask1<>+0(SB)/8, $0x003f03f0003f03f0
+DATA b64_enc_mask1<>+8(SB)/8, $0x003f03f0003f03f0
+DATA b64_enc_mask1<>+16(SB)/8, $0x003f03f0003f03f0
+DATA b64_enc_mask1<>+24(SB)/8, $0x003f03f0003f03f0
+GLOBL b64_enc_mask1<>(SB), RODATA|NOPTR, $32
+
+DATA b64_enc_mask2<>+0(SB)/8, $0x0fc0fc000fc0fc00
+DATA b64_enc_mask2<>+8(SB)/8, $0x0fc0fc000fc0fc00
+DATA b64_enc_mask2<>+16(SB)/8, $0x0fc0fc000fc0fc00
+DATA b64_enc_mask2<>+24(SB)/8, $0x0fc0fc000fc0fc00
+GLOBL b64_enc_mask2<>(SB), RODATA|NOPTR, $32
+
+DATA b64_enc_mult<>+0(SB)/8, $0x0400004004000040
+DATA b64_enc_mult<>+8(SB)/8, $0x0400004004000040
+DATA b64_enc_mult<>+16(SB)/8, $0x0400004004000040
+DATA b64_enc_mult<>+24(SB)/8, $0x0400004004000040
+GLOBL b64_enc_mult<>(SB), RODATA|NOPTR, $32
diff --git a/vendor/github.com/segmentio/asm/base64/encode_arm64.go b/vendor/github.com/segmentio/asm/base64/encode_arm64.go
new file mode 100644
index 0000000000..b6a3814928
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/encode_arm64.go
@@ -0,0 +1,6 @@
+//go:build !purego
+// +build !purego
+
+package base64
+
+func encodeARM64(dst []byte, src []byte, lut *int8) (int, int)
diff --git a/vendor/github.com/segmentio/asm/base64/encode_arm64.s b/vendor/github.com/segmentio/asm/base64/encode_arm64.s
new file mode 100644
index 0000000000..4654313bbd
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/encode_arm64.s
@@ -0,0 +1,97 @@
+#include "textflag.h"
+
+#define Rdst R0
+#define Rsrc R1
+#define Rlen R2
+#define Rwr R3
+#define Rrem R4
+#define Rtmp R5
+
+#define Vlut V0
+#define Vfld0 V6
+#define Vfld1 V7
+#define Vfld2 V8
+#define Vfld3 V9
+#define Vsrc0 V10
+#define Vsrc1 V11
+#define Vsrc2 V12
+#define Vr0a V13
+#define Vr1a V14
+#define Vr2a V15
+#define Vr3a V16
+#define Vr0b V17
+#define Vr1b V18
+#define Vr2b V19
+#define Vr3b V20
+
+// func encodeARM64(dst []byte, src []byte, lut *int8) (int, int)
+TEXT ·encodeARM64(SB),NOSPLIT,$0-72
+ // Load dst/src info
+ MOVD dst_base+0(FP), Rdst
+ MOVD src_base+24(FP), Rsrc
+ MOVD src_len+32(FP), Rlen
+ MOVD lut+48(FP), Rtmp
+ VLD1 (Rtmp), [Vlut.B16]
+
+ MOVD Rlen, Rrem
+ MOVD Rdst, Rwr
+
+ VMOVI $51, V1.B16
+ VMOVI $26, V2.B16
+ VMOVI $63, V3.B16
+ VMOVI $13, V4.B16
+
+loop:
+ VLD3.P 48(Rsrc), [Vsrc0.B16, Vsrc1.B16, Vsrc2.B16]
+
+ // Split 3 source blocks into 4 lookup inputs
+ VUSHR $2, Vsrc0.B16, Vfld0.B16
+ VUSHR $4, Vsrc1.B16, Vfld1.B16
+ VUSHR $6, Vsrc2.B16, Vfld2.B16
+ VSHL $4, Vsrc0.B16, Vsrc0.B16
+ VSHL $2, Vsrc1.B16, Vsrc1.B16
+ VORR Vsrc0.B16, Vfld1.B16, Vfld1.B16
+ VORR Vsrc1.B16, Vfld2.B16, Vfld2.B16
+ VAND V3.B16, Vfld1.B16, Vfld1.B16
+ VAND V3.B16, Vfld2.B16, Vfld2.B16
+ VAND V3.B16, Vsrc2.B16, Vfld3.B16
+
+ WORD $0x6e212ccd // VUQSUB V1.B16, Vfld0.B16, Vr0a.B16
+ WORD $0x4e263451 // VCMGT V2.B16, Vfld0.B16, Vr0b.B16
+ VAND V4.B16, Vr0b.B16, Vr0b.B16
+ VORR Vr0b.B16, Vr0a.B16, Vr0a.B16
+ WORD $0x6e212cee // VUQSUB V1.B16, Vfld1.B16, Vr1a.B16
+ WORD $0x4e273452 // VCMGT V2.B16, Vfld1.B16, Vr1b.B16
+ VAND V4.B16, Vr1b.B16, Vr1b.B16
+ VORR Vr1b.B16, Vr1a.B16, Vr1a.B16
+ WORD $0x6e212d0f // VUQSUB V1.B16, Vfld2.B16, Vr2a.B16
+ WORD $0x4e283453 // VCMGT V2.B16, Vfld2.B16, Vr2b.B16
+ VAND V4.B16, Vr2b.B16, Vr2b.B16
+ VORR Vr2b.B16, Vr2a.B16, Vr2a.B16
+ WORD $0x6e212d30 // VUQSUB V1.B16, Vfld3.B16, Vr3a.B16
+ WORD $0x4e293454 // VCMGT V2.B16, Vfld3.B16, Vr3b.B16
+ VAND V4.B16, Vr3b.B16, Vr3b.B16
+ VORR Vr3b.B16, Vr3a.B16, Vr3a.B16
+
+ // Add result of lookup table to each field
+ VTBL Vr0a.B16, [Vlut.B16], Vr0a.B16
+ VADD Vr0a.B16, Vfld0.B16, Vfld0.B16
+ VTBL Vr1a.B16, [Vlut.B16], Vr1a.B16
+ VADD Vr1a.B16, Vfld1.B16, Vfld1.B16
+ VTBL Vr2a.B16, [Vlut.B16], Vr2a.B16
+ VADD Vr2a.B16, Vfld2.B16, Vfld2.B16
+ VTBL Vr3a.B16, [Vlut.B16], Vr3a.B16
+ VADD Vr3a.B16, Vfld3.B16, Vfld3.B16
+
+ VST4.P [Vfld0.B16, Vfld1.B16, Vfld2.B16, Vfld3.B16], 64(Rwr)
+ SUB $48, Rrem
+ CMP $48, Rrem
+ BGE loop
+
+done:
+ SUB Rdst, Rwr
+ SUB Rrem, Rlen
+ MOVD Rwr, ret+56(FP)
+ MOVD Rlen, ret1+64(FP)
+ RET
+
diff --git a/vendor/github.com/segmentio/asm/cpu/arm/arm.go b/vendor/github.com/segmentio/asm/cpu/arm/arm.go
new file mode 100644
index 0000000000..47c695a075
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/cpu/arm/arm.go
@@ -0,0 +1,80 @@
+package arm
+
+import (
+ "github.com/segmentio/asm/cpu/cpuid"
+ . "golang.org/x/sys/cpu"
+)
+
+type CPU cpuid.CPU
+
+func (cpu CPU) Has(feature Feature) bool {
+ return cpuid.CPU(cpu).Has(cpuid.Feature(feature))
+}
+
+func (cpu *CPU) set(feature Feature, enable bool) {
+ (*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable)
+}
+
+type Feature cpuid.Feature
+
+const (
+ SWP Feature = 1 << iota // SWP instruction support
+ HALF // Half-word load and store support
+ THUMB // ARM Thumb instruction set
+ BIT26 // Address space limited to 26-bits
+ FASTMUL // 32-bit operand, 64-bit result multiplication support
+ FPA // Floating point arithmetic support
+ VFP // Vector floating point support
+ EDSP // DSP Extensions support
+ JAVA // Java instruction set
+ IWMMXT // Intel Wireless MMX technology support
+ CRUNCH // MaverickCrunch context switching and handling
+ THUMBEE // Thumb EE instruction set
+ NEON // NEON instruction set
+ VFPv3 // Vector floating point version 3 support
+ VFPv3D16 // Vector floating point version 3 D8-D15
+ TLS // Thread local storage support
+ VFPv4 // Vector floating point version 4 support
+ IDIVA // Integer divide instruction support in ARM mode
+ IDIVT // Integer divide instruction support in Thumb mode
+ VFPD32 // Vector floating point version 3 D15-D31
+ LPAE // Large Physical Address Extensions
+ EVTSTRM // Event stream support
+ AES // AES hardware implementation
+ PMULL // Polynomial multiplication instruction set
+ SHA1 // SHA1 hardware implementation
+ SHA2 // SHA2 hardware implementation
+ CRC32 // CRC32 hardware implementation
+)
+
+func ABI() CPU {
+ cpu := CPU(0)
+ cpu.set(SWP, ARM.HasSWP)
+ cpu.set(HALF, ARM.HasHALF)
+ cpu.set(THUMB, ARM.HasTHUMB)
+ cpu.set(BIT26, ARM.Has26BIT)
+ cpu.set(FASTMUL, ARM.HasFASTMUL)
+ cpu.set(FPA, ARM.HasFPA)
+ cpu.set(VFP, ARM.HasVFP)
+ cpu.set(EDSP, ARM.HasEDSP)
+ cpu.set(JAVA, ARM.HasJAVA)
+ cpu.set(IWMMXT, ARM.HasIWMMXT)
+ cpu.set(CRUNCH, ARM.HasCRUNCH)
+ cpu.set(THUMBEE, ARM.HasTHUMBEE)
+ cpu.set(NEON, ARM.HasNEON)
+ cpu.set(VFPv3, ARM.HasVFPv3)
+ cpu.set(VFPv3D16, ARM.HasVFPv3D16)
+ cpu.set(TLS, ARM.HasTLS)
+ cpu.set(VFPv4, ARM.HasVFPv4)
+ cpu.set(IDIVA, ARM.HasIDIVA)
+ cpu.set(IDIVT, ARM.HasIDIVT)
+ cpu.set(VFPD32, ARM.HasVFPD32)
+ cpu.set(LPAE, ARM.HasLPAE)
+ cpu.set(EVTSTRM, ARM.HasEVTSTRM)
+ cpu.set(AES, ARM.HasAES)
+ cpu.set(PMULL, ARM.HasPMULL)
+ cpu.set(SHA1, ARM.HasSHA1)
+ cpu.set(SHA2, ARM.HasSHA2)
+ cpu.set(CRC32, ARM.HasCRC32)
+ return cpu
+}
diff --git a/vendor/github.com/segmentio/asm/cpu/arm64/arm64.go b/vendor/github.com/segmentio/asm/cpu/arm64/arm64.go
new file mode 100644
index 0000000000..0c5134c76e
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/cpu/arm64/arm64.go
@@ -0,0 +1,74 @@
+package arm64
+
+import (
+ "github.com/segmentio/asm/cpu/cpuid"
+ . "golang.org/x/sys/cpu"
+)
+
+type CPU cpuid.CPU
+
+func (cpu CPU) Has(feature Feature) bool {
+ return cpuid.CPU(cpu).Has(cpuid.Feature(feature))
+}
+
+func (cpu *CPU) set(feature Feature, enable bool) {
+ (*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable)
+}
+
+type Feature cpuid.Feature
+
+const (
+ FP Feature = 1 << iota // Floating-point instruction set (always available)
+ ASIMD // Advanced SIMD (always available)
+ EVTSTRM // Event stream support
+ AES // AES hardware implementation
+ PMULL // Polynomial multiplication instruction set
+ SHA1 // SHA1 hardware implementation
+ SHA2 // SHA2 hardware implementation
+ CRC32 // CRC32 hardware implementation
+ ATOMICS // Atomic memory operation instruction set
+ FPHP // Half precision floating-point instruction set
+ ASIMDHP // Advanced SIMD half precision instruction set
+ CPUID // CPUID identification scheme registers
+ ASIMDRDM // Rounding double multiply add/subtract instruction set
+ JSCVT // Javascript conversion from floating-point to integer
+ FCMA // Floating-point multiplication and addition of complex numbers
+ LRCPC // Release Consistent processor consistent support
+ DCPOP // Persistent memory support
+ SHA3 // SHA3 hardware implementation
+ SM3 // SM3 hardware implementation
+ SM4 // SM4 hardware implementation
+ ASIMDDP // Advanced SIMD double precision instruction set
+ SHA512 // SHA512 hardware implementation
+ SVE // Scalable Vector Extensions
+ ASIMDFHM // Advanced SIMD multiplication FP16 to FP32
+)
+
+func ABI() CPU {
+ cpu := CPU(0)
+ cpu.set(FP, ARM64.HasFP)
+ cpu.set(ASIMD, ARM64.HasASIMD)
+ cpu.set(EVTSTRM, ARM64.HasEVTSTRM)
+ cpu.set(AES, ARM64.HasAES)
+ cpu.set(PMULL, ARM64.HasPMULL)
+ cpu.set(SHA1, ARM64.HasSHA1)
+ cpu.set(SHA2, ARM64.HasSHA2)
+ cpu.set(CRC32, ARM64.HasCRC32)
+ cpu.set(ATOMICS, ARM64.HasATOMICS)
+ cpu.set(FPHP, ARM64.HasFPHP)
+ cpu.set(ASIMDHP, ARM64.HasASIMDHP)
+ cpu.set(CPUID, ARM64.HasCPUID)
+ cpu.set(ASIMDRDM, ARM64.HasASIMDRDM)
+ cpu.set(JSCVT, ARM64.HasJSCVT)
+ cpu.set(FCMA, ARM64.HasFCMA)
+ cpu.set(LRCPC, ARM64.HasLRCPC)
+ cpu.set(DCPOP, ARM64.HasDCPOP)
+ cpu.set(SHA3, ARM64.HasSHA3)
+ cpu.set(SM3, ARM64.HasSM3)
+ cpu.set(SM4, ARM64.HasSM4)
+ cpu.set(ASIMDDP, ARM64.HasASIMDDP)
+ cpu.set(SHA512, ARM64.HasSHA512)
+ cpu.set(SVE, ARM64.HasSVE)
+ cpu.set(ASIMDFHM, ARM64.HasASIMDFHM)
+ return cpu
+}
diff --git a/vendor/github.com/segmentio/asm/cpu/cpu.go b/vendor/github.com/segmentio/asm/cpu/cpu.go
new file mode 100644
index 0000000000..6ddf4973f5
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/cpu/cpu.go
@@ -0,0 +1,22 @@
+// Pakage cpu provides APIs to detect CPU features available at runtime.
+package cpu
+
+import (
+ "github.com/segmentio/asm/cpu/arm"
+ "github.com/segmentio/asm/cpu/arm64"
+ "github.com/segmentio/asm/cpu/x86"
+)
+
+var (
+ // X86 is the bitset representing the set of the x86 instruction sets are
+ // supported by the CPU.
+ X86 = x86.ABI()
+
+ // ARM is the bitset representing which parts of the arm instruction sets
+ // are supported by the CPU.
+ ARM = arm.ABI()
+
+ // ARM64 is the bitset representing which parts of the arm64 instruction
+ // sets are supported by the CPU.
+ ARM64 = arm64.ABI()
+)
diff --git a/vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go b/vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go
new file mode 100644
index 0000000000..0949d3d584
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go
@@ -0,0 +1,32 @@
+// Package cpuid provides generic types used to represent CPU features supported
+// by the architecture.
+package cpuid
+
+// CPU is a bitset of feature flags representing the capabilities of various CPU
+// architeectures that this package provides optimized assembly routines for.
+//
+// The intent is to provide a stable ABI between the Go code that generate the
+// assembly, and the program that uses the library functions.
+type CPU uint64
+
+// Feature represents a single CPU feature.
+type Feature uint64
+
+const (
+ // None is a Feature value that has no CPU features enabled.
+ None Feature = 0
+ // All is a Feature value that has all CPU features enabled.
+ All Feature = 0xFFFFFFFFFFFFFFFF
+)
+
+func (cpu CPU) Has(feature Feature) bool {
+ return (Feature(cpu) & feature) == feature
+}
+
+func (cpu *CPU) Set(feature Feature, enabled bool) {
+ if enabled {
+ *cpu |= CPU(feature)
+ } else {
+ *cpu &= ^CPU(feature)
+ }
+}
diff --git a/vendor/github.com/segmentio/asm/cpu/x86/x86.go b/vendor/github.com/segmentio/asm/cpu/x86/x86.go
new file mode 100644
index 0000000000..9e93537583
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/cpu/x86/x86.go
@@ -0,0 +1,76 @@
+package x86
+
+import (
+ "github.com/segmentio/asm/cpu/cpuid"
+ . "golang.org/x/sys/cpu"
+)
+
+type CPU cpuid.CPU
+
+func (cpu CPU) Has(feature Feature) bool {
+ return cpuid.CPU(cpu).Has(cpuid.Feature(feature))
+}
+
+func (cpu *CPU) set(feature Feature, enable bool) {
+ (*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable)
+}
+
+type Feature cpuid.Feature
+
+const (
+ SSE Feature = 1 << iota // SSE functions
+ SSE2 // P4 SSE functions
+ SSE3 // Prescott SSE3 functions
+ SSE41 // Penryn SSE4.1 functions
+ SSE42 // Nehalem SSE4.2 functions
+ SSE4A // AMD Barcelona microarchitecture SSE4a instructions
+ SSSE3 // Conroe SSSE3 functions
+ AVX // AVX functions
+ AVX2 // AVX2 functions
+ AVX512BF16 // AVX-512 BFLOAT16 Instructions
+ AVX512BITALG // AVX-512 Bit Algorithms
+ AVX512BW // AVX-512 Byte and Word Instructions
+ AVX512CD // AVX-512 Conflict Detection Instructions
+ AVX512DQ // AVX-512 Doubleword and Quadword Instructions
+ AVX512ER // AVX-512 Exponential and Reciprocal Instructions
+ AVX512F // AVX-512 Foundation
+ AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions
+ AVX512PF // AVX-512 Prefetch Instructions
+ AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions
+ AVX512VBMI2 // AVX-512 Vector Bit Manipulation Instructions, Version 2
+ AVX512VL // AVX-512 Vector Length Extensions
+ AVX512VNNI // AVX-512 Vector Neural Network Instructions
+ AVX512VP2INTERSECT // AVX-512 Intersect for D/Q
+ AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword
+ CMOV // Conditional move
+)
+
+func ABI() CPU {
+ cpu := CPU(0)
+ cpu.set(SSE, true) // TODO: golang.org/x/sys/cpu assumes all CPUs have SEE?
+ cpu.set(SSE2, X86.HasSSE2)
+ cpu.set(SSE3, X86.HasSSE3)
+ cpu.set(SSE41, X86.HasSSE41)
+ cpu.set(SSE42, X86.HasSSE42)
+ cpu.set(SSE4A, false) // TODO: add upstream support in golang.org/x/sys/cpu?
+ cpu.set(SSSE3, X86.HasSSSE3)
+ cpu.set(AVX, X86.HasAVX)
+ cpu.set(AVX2, X86.HasAVX2)
+ cpu.set(AVX512BF16, X86.HasAVX512BF16)
+ cpu.set(AVX512BITALG, X86.HasAVX512BITALG)
+ cpu.set(AVX512BW, X86.HasAVX512BW)
+ cpu.set(AVX512CD, X86.HasAVX512CD)
+ cpu.set(AVX512DQ, X86.HasAVX512DQ)
+ cpu.set(AVX512ER, X86.HasAVX512ER)
+ cpu.set(AVX512F, X86.HasAVX512F)
+ cpu.set(AVX512IFMA, X86.HasAVX512IFMA)
+ cpu.set(AVX512PF, X86.HasAVX512PF)
+ cpu.set(AVX512VBMI, X86.HasAVX512VBMI)
+ cpu.set(AVX512VBMI2, X86.HasAVX512VBMI2)
+ cpu.set(AVX512VL, X86.HasAVX512VL)
+ cpu.set(AVX512VNNI, X86.HasAVX512VNNI)
+ cpu.set(AVX512VP2INTERSECT, false) // TODO: add upstream support in golang.org/x/sys/cpu?
+ cpu.set(AVX512VPOPCNTDQ, X86.HasAVX512VPOPCNTDQ)
+ cpu.set(CMOV, true) // TODO: golang.org/x/sys/cpu assumes all CPUs have CMOV?
+ return cpu
+}
diff --git a/vendor/github.com/segmentio/asm/internal/unsafebytes/unsafebytes.go b/vendor/github.com/segmentio/asm/internal/unsafebytes/unsafebytes.go
new file mode 100644
index 0000000000..913c9cc68b
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/internal/unsafebytes/unsafebytes.go
@@ -0,0 +1,20 @@
+package unsafebytes
+
+import "unsafe"
+
+func Pointer(b []byte) *byte {
+ return *(**byte)(unsafe.Pointer(&b))
+}
+
+func String(b []byte) string {
+ return *(*string)(unsafe.Pointer(&b))
+}
+
+func BytesOf(s string) []byte {
+ return *(*[]byte)(unsafe.Pointer(&sliceHeader{str: s, cap: len(s)}))
+}
+
+type sliceHeader struct {
+ str string
+ cap int
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
index 499789984d..69956b425a 100644
--- a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
@@ -1,4 +1,4 @@
-// +build darwin dragonfly freebsd netbsd openbsd
+// +build darwin dragonfly freebsd netbsd openbsd hurd
// +build !js
package logrus
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
index 04748b8515..c9aed267a4 100644
--- a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
@@ -1,5 +1,7 @@
+//go:build (linux || aix || zos) && !js && !wasi
// +build linux aix zos
// +build !js
+// +build !wasi
package logrus
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_wasi.go b/vendor/github.com/sirupsen/logrus/terminal_check_wasi.go
new file mode 100644
index 0000000000..2822b212fb
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_wasi.go
@@ -0,0 +1,8 @@
+//go:build wasi
+// +build wasi
+
+package logrus
+
+func isTerminal(fd int) bool {
+ return false
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_wasip1.go b/vendor/github.com/sirupsen/logrus/terminal_check_wasip1.go
new file mode 100644
index 0000000000..108a6be12b
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_wasip1.go
@@ -0,0 +1,8 @@
+//go:build wasip1
+// +build wasip1
+
+package logrus
+
+func isTerminal(fd int) bool {
+ return false
+}
diff --git a/vendor/github.com/tchap/go-patricia/v2/patricia/patricia.go b/vendor/github.com/tchap/go-patricia/v2/patricia/patricia.go
index 7b9975e383..9bf8f4fec5 100644
--- a/vendor/github.com/tchap/go-patricia/v2/patricia/patricia.go
+++ b/vendor/github.com/tchap/go-patricia/v2/patricia/patricia.go
@@ -465,7 +465,7 @@ func (trie *Trie) compact() *Trie {
// If any item is set, we cannot compact since we want to retain
// the ability to do searching by key. This makes compaction less usable,
// but that simply cannot be avoided.
- if trie.item != nil || child.item != nil {
+ if child == nil || trie.item != nil || child.item != nil {
return trie
}
diff --git a/vendor/github.com/valyala/fastjson/.gitignore b/vendor/github.com/valyala/fastjson/.gitignore
new file mode 100644
index 0000000000..6e92f57d46
--- /dev/null
+++ b/vendor/github.com/valyala/fastjson/.gitignore
@@ -0,0 +1 @@
+tags
diff --git a/vendor/github.com/valyala/fastjson/.travis.yml b/vendor/github.com/valyala/fastjson/.travis.yml
new file mode 100644
index 0000000000..472a82190c
--- /dev/null
+++ b/vendor/github.com/valyala/fastjson/.travis.yml
@@ -0,0 +1,19 @@
+language: go
+
+go:
+ - 1.10.x
+
+script:
+ # build test for supported platforms
+ - GOOS=linux go build
+ - GOOS=darwin go build
+ - GOOS=freebsd go build
+ - GOOS=windows go build
+
+ # run tests on a standard platform
+ - go test -v ./... -coverprofile=coverage.txt -covermode=atomic
+ - go test -v ./... -race
+
+after_success:
+ # Upload coverage results to codecov.io
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/valyala/fastjson/LICENSE b/vendor/github.com/valyala/fastjson/LICENSE
new file mode 100644
index 0000000000..6f665f3e29
--- /dev/null
+++ b/vendor/github.com/valyala/fastjson/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2018 Aliaksandr Valialkin
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/valyala/fastjson/README.md b/vendor/github.com/valyala/fastjson/README.md
new file mode 100644
index 0000000000..f32c693937
--- /dev/null
+++ b/vendor/github.com/valyala/fastjson/README.md
@@ -0,0 +1,227 @@
+[](https://travis-ci.org/valyala/fastjson)
+[](http://godoc.org/github.com/valyala/fastjson)
+[](https://goreportcard.com/report/github.com/valyala/fastjson)
+[](https://codecov.io/gh/valyala/fastjson)
+
+# fastjson - fast JSON parser and validator for Go
+
+
+## Features
+
+ * Fast. As usual, up to 15x faster than the standard [encoding/json](https://golang.org/pkg/encoding/json/).
+ See [benchmarks](#benchmarks).
+ * Parses arbitrary JSON without schema, reflection, struct magic and code generation
+ contrary to [easyjson](https://github.com/mailru/easyjson).
+ * Provides simple [API](http://godoc.org/github.com/valyala/fastjson).
+ * Outperforms [jsonparser](https://github.com/buger/jsonparser) and [gjson](https://github.com/tidwall/gjson)
+ when accessing multiple unrelated fields, since `fastjson` parses the input JSON only once.
+ * Validates the parsed JSON unlike [jsonparser](https://github.com/buger/jsonparser)
+ and [gjson](https://github.com/tidwall/gjson).
+ * May quickly extract a part of the original JSON with `Value.Get(...).MarshalTo` and modify it
+ with [Del](https://godoc.org/github.com/valyala/fastjson#Value.Del)
+ and [Set](https://godoc.org/github.com/valyala/fastjson#Value.Set) functions.
+ * May parse array containing values with distinct types (aka non-homogenous types).
+ For instance, `fastjson` easily parses the following JSON array `[123, "foo", [456], {"k": "v"}, null]`.
+ * `fastjson` preserves the original order of object items when calling
+ [Object.Visit](https://godoc.org/github.com/valyala/fastjson#Object.Visit).
+
+
+## Known limitations
+
+ * Requies extra care to work with - references to certain objects recursively
+ returned by [Parser](https://godoc.org/github.com/valyala/fastjson#Parser)
+ must be released before the next call to [Parse](https://godoc.org/github.com/valyala/fastjson#Parser.Parse).
+ Otherwise the program may work improperly. The same applies to objects returned by [Arena](https://godoc.org/github.com/valyala/fastjson#Arena).
+ Adhere recommendations from [docs](https://godoc.org/github.com/valyala/fastjson).
+ * Cannot parse JSON from `io.Reader`. There is [Scanner](https://godoc.org/github.com/valyala/fastjson#Scanner)
+ for parsing stream of JSON values from a string.
+
+
+## Usage
+
+One-liner accessing a single field:
+```go
+ s := []byte(`{"foo": [123, "bar"]}`)
+ fmt.Printf("foo.0=%d\n", fastjson.GetInt(s, "foo", "0"))
+
+ // Output:
+ // foo.0=123
+```
+
+Accessing multiple fields with error handling:
+```go
+ var p fastjson.Parser
+ v, err := p.Parse(`{
+ "str": "bar",
+ "int": 123,
+ "float": 1.23,
+ "bool": true,
+ "arr": [1, "foo", {}]
+ }`)
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Printf("foo=%s\n", v.GetStringBytes("str"))
+ fmt.Printf("int=%d\n", v.GetInt("int"))
+ fmt.Printf("float=%f\n", v.GetFloat64("float"))
+ fmt.Printf("bool=%v\n", v.GetBool("bool"))
+ fmt.Printf("arr.1=%s\n", v.GetStringBytes("arr", "1"))
+
+ // Output:
+ // foo=bar
+ // int=123
+ // float=1.230000
+ // bool=true
+ // arr.1=foo
+```
+
+See also [examples](https://godoc.org/github.com/valyala/fastjson#pkg-examples).
+
+
+## Security
+
+ * `fastjson` shouldn't crash or panic when parsing input strings specially crafted
+ by an attacker. It must return error on invalid input JSON.
+ * `fastjson` requires up to `sizeof(Value) * len(inputJSON)` bytes of memory
+ for parsing `inputJSON` string. Limit the maximum size of the `inputJSON`
+ before parsing it in order to limit the maximum memory usage.
+
+
+## Performance optimization tips
+
+ * Re-use [Parser](https://godoc.org/github.com/valyala/fastjson#Parser) and [Scanner](https://godoc.org/github.com/valyala/fastjson#Scanner)
+ for parsing many JSONs. This reduces memory allocations overhead.
+ [ParserPool](https://godoc.org/github.com/valyala/fastjson#ParserPool) may be useful in this case.
+ * Prefer calling `Value.Get*` on the value returned from [Parser](https://godoc.org/github.com/valyala/fastjson#Parser)
+ instead of calling `Get*` one-liners when multiple fields
+ must be obtained from JSON, since each `Get*` one-liner re-parses
+ the input JSON again.
+ * Prefer calling once [Value.Get](https://godoc.org/github.com/valyala/fastjson#Value.Get)
+ for common prefix paths and then calling `Value.Get*` on the returned value
+ for distinct suffix paths.
+ * Prefer iterating over array returned from [Value.GetArray](https://godoc.org/github.com/valyala/fastjson#Object.Visit)
+ with a range loop instead of calling `Value.Get*` for each array item.
+
+## Fuzzing
+Install [go-fuzz](https://github.com/dvyukov/go-fuzz) & optionally the go-fuzz-corpus.
+
+```bash
+go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
+```
+
+Build using `go-fuzz-build` and run `go-fuzz` with an optional corpus.
+
+```bash
+mkdir -p workdir/corpus
+cp $GOPATH/src/github.com/dvyukov/go-fuzz-corpus/json/corpus/* workdir/corpus
+go-fuzz-build github.com/valyala/fastjson
+go-fuzz -bin=fastjson-fuzz.zip -workdir=workdir
+```
+
+## Benchmarks
+
+Go 1.12 has been used for benchmarking.
+
+Legend:
+
+ * `small` - parse [small.json](testdata/small.json) (190 bytes).
+ * `medium` - parse [medium.json](testdata/medium.json) (2.3KB).
+ * `large` - parse [large.json](testdata/large.json) (28KB).
+ * `canada` - parse [canada.json](testdata/canada.json) (2.2MB).
+ * `citm` - parse [citm_catalog.json](testdata/citm_catalog.json) (1.7MB).
+ * `twitter` - parse [twitter.json](testdata/twitter.json) (617KB).
+
+ * `stdjson-map` - parse into a `map[string]interface{}` using `encoding/json`.
+ * `stdjson-struct` - parse into a struct containing
+ a subset of fields of the parsed JSON, using `encoding/json`.
+ * `stdjson-empty-struct` - parse into an empty struct using `encoding/json`.
+ This is the fastest possible solution for `encoding/json`, may be used
+ for json validation. See also benchmark results for json validation.
+ * `fastjson` - parse using `fastjson` without fields access.
+ * `fastjson-get` - parse using `fastjson` with fields access similar to `stdjson-struct`.
+
+```
+$ GOMAXPROCS=1 go test github.com/valyala/fastjson -bench='Parse$'
+goos: linux
+goarch: amd64
+pkg: github.com/valyala/fastjson
+BenchmarkParse/small/stdjson-map 200000 7305 ns/op 26.01 MB/s 960 B/op 51 allocs/op
+BenchmarkParse/small/stdjson-struct 500000 3431 ns/op 55.37 MB/s 224 B/op 4 allocs/op
+BenchmarkParse/small/stdjson-empty-struct 500000 2273 ns/op 83.58 MB/s 168 B/op 2 allocs/op
+BenchmarkParse/small/fastjson 5000000 347 ns/op 547.53 MB/s 0 B/op 0 allocs/op
+BenchmarkParse/small/fastjson-get 2000000 620 ns/op 306.39 MB/s 0 B/op 0 allocs/op
+BenchmarkParse/medium/stdjson-map 30000 40672 ns/op 57.26 MB/s 10196 B/op 208 allocs/op
+BenchmarkParse/medium/stdjson-struct 30000 47792 ns/op 48.73 MB/s 9174 B/op 258 allocs/op
+BenchmarkParse/medium/stdjson-empty-struct 100000 22096 ns/op 105.40 MB/s 280 B/op 5 allocs/op
+BenchmarkParse/medium/fastjson 500000 3025 ns/op 769.90 MB/s 0 B/op 0 allocs/op
+BenchmarkParse/medium/fastjson-get 500000 3211 ns/op 725.20 MB/s 0 B/op 0 allocs/op
+BenchmarkParse/large/stdjson-map 2000 614079 ns/op 45.79 MB/s 210734 B/op 2785 allocs/op
+BenchmarkParse/large/stdjson-struct 5000 298554 ns/op 94.18 MB/s 15616 B/op 353 allocs/op
+BenchmarkParse/large/stdjson-empty-struct 5000 268577 ns/op 104.69 MB/s 280 B/op 5 allocs/op
+BenchmarkParse/large/fastjson 50000 35210 ns/op 798.56 MB/s 5 B/op 0 allocs/op
+BenchmarkParse/large/fastjson-get 50000 35171 ns/op 799.46 MB/s 5 B/op 0 allocs/op
+BenchmarkParse/canada/stdjson-map 20 68147307 ns/op 33.03 MB/s 12260502 B/op 392539 allocs/op
+BenchmarkParse/canada/stdjson-struct 20 68044518 ns/op 33.08 MB/s 12260123 B/op 392534 allocs/op
+BenchmarkParse/canada/stdjson-empty-struct 100 17709250 ns/op 127.11 MB/s 280 B/op 5 allocs/op
+BenchmarkParse/canada/fastjson 300 4182404 ns/op 538.22 MB/s 254902 B/op 381 allocs/op
+BenchmarkParse/canada/fastjson-get 300 4274744 ns/op 526.60 MB/s 254902 B/op 381 allocs/op
+BenchmarkParse/citm/stdjson-map 50 27772612 ns/op 62.19 MB/s 5214163 B/op 95402 allocs/op
+BenchmarkParse/citm/stdjson-struct 100 14936191 ns/op 115.64 MB/s 1989 B/op 75 allocs/op
+BenchmarkParse/citm/stdjson-empty-struct 100 14946034 ns/op 115.56 MB/s 280 B/op 5 allocs/op
+BenchmarkParse/citm/fastjson 1000 1879714 ns/op 918.87 MB/s 17628 B/op 30 allocs/op
+BenchmarkParse/citm/fastjson-get 1000 1881598 ns/op 917.94 MB/s 17628 B/op 30 allocs/op
+BenchmarkParse/twitter/stdjson-map 100 11289146 ns/op 55.94 MB/s 2187878 B/op 31266 allocs/op
+BenchmarkParse/twitter/stdjson-struct 300 5779442 ns/op 109.27 MB/s 408 B/op 6 allocs/op
+BenchmarkParse/twitter/stdjson-empty-struct 300 5738504 ns/op 110.05 MB/s 408 B/op 6 allocs/op
+BenchmarkParse/twitter/fastjson 2000 774042 ns/op 815.86 MB/s 2541 B/op 2 allocs/op
+BenchmarkParse/twitter/fastjson-get 2000 777833 ns/op 811.89 MB/s 2541 B/op 2 allocs/op
+```
+
+Benchmark results for json validation:
+
+```
+$ GOMAXPROCS=1 go test github.com/valyala/fastjson -bench='Validate$'
+goos: linux
+goarch: amd64
+pkg: github.com/valyala/fastjson
+BenchmarkValidate/small/stdjson 2000000 955 ns/op 198.83 MB/s 72 B/op 2 allocs/op
+BenchmarkValidate/small/fastjson 5000000 384 ns/op 493.60 MB/s 0 B/op 0 allocs/op
+BenchmarkValidate/medium/stdjson 200000 10799 ns/op 215.66 MB/s 184 B/op 5 allocs/op
+BenchmarkValidate/medium/fastjson 300000 3809 ns/op 611.30 MB/s 0 B/op 0 allocs/op
+BenchmarkValidate/large/stdjson 10000 133064 ns/op 211.31 MB/s 184 B/op 5 allocs/op
+BenchmarkValidate/large/fastjson 30000 45268 ns/op 621.14 MB/s 0 B/op 0 allocs/op
+BenchmarkValidate/canada/stdjson 200 8470904 ns/op 265.74 MB/s 184 B/op 5 allocs/op
+BenchmarkValidate/canada/fastjson 500 2973377 ns/op 757.07 MB/s 0 B/op 0 allocs/op
+BenchmarkValidate/citm/stdjson 200 7273172 ns/op 237.48 MB/s 184 B/op 5 allocs/op
+BenchmarkValidate/citm/fastjson 1000 1684430 ns/op 1025.39 MB/s 0 B/op 0 allocs/op
+BenchmarkValidate/twitter/stdjson 500 2849439 ns/op 221.63 MB/s 312 B/op 6 allocs/op
+BenchmarkValidate/twitter/fastjson 2000 1036796 ns/op 609.10 MB/s 0 B/op 0 allocs/op
+```
+
+## FAQ
+
+ * Q: _There are a ton of other high-perf packages for JSON parsing in Go. Why creating yet another package?_
+ A: Because other packages require either rigid JSON schema via struct magic
+ and code generation or perform poorly when multiple unrelated fields
+ must be obtained from the parsed JSON.
+ Additionally, `fastjson` provides nicer [API](http://godoc.org/github.com/valyala/fastjson).
+
+ * Q: _What is the main purpose for `fastjson`?_
+ A: High-perf JSON parsing for [RTB](https://www.iab.com/wp-content/uploads/2015/05/OpenRTB_API_Specification_Version_2_3_1.pdf)
+ and other [JSON-RPC](https://en.wikipedia.org/wiki/JSON-RPC) services.
+
+ * Q: _Why fastjson doesn't provide fast marshaling (serialization)?_
+ A: Actually it provides some sort of marshaling - see [Value.MarshalTo](https://godoc.org/github.com/valyala/fastjson#Value.MarshalTo).
+ But I'd recommend using [quicktemplate](https://github.com/valyala/quicktemplate#use-cases)
+ for high-performance JSON marshaling :)
+
+ * Q: _`fastjson` crashes my program!_
+ A: There is high probability of improper use.
+ * Make sure you don't hold references to objects recursively returned by `Parser` / `Scanner`
+ beyond the next `Parser.Parse` / `Scanner.Next` call
+ if such restriction is mentioned in [docs](https://github.com/valyala/fastjson/issues/new).
+ * Make sure you don't access `fastjson` objects from concurrently running goroutines
+ if such restriction is mentioned in [docs](https://github.com/valyala/fastjson/issues/new).
+ * Build and run your program with [-race](https://golang.org/doc/articles/race_detector.html) flag.
+ Make sure the race detector detects zero races.
+ * If your program continue crashing after fixing issues mentioned above, [file a bug](https://github.com/valyala/fastjson/issues/new).
diff --git a/vendor/github.com/valyala/fastjson/arena.go b/vendor/github.com/valyala/fastjson/arena.go
new file mode 100644
index 0000000000..9fe21a48c8
--- /dev/null
+++ b/vendor/github.com/valyala/fastjson/arena.go
@@ -0,0 +1,126 @@
+package fastjson
+
+import (
+ "strconv"
+)
+
+// Arena may be used for fast creation and re-use of Values.
+//
+// Typical Arena lifecycle:
+//
+// 1) Construct Values via the Arena and Value.Set* calls.
+// 2) Marshal the constructed Values with Value.MarshalTo call.
+// 3) Reset all the constructed Values at once by Arena.Reset call.
+// 4) Go to 1 and re-use the Arena.
+//
+// It is unsafe calling Arena methods from concurrent goroutines.
+// Use per-goroutine Arenas or ArenaPool instead.
+type Arena struct {
+ b []byte
+ c cache
+}
+
+// Reset resets all the Values allocated by a.
+//
+// Values previously allocated by a cannot be used after the Reset call.
+func (a *Arena) Reset() {
+ a.b = a.b[:0]
+ a.c.reset()
+}
+
+// NewObject returns new empty object value.
+//
+// New entries may be added to the returned object via Set call.
+//
+// The returned object is valid until Reset is called on a.
+func (a *Arena) NewObject() *Value {
+ v := a.c.getValue()
+ v.t = TypeObject
+ v.o.reset()
+ return v
+}
+
+// NewArray returns new empty array value.
+//
+// New entries may be added to the returned array via Set* calls.
+//
+// The returned array is valid until Reset is called on a.
+func (a *Arena) NewArray() *Value {
+ v := a.c.getValue()
+ v.t = TypeArray
+ v.a = v.a[:0]
+ return v
+}
+
+// NewString returns new string value containing s.
+//
+// The returned string is valid until Reset is called on a.
+func (a *Arena) NewString(s string) *Value {
+ v := a.c.getValue()
+ v.t = typeRawString
+ bLen := len(a.b)
+ a.b = escapeString(a.b, s)
+ v.s = b2s(a.b[bLen+1 : len(a.b)-1])
+ return v
+}
+
+// NewStringBytes returns new string value containing b.
+//
+// The returned string is valid until Reset is called on a.
+func (a *Arena) NewStringBytes(b []byte) *Value {
+ v := a.c.getValue()
+ v.t = typeRawString
+ bLen := len(a.b)
+ a.b = escapeString(a.b, b2s(b))
+ v.s = b2s(a.b[bLen+1 : len(a.b)-1])
+ return v
+}
+
+// NewNumberFloat64 returns new number value containing f.
+//
+// The returned number is valid until Reset is called on a.
+func (a *Arena) NewNumberFloat64(f float64) *Value {
+ v := a.c.getValue()
+ v.t = TypeNumber
+ bLen := len(a.b)
+ a.b = strconv.AppendFloat(a.b, f, 'g', -1, 64)
+ v.s = b2s(a.b[bLen:])
+ return v
+}
+
+// NewNumberInt returns new number value containing n.
+//
+// The returned number is valid until Reset is called on a.
+func (a *Arena) NewNumberInt(n int) *Value {
+ v := a.c.getValue()
+ v.t = TypeNumber
+ bLen := len(a.b)
+ a.b = strconv.AppendInt(a.b, int64(n), 10)
+ v.s = b2s(a.b[bLen:])
+ return v
+}
+
+// NewNumberString returns new number value containing s.
+//
+// The returned number is valid until Reset is called on a.
+func (a *Arena) NewNumberString(s string) *Value {
+ v := a.c.getValue()
+ v.t = TypeNumber
+ v.s = s
+ return v
+}
+
+// NewNull returns null value.
+func (a *Arena) NewNull() *Value {
+ return valueNull
+}
+
+// NewTrue returns true value.
+func (a *Arena) NewTrue() *Value {
+ return valueTrue
+}
+
+// NewFalse return false value.
+func (a *Arena) NewFalse() *Value {
+ return valueFalse
+}
diff --git a/vendor/github.com/valyala/fastjson/doc.go b/vendor/github.com/valyala/fastjson/doc.go
new file mode 100644
index 0000000000..8076189cfe
--- /dev/null
+++ b/vendor/github.com/valyala/fastjson/doc.go
@@ -0,0 +1,9 @@
+/*
+Package fastjson provides fast JSON parsing.
+
+Arbitrary JSON may be parsed by fastjson without the need for creating structs
+or for generating go code. Just parse JSON and get the required fields with
+Get* functions.
+
+*/
+package fastjson
diff --git a/vendor/github.com/valyala/fastjson/fastfloat/parse.go b/vendor/github.com/valyala/fastjson/fastfloat/parse.go
new file mode 100644
index 0000000000..b37838da62
--- /dev/null
+++ b/vendor/github.com/valyala/fastjson/fastfloat/parse.go
@@ -0,0 +1,515 @@
+package fastfloat
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+)
+
+// ParseUint64BestEffort parses uint64 number s.
+//
+// It is equivalent to strconv.ParseUint(s, 10, 64), but is faster.
+//
+// 0 is returned if the number cannot be parsed.
+// See also ParseUint64, which returns parse error if the number cannot be parsed.
+func ParseUint64BestEffort(s string) uint64 {
+ if len(s) == 0 {
+ return 0
+ }
+ i := uint(0)
+ d := uint64(0)
+ j := i
+ for i < uint(len(s)) {
+ if s[i] >= '0' && s[i] <= '9' {
+ d = d*10 + uint64(s[i]-'0')
+ i++
+ if i > 18 {
+ // The integer part may be out of range for uint64.
+ // Fall back to slow parsing.
+ dd, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return 0
+ }
+ return dd
+ }
+ continue
+ }
+ break
+ }
+ if i <= j {
+ return 0
+ }
+ if i < uint(len(s)) {
+ // Unparsed tail left.
+ return 0
+ }
+ return d
+}
+
+// ParseUint64 parses uint64 from s.
+//
+// It is equivalent to strconv.ParseUint(s, 10, 64), but is faster.
+//
+// See also ParseUint64BestEffort.
+func ParseUint64(s string) (uint64, error) {
+ if len(s) == 0 {
+ return 0, fmt.Errorf("cannot parse uint64 from empty string")
+ }
+ i := uint(0)
+ d := uint64(0)
+ j := i
+ for i < uint(len(s)) {
+ if s[i] >= '0' && s[i] <= '9' {
+ d = d*10 + uint64(s[i]-'0')
+ i++
+ if i > 18 {
+ // The integer part may be out of range for uint64.
+ // Fall back to slow parsing.
+ dd, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ return dd, nil
+ }
+ continue
+ }
+ break
+ }
+ if i <= j {
+ return 0, fmt.Errorf("cannot parse uint64 from %q", s)
+ }
+ if i < uint(len(s)) {
+ // Unparsed tail left.
+ return 0, fmt.Errorf("unparsed tail left after parsing uint64 from %q: %q", s, s[i:])
+ }
+ return d, nil
+}
+
+// ParseInt64BestEffort parses int64 number s.
+//
+// It is equivalent to strconv.ParseInt(s, 10, 64), but is faster.
+//
+// 0 is returned if the number cannot be parsed.
+// See also ParseInt64, which returns parse error if the number cannot be parsed.
+func ParseInt64BestEffort(s string) int64 {
+ if len(s) == 0 {
+ return 0
+ }
+ i := uint(0)
+ minus := s[0] == '-'
+ if minus {
+ i++
+ if i >= uint(len(s)) {
+ return 0
+ }
+ }
+
+ d := int64(0)
+ j := i
+ for i < uint(len(s)) {
+ if s[i] >= '0' && s[i] <= '9' {
+ d = d*10 + int64(s[i]-'0')
+ i++
+ if i > 18 {
+ // The integer part may be out of range for int64.
+ // Fall back to slow parsing.
+ dd, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return 0
+ }
+ return dd
+ }
+ continue
+ }
+ break
+ }
+ if i <= j {
+ return 0
+ }
+ if i < uint(len(s)) {
+ // Unparsed tail left.
+ return 0
+ }
+ if minus {
+ d = -d
+ }
+ return d
+}
+
+// ParseInt64 parses int64 number s.
+//
+// It is equivalent to strconv.ParseInt(s, 10, 64), but is faster.
+//
+// See also ParseInt64BestEffort.
+func ParseInt64(s string) (int64, error) {
+ if len(s) == 0 {
+ return 0, fmt.Errorf("cannot parse int64 from empty string")
+ }
+ i := uint(0)
+ minus := s[0] == '-'
+ if minus {
+ i++
+ if i >= uint(len(s)) {
+ return 0, fmt.Errorf("cannot parse int64 from %q", s)
+ }
+ }
+
+ d := int64(0)
+ j := i
+ for i < uint(len(s)) {
+ if s[i] >= '0' && s[i] <= '9' {
+ d = d*10 + int64(s[i]-'0')
+ i++
+ if i > 18 {
+ // The integer part may be out of range for int64.
+ // Fall back to slow parsing.
+ dd, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ return dd, nil
+ }
+ continue
+ }
+ break
+ }
+ if i <= j {
+ return 0, fmt.Errorf("cannot parse int64 from %q", s)
+ }
+ if i < uint(len(s)) {
+ // Unparsed tail left.
+ return 0, fmt.Errorf("unparsed tail left after parsing int64 form %q: %q", s, s[i:])
+ }
+ if minus {
+ d = -d
+ }
+ return d, nil
+}
+
+// Exact powers of 10.
+//
+// This works faster than math.Pow10, since it avoids additional multiplication.
+var float64pow10 = [...]float64{
+ 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16,
+}
+
+// ParseBestEffort parses floating-point number s.
+//
+// It is equivalent to strconv.ParseFloat(s, 64), but is faster.
+//
+// 0 is returned if the number cannot be parsed.
+// See also Parse, which returns parse error if the number cannot be parsed.
+func ParseBestEffort(s string) float64 {
+ if len(s) == 0 {
+ return 0
+ }
+ i := uint(0)
+ minus := s[0] == '-'
+ if minus {
+ i++
+ if i >= uint(len(s)) {
+ return 0
+ }
+ }
+
+ // the integer part might be elided to remain compliant
+ // with https://go.dev/ref/spec#Floating-point_literals
+ if s[i] == '.' && (i+1 >= uint(len(s)) || s[i+1] < '0' || s[i+1] > '9') {
+ return 0
+ }
+
+ d := uint64(0)
+ j := i
+ for i < uint(len(s)) {
+ if s[i] >= '0' && s[i] <= '9' {
+ d = d*10 + uint64(s[i]-'0')
+ i++
+ if i > 18 {
+ // The integer part may be out of range for uint64.
+ // Fall back to slow parsing.
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil && !math.IsInf(f, 0) {
+ return 0
+ }
+ return f
+ }
+ continue
+ }
+ break
+ }
+ if i <= j && s[i] != '.' {
+ s = s[i:]
+ if strings.HasPrefix(s, "+") {
+ s = s[1:]
+ }
+ // "infinity" is needed for OpenMetrics support.
+ // See https://github.com/OpenObservability/OpenMetrics/blob/master/OpenMetrics.md
+ if strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") {
+ if minus {
+ return -inf
+ }
+ return inf
+ }
+ if strings.EqualFold(s, "nan") {
+ return nan
+ }
+ return 0
+ }
+ f := float64(d)
+ if i >= uint(len(s)) {
+ // Fast path - just integer.
+ if minus {
+ f = -f
+ }
+ return f
+ }
+
+ if s[i] == '.' {
+ // Parse fractional part.
+ i++
+ if i >= uint(len(s)) {
+ // the fractional part may be elided to remain compliant
+ // with https://go.dev/ref/spec#Floating-point_literals
+ return f
+ }
+ k := i
+ for i < uint(len(s)) {
+ if s[i] >= '0' && s[i] <= '9' {
+ d = d*10 + uint64(s[i]-'0')
+ i++
+ if i-j >= uint(len(float64pow10)) {
+ // The mantissa is out of range. Fall back to standard parsing.
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil && !math.IsInf(f, 0) {
+ return 0
+ }
+ return f
+ }
+ continue
+ }
+ break
+ }
+ if i < k {
+ return 0
+ }
+ // Convert the entire mantissa to a float at once to avoid rounding errors.
+ f = float64(d) / float64pow10[i-k]
+ if i >= uint(len(s)) {
+ // Fast path - parsed fractional number.
+ if minus {
+ f = -f
+ }
+ return f
+ }
+ }
+ if s[i] == 'e' || s[i] == 'E' {
+ // Parse exponent part.
+ i++
+ if i >= uint(len(s)) {
+ return 0
+ }
+ expMinus := false
+ if s[i] == '+' || s[i] == '-' {
+ expMinus = s[i] == '-'
+ i++
+ if i >= uint(len(s)) {
+ return 0
+ }
+ }
+ exp := int16(0)
+ j := i
+ for i < uint(len(s)) {
+ if s[i] >= '0' && s[i] <= '9' {
+ exp = exp*10 + int16(s[i]-'0')
+ i++
+ if exp > 300 {
+ // The exponent may be too big for float64.
+ // Fall back to standard parsing.
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil && !math.IsInf(f, 0) {
+ return 0
+ }
+ return f
+ }
+ continue
+ }
+ break
+ }
+ if i <= j {
+ return 0
+ }
+ if expMinus {
+ exp = -exp
+ }
+ f *= math.Pow10(int(exp))
+ if i >= uint(len(s)) {
+ if minus {
+ f = -f
+ }
+ return f
+ }
+ }
+ return 0
+}
+
+// Parse parses floating-point number s.
+//
+// It is equivalent to strconv.ParseFloat(s, 64), but is faster.
+//
+// See also ParseBestEffort.
+func Parse(s string) (float64, error) {
+ if len(s) == 0 {
+ return 0, fmt.Errorf("cannot parse float64 from empty string")
+ }
+ i := uint(0)
+ minus := s[0] == '-'
+ if minus {
+ i++
+ if i >= uint(len(s)) {
+ return 0, fmt.Errorf("cannot parse float64 from %q", s)
+ }
+ }
+
+ // the integer part might be elided to remain compliant
+ // with https://go.dev/ref/spec#Floating-point_literals
+ if s[i] == '.' && (i+1 >= uint(len(s)) || s[i+1] < '0' || s[i+1] > '9') {
+ return 0, fmt.Errorf("missing integer and fractional part in %q", s)
+ }
+
+ d := uint64(0)
+ j := i
+ for i < uint(len(s)) {
+ if s[i] >= '0' && s[i] <= '9' {
+ d = d*10 + uint64(s[i]-'0')
+ i++
+ if i > 18 {
+ // The integer part may be out of range for uint64.
+ // Fall back to slow parsing.
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil && !math.IsInf(f, 0) {
+ return 0, err
+ }
+ return f, nil
+ }
+ continue
+ }
+ break
+ }
+ if i <= j && s[i] != '.' {
+ ss := s[i:]
+ if strings.HasPrefix(ss, "+") {
+ ss = ss[1:]
+ }
+ // "infinity" is needed for OpenMetrics support.
+ // See https://github.com/OpenObservability/OpenMetrics/blob/master/OpenMetrics.md
+ if strings.EqualFold(ss, "inf") || strings.EqualFold(ss, "infinity") {
+ if minus {
+ return -inf, nil
+ }
+ return inf, nil
+ }
+ if strings.EqualFold(ss, "nan") {
+ return nan, nil
+ }
+ return 0, fmt.Errorf("unparsed tail left after parsing float64 from %q: %q", s, ss)
+ }
+ f := float64(d)
+ if i >= uint(len(s)) {
+ // Fast path - just integer.
+ if minus {
+ f = -f
+ }
+ return f, nil
+ }
+
+ if s[i] == '.' {
+ // Parse fractional part.
+ i++
+ if i >= uint(len(s)) {
+ // the fractional part might be elided to remain compliant
+ // with https://go.dev/ref/spec#Floating-point_literals
+ return f, nil
+ }
+ k := i
+ for i < uint(len(s)) {
+ if s[i] >= '0' && s[i] <= '9' {
+ d = d*10 + uint64(s[i]-'0')
+ i++
+ if i-j >= uint(len(float64pow10)) {
+ // The mantissa is out of range. Fall back to standard parsing.
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil && !math.IsInf(f, 0) {
+ return 0, fmt.Errorf("cannot parse mantissa in %q: %s", s, err)
+ }
+ return f, nil
+ }
+ continue
+ }
+ break
+ }
+ if i < k {
+ return 0, fmt.Errorf("cannot find mantissa in %q", s)
+ }
+ // Convert the entire mantissa to a float at once to avoid rounding errors.
+ f = float64(d) / float64pow10[i-k]
+ if i >= uint(len(s)) {
+ // Fast path - parsed fractional number.
+ if minus {
+ f = -f
+ }
+ return f, nil
+ }
+ }
+ if s[i] == 'e' || s[i] == 'E' {
+ // Parse exponent part.
+ i++
+ if i >= uint(len(s)) {
+ return 0, fmt.Errorf("cannot parse exponent in %q", s)
+ }
+ expMinus := false
+ if s[i] == '+' || s[i] == '-' {
+ expMinus = s[i] == '-'
+ i++
+ if i >= uint(len(s)) {
+ return 0, fmt.Errorf("cannot parse exponent in %q", s)
+ }
+ }
+ exp := int16(0)
+ j := i
+ for i < uint(len(s)) {
+ if s[i] >= '0' && s[i] <= '9' {
+ exp = exp*10 + int16(s[i]-'0')
+ i++
+ if exp > 300 {
+ // The exponent may be too big for float64.
+ // Fall back to standard parsing.
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil && !math.IsInf(f, 0) {
+ return 0, fmt.Errorf("cannot parse exponent in %q: %s", s, err)
+ }
+ return f, nil
+ }
+ continue
+ }
+ break
+ }
+ if i <= j {
+ return 0, fmt.Errorf("cannot parse exponent in %q", s)
+ }
+ if expMinus {
+ exp = -exp
+ }
+ f *= math.Pow10(int(exp))
+ if i >= uint(len(s)) {
+ if minus {
+ f = -f
+ }
+ return f, nil
+ }
+ }
+ return 0, fmt.Errorf("cannot parse float64 from %q", s)
+}
+
+var inf = math.Inf(1)
+var nan = math.NaN()
diff --git a/vendor/github.com/valyala/fastjson/fuzz.go b/vendor/github.com/valyala/fastjson/fuzz.go
new file mode 100644
index 0000000000..9130797c70
--- /dev/null
+++ b/vendor/github.com/valyala/fastjson/fuzz.go
@@ -0,0 +1,22 @@
+// +build gofuzz
+
+package fastjson
+
+func Fuzz(data []byte) int {
+ err := ValidateBytes(data)
+ if err != nil {
+ return 0
+ }
+
+ v := MustParseBytes(data)
+
+ dst := make([]byte, 0)
+ dst = v.MarshalTo(dst)
+
+ err = ValidateBytes(dst)
+ if err != nil {
+ panic(err)
+ }
+
+ return 1
+}
diff --git a/vendor/github.com/valyala/fastjson/handy.go b/vendor/github.com/valyala/fastjson/handy.go
new file mode 100644
index 0000000000..a5d5618f09
--- /dev/null
+++ b/vendor/github.com/valyala/fastjson/handy.go
@@ -0,0 +1,170 @@
+package fastjson
+
+var handyPool ParserPool
+
+// GetString returns string value for the field identified by keys path
+// in JSON data.
+//
+// Array indexes may be represented as decimal numbers in keys.
+//
+// An empty string is returned on error. Use Parser for proper error handling.
+//
+// Parser is faster for obtaining multiple fields from JSON.
+func GetString(data []byte, keys ...string) string {
+ p := handyPool.Get()
+ v, err := p.ParseBytes(data)
+ if err != nil {
+ handyPool.Put(p)
+ return ""
+ }
+ sb := v.GetStringBytes(keys...)
+ str := string(sb)
+ handyPool.Put(p)
+ return str
+}
+
+// GetBytes returns string value for the field identified by keys path
+// in JSON data.
+//
+// Array indexes may be represented as decimal numbers in keys.
+//
+// nil is returned on error. Use Parser for proper error handling.
+//
+// Parser is faster for obtaining multiple fields from JSON.
+func GetBytes(data []byte, keys ...string) []byte {
+ p := handyPool.Get()
+ v, err := p.ParseBytes(data)
+ if err != nil {
+ handyPool.Put(p)
+ return nil
+ }
+ sb := v.GetStringBytes(keys...)
+
+ // Make a copy of sb, since sb belongs to p.
+ var b []byte
+ if sb != nil {
+ b = append(b, sb...)
+ }
+
+ handyPool.Put(p)
+ return b
+}
+
+// GetInt returns int value for the field identified by keys path
+// in JSON data.
+//
+// Array indexes may be represented as decimal numbers in keys.
+//
+// 0 is returned on error. Use Parser for proper error handling.
+//
+// Parser is faster for obtaining multiple fields from JSON.
+func GetInt(data []byte, keys ...string) int {
+ p := handyPool.Get()
+ v, err := p.ParseBytes(data)
+ if err != nil {
+ handyPool.Put(p)
+ return 0
+ }
+ n := v.GetInt(keys...)
+ handyPool.Put(p)
+ return n
+}
+
+// GetFloat64 returns float64 value for the field identified by keys path
+// in JSON data.
+//
+// Array indexes may be represented as decimal numbers in keys.
+//
+// 0 is returned on error. Use Parser for proper error handling.
+//
+// Parser is faster for obtaining multiple fields from JSON.
+func GetFloat64(data []byte, keys ...string) float64 {
+ p := handyPool.Get()
+ v, err := p.ParseBytes(data)
+ if err != nil {
+ handyPool.Put(p)
+ return 0
+ }
+ f := v.GetFloat64(keys...)
+ handyPool.Put(p)
+ return f
+}
+
+// GetBool returns boolean value for the field identified by keys path
+// in JSON data.
+//
+// Array indexes may be represented as decimal numbers in keys.
+//
+// False is returned on error. Use Parser for proper error handling.
+//
+// Parser is faster for obtaining multiple fields from JSON.
+func GetBool(data []byte, keys ...string) bool {
+ p := handyPool.Get()
+ v, err := p.ParseBytes(data)
+ if err != nil {
+ handyPool.Put(p)
+ return false
+ }
+ b := v.GetBool(keys...)
+ handyPool.Put(p)
+ return b
+}
+
+// Exists returns true if the field identified by keys path exists in JSON data.
+//
+// Array indexes may be represented as decimal numbers in keys.
+//
+// False is returned on error. Use Parser for proper error handling.
+//
+// Parser is faster when multiple fields must be checked in the JSON.
+func Exists(data []byte, keys ...string) bool {
+ p := handyPool.Get()
+ v, err := p.ParseBytes(data)
+ if err != nil {
+ handyPool.Put(p)
+ return false
+ }
+ ok := v.Exists(keys...)
+ handyPool.Put(p)
+ return ok
+}
+
+// Parse parses json string s.
+//
+// The function is slower than the Parser.Parse for re-used Parser.
+func Parse(s string) (*Value, error) {
+ var p Parser
+ return p.Parse(s)
+}
+
+// MustParse parses json string s.
+//
+// The function panics if s cannot be parsed.
+// The function is slower than the Parser.Parse for re-used Parser.
+func MustParse(s string) *Value {
+ v, err := Parse(s)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// ParseBytes parses b containing json.
+//
+// The function is slower than the Parser.ParseBytes for re-used Parser.
+func ParseBytes(b []byte) (*Value, error) {
+ var p Parser
+ return p.ParseBytes(b)
+}
+
+// MustParseBytes parses b containing json.
+//
+// The function panics if b cannot be parsed.
+// The function is slower than the Parser.ParseBytes for re-used Parser.
+func MustParseBytes(b []byte) *Value {
+ v, err := ParseBytes(b)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
diff --git a/vendor/github.com/valyala/fastjson/parser.go b/vendor/github.com/valyala/fastjson/parser.go
new file mode 100644
index 0000000000..885e1841ef
--- /dev/null
+++ b/vendor/github.com/valyala/fastjson/parser.go
@@ -0,0 +1,976 @@
+package fastjson
+
+import (
+ "fmt"
+ "github.com/valyala/fastjson/fastfloat"
+ "strconv"
+ "strings"
+ "unicode/utf16"
+)
+
+// Parser parses JSON.
+//
+// Parser may be re-used for subsequent parsing.
+//
+// Parser cannot be used from concurrent goroutines.
+// Use per-goroutine parsers or ParserPool instead.
+type Parser struct {
+ // b contains working copy of the string to be parsed.
+ b []byte
+
+ // c is a cache for json values.
+ c cache
+}
+
+// Parse parses s containing JSON.
+//
+// The returned value is valid until the next call to Parse*.
+//
+// Use Scanner if a stream of JSON values must be parsed.
+func (p *Parser) Parse(s string) (*Value, error) {
+ s = skipWS(s)
+ p.b = append(p.b[:0], s...)
+ p.c.reset()
+
+ v, tail, err := parseValue(b2s(p.b), &p.c, 0)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse JSON: %s; unparsed tail: %q", err, startEndString(tail))
+ }
+ tail = skipWS(tail)
+ if len(tail) > 0 {
+ return nil, fmt.Errorf("unexpected tail: %q", startEndString(tail))
+ }
+ return v, nil
+}
+
+// ParseBytes parses b containing JSON.
+//
+// The returned Value is valid until the next call to Parse*.
+//
+// Use Scanner if a stream of JSON values must be parsed.
+func (p *Parser) ParseBytes(b []byte) (*Value, error) {
+ return p.Parse(b2s(b))
+}
+
+type cache struct {
+ vs []Value
+}
+
+func (c *cache) reset() {
+ c.vs = c.vs[:0]
+}
+
+func (c *cache) getValue() *Value {
+ if cap(c.vs) > len(c.vs) {
+ c.vs = c.vs[:len(c.vs)+1]
+ } else {
+ c.vs = append(c.vs, Value{})
+ }
+ // Do not reset the value, since the caller must properly init it.
+ return &c.vs[len(c.vs)-1]
+}
+
+func skipWS(s string) string {
+ if len(s) == 0 || s[0] > 0x20 {
+ // Fast path.
+ return s
+ }
+ return skipWSSlow(s)
+}
+
+func skipWSSlow(s string) string {
+ if len(s) == 0 || s[0] != 0x20 && s[0] != 0x0A && s[0] != 0x09 && s[0] != 0x0D {
+ return s
+ }
+ for i := 1; i < len(s); i++ {
+ if s[i] != 0x20 && s[i] != 0x0A && s[i] != 0x09 && s[i] != 0x0D {
+ return s[i:]
+ }
+ }
+ return ""
+}
+
+type kv struct {
+ k string
+ v *Value
+}
+
+// MaxDepth is the maximum depth for nested JSON.
+const MaxDepth = 300
+
+func parseValue(s string, c *cache, depth int) (*Value, string, error) {
+ if len(s) == 0 {
+ return nil, s, fmt.Errorf("cannot parse empty string")
+ }
+ depth++
+ if depth > MaxDepth {
+ return nil, s, fmt.Errorf("too big depth for the nested JSON; it exceeds %d", MaxDepth)
+ }
+
+ if s[0] == '{' {
+ v, tail, err := parseObject(s[1:], c, depth)
+ if err != nil {
+ return nil, tail, fmt.Errorf("cannot parse object: %s", err)
+ }
+ return v, tail, nil
+ }
+ if s[0] == '[' {
+ v, tail, err := parseArray(s[1:], c, depth)
+ if err != nil {
+ return nil, tail, fmt.Errorf("cannot parse array: %s", err)
+ }
+ return v, tail, nil
+ }
+ if s[0] == '"' {
+ ss, tail, err := parseRawString(s[1:])
+ if err != nil {
+ return nil, tail, fmt.Errorf("cannot parse string: %s", err)
+ }
+ v := c.getValue()
+ v.t = typeRawString
+ v.s = ss
+ return v, tail, nil
+ }
+ if s[0] == 't' {
+ if len(s) < len("true") || s[:len("true")] != "true" {
+ return nil, s, fmt.Errorf("unexpected value found: %q", s)
+ }
+ return valueTrue, s[len("true"):], nil
+ }
+ if s[0] == 'f' {
+ if len(s) < len("false") || s[:len("false")] != "false" {
+ return nil, s, fmt.Errorf("unexpected value found: %q", s)
+ }
+ return valueFalse, s[len("false"):], nil
+ }
+ if s[0] == 'n' {
+ if len(s) < len("null") || s[:len("null")] != "null" {
+ // Try parsing NaN
+ if len(s) >= 3 && strings.EqualFold(s[:3], "nan") {
+ v := c.getValue()
+ v.t = TypeNumber
+ v.s = s[:3]
+ return v, s[3:], nil
+ }
+ return nil, s, fmt.Errorf("unexpected value found: %q", s)
+ }
+ return valueNull, s[len("null"):], nil
+ }
+
+ ns, tail, err := parseRawNumber(s)
+ if err != nil {
+ return nil, tail, fmt.Errorf("cannot parse number: %s", err)
+ }
+ v := c.getValue()
+ v.t = TypeNumber
+ v.s = ns
+ return v, tail, nil
+}
+
+func parseArray(s string, c *cache, depth int) (*Value, string, error) {
+ s = skipWS(s)
+ if len(s) == 0 {
+ return nil, s, fmt.Errorf("missing ']'")
+ }
+
+ if s[0] == ']' {
+ v := c.getValue()
+ v.t = TypeArray
+ v.a = v.a[:0]
+ return v, s[1:], nil
+ }
+
+ a := c.getValue()
+ a.t = TypeArray
+ a.a = a.a[:0]
+ for {
+ var v *Value
+ var err error
+
+ s = skipWS(s)
+ v, s, err = parseValue(s, c, depth)
+ if err != nil {
+ return nil, s, fmt.Errorf("cannot parse array value: %s", err)
+ }
+ a.a = append(a.a, v)
+
+ s = skipWS(s)
+ if len(s) == 0 {
+ return nil, s, fmt.Errorf("unexpected end of array")
+ }
+ if s[0] == ',' {
+ s = s[1:]
+ continue
+ }
+ if s[0] == ']' {
+ s = s[1:]
+ return a, s, nil
+ }
+ return nil, s, fmt.Errorf("missing ',' after array value")
+ }
+}
+
+func parseObject(s string, c *cache, depth int) (*Value, string, error) {
+ s = skipWS(s)
+ if len(s) == 0 {
+ return nil, s, fmt.Errorf("missing '}'")
+ }
+
+ if s[0] == '}' {
+ v := c.getValue()
+ v.t = TypeObject
+ v.o.reset()
+ return v, s[1:], nil
+ }
+
+ o := c.getValue()
+ o.t = TypeObject
+ o.o.reset()
+ for {
+ var err error
+ kv := o.o.getKV()
+
+ // Parse key.
+ s = skipWS(s)
+ if len(s) == 0 || s[0] != '"' {
+ return nil, s, fmt.Errorf(`cannot find opening '"" for object key`)
+ }
+ kv.k, s, err = parseRawKey(s[1:])
+ if err != nil {
+ return nil, s, fmt.Errorf("cannot parse object key: %s", err)
+ }
+ s = skipWS(s)
+ if len(s) == 0 || s[0] != ':' {
+ return nil, s, fmt.Errorf("missing ':' after object key")
+ }
+ s = s[1:]
+
+ // Parse value
+ s = skipWS(s)
+ kv.v, s, err = parseValue(s, c, depth)
+ if err != nil {
+ return nil, s, fmt.Errorf("cannot parse object value: %s", err)
+ }
+ s = skipWS(s)
+ if len(s) == 0 {
+ return nil, s, fmt.Errorf("unexpected end of object")
+ }
+ if s[0] == ',' {
+ s = s[1:]
+ continue
+ }
+ if s[0] == '}' {
+ return o, s[1:], nil
+ }
+ return nil, s, fmt.Errorf("missing ',' after object value")
+ }
+}
+
+func escapeString(dst []byte, s string) []byte {
+ if !hasSpecialChars(s) {
+ // Fast path - nothing to escape.
+ dst = append(dst, '"')
+ dst = append(dst, s...)
+ dst = append(dst, '"')
+ return dst
+ }
+
+ // Slow path.
+ return strconv.AppendQuote(dst, s)
+}
+
+func hasSpecialChars(s string) bool {
+ if strings.IndexByte(s, '"') >= 0 || strings.IndexByte(s, '\\') >= 0 {
+ return true
+ }
+ for i := 0; i < len(s); i++ {
+ if s[i] < 0x20 {
+ return true
+ }
+ }
+ return false
+}
+
+func unescapeStringBestEffort(s string) string {
+ n := strings.IndexByte(s, '\\')
+ if n < 0 {
+ // Fast path - nothing to unescape.
+ return s
+ }
+
+ // Slow path - unescape string.
+ b := s2b(s) // It is safe to do, since s points to a byte slice in Parser.b.
+ b = b[:n]
+ s = s[n+1:]
+ for len(s) > 0 {
+ ch := s[0]
+ s = s[1:]
+ switch ch {
+ case '"':
+ b = append(b, '"')
+ case '\\':
+ b = append(b, '\\')
+ case '/':
+ b = append(b, '/')
+ case 'b':
+ b = append(b, '\b')
+ case 'f':
+ b = append(b, '\f')
+ case 'n':
+ b = append(b, '\n')
+ case 'r':
+ b = append(b, '\r')
+ case 't':
+ b = append(b, '\t')
+ case 'u':
+ if len(s) < 4 {
+ // Too short escape sequence. Just store it unchanged.
+ b = append(b, "\\u"...)
+ break
+ }
+ xs := s[:4]
+ x, err := strconv.ParseUint(xs, 16, 16)
+ if err != nil {
+ // Invalid escape sequence. Just store it unchanged.
+ b = append(b, "\\u"...)
+ break
+ }
+ s = s[4:]
+ if !utf16.IsSurrogate(rune(x)) {
+ b = append(b, string(rune(x))...)
+ break
+ }
+
+ // Surrogate.
+ // See https://en.wikipedia.org/wiki/Universal_Character_Set_characters#Surrogates
+ if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+ b = append(b, "\\u"...)
+ b = append(b, xs...)
+ break
+ }
+ x1, err := strconv.ParseUint(s[2:6], 16, 16)
+ if err != nil {
+ b = append(b, "\\u"...)
+ b = append(b, xs...)
+ break
+ }
+ r := utf16.DecodeRune(rune(x), rune(x1))
+ b = append(b, string(r)...)
+ s = s[6:]
+ default:
+ // Unknown escape sequence. Just store it unchanged.
+ b = append(b, '\\', ch)
+ }
+ n = strings.IndexByte(s, '\\')
+ if n < 0 {
+ b = append(b, s...)
+ break
+ }
+ b = append(b, s[:n]...)
+ s = s[n+1:]
+ }
+ return b2s(b)
+}
+
+// parseRawKey is similar to parseRawString, but is optimized
+// for small-sized keys without escape sequences.
+func parseRawKey(s string) (string, string, error) {
+ for i := 0; i < len(s); i++ {
+ if s[i] == '"' {
+ // Fast path.
+ return s[:i], s[i+1:], nil
+ }
+ if s[i] == '\\' {
+ // Slow path.
+ return parseRawString(s)
+ }
+ }
+ return s, "", fmt.Errorf(`missing closing '"'`)
+}
+
+func parseRawString(s string) (string, string, error) {
+ n := strings.IndexByte(s, '"')
+ if n < 0 {
+ return s, "", fmt.Errorf(`missing closing '"'`)
+ }
+ if n == 0 || s[n-1] != '\\' {
+ // Fast path. No escaped ".
+ return s[:n], s[n+1:], nil
+ }
+
+ // Slow path - possible escaped " found.
+ ss := s
+ for {
+ i := n - 1
+ for i > 0 && s[i-1] == '\\' {
+ i--
+ }
+ if uint(n-i)%2 == 0 {
+ return ss[:len(ss)-len(s)+n], s[n+1:], nil
+ }
+ s = s[n+1:]
+
+ n = strings.IndexByte(s, '"')
+ if n < 0 {
+ return ss, "", fmt.Errorf(`missing closing '"'`)
+ }
+ if n == 0 || s[n-1] != '\\' {
+ return ss[:len(ss)-len(s)+n], s[n+1:], nil
+ }
+ }
+}
+
+func parseRawNumber(s string) (string, string, error) {
+ // The caller must ensure len(s) > 0
+
+ // Find the end of the number.
+ for i := 0; i < len(s); i++ {
+ ch := s[i]
+ if (ch >= '0' && ch <= '9') || ch == '.' || ch == '-' || ch == 'e' || ch == 'E' || ch == '+' {
+ continue
+ }
+ if i == 0 || i == 1 && (s[0] == '-' || s[0] == '+') {
+ if len(s[i:]) >= 3 {
+ xs := s[i : i+3]
+ if strings.EqualFold(xs, "inf") || strings.EqualFold(xs, "nan") {
+ return s[:i+3], s[i+3:], nil
+ }
+ }
+ return "", s, fmt.Errorf("unexpected char: %q", s[:1])
+ }
+ ns := s[:i]
+ s = s[i:]
+ return ns, s, nil
+ }
+ return s, "", nil
+}
+
+// Object represents JSON object.
+//
+// Object cannot be used from concurrent goroutines.
+// Use per-goroutine parsers or ParserPool instead.
+type Object struct {
+ kvs []kv
+ keysUnescaped bool
+}
+
+func (o *Object) reset() {
+ o.kvs = o.kvs[:0]
+ o.keysUnescaped = false
+}
+
+// MarshalTo appends marshaled o to dst and returns the result.
+func (o *Object) MarshalTo(dst []byte) []byte {
+ dst = append(dst, '{')
+ for i, kv := range o.kvs {
+ if o.keysUnescaped {
+ dst = escapeString(dst, kv.k)
+ } else {
+ dst = append(dst, '"')
+ dst = append(dst, kv.k...)
+ dst = append(dst, '"')
+ }
+ dst = append(dst, ':')
+ dst = kv.v.MarshalTo(dst)
+ if i != len(o.kvs)-1 {
+ dst = append(dst, ',')
+ }
+ }
+ dst = append(dst, '}')
+ return dst
+}
+
+// String returns string representation for the o.
+//
+// This function is for debugging purposes only. It isn't optimized for speed.
+// See MarshalTo instead.
+func (o *Object) String() string {
+ b := o.MarshalTo(nil)
+ // It is safe converting b to string without allocation, since b is no longer
+ // reachable after this line.
+ return b2s(b)
+}
+
+func (o *Object) getKV() *kv {
+ if cap(o.kvs) > len(o.kvs) {
+ o.kvs = o.kvs[:len(o.kvs)+1]
+ } else {
+ o.kvs = append(o.kvs, kv{})
+ }
+ return &o.kvs[len(o.kvs)-1]
+}
+
+func (o *Object) unescapeKeys() {
+ if o.keysUnescaped {
+ return
+ }
+ kvs := o.kvs
+ for i := range kvs {
+ kv := &kvs[i]
+ kv.k = unescapeStringBestEffort(kv.k)
+ }
+ o.keysUnescaped = true
+}
+
+// Len returns the number of items in the o.
+func (o *Object) Len() int {
+ return len(o.kvs)
+}
+
+// Get returns the value for the given key in the o.
+//
+// Returns nil if the value for the given key isn't found.
+//
+// The returned value is valid until Parse is called on the Parser returned o.
+func (o *Object) Get(key string) *Value {
+ if !o.keysUnescaped && strings.IndexByte(key, '\\') < 0 {
+ // Fast path - try searching for the key without object keys unescaping.
+ for _, kv := range o.kvs {
+ if kv.k == key {
+ return kv.v
+ }
+ }
+ }
+
+ // Slow path - unescape object keys.
+ o.unescapeKeys()
+
+ for _, kv := range o.kvs {
+ if kv.k == key {
+ return kv.v
+ }
+ }
+ return nil
+}
+
+// Visit calls f for each item in the o in the original order
+// of the parsed JSON.
+//
+// f cannot hold key and/or v after returning.
+func (o *Object) Visit(f func(key []byte, v *Value)) {
+ if o == nil {
+ return
+ }
+
+ o.unescapeKeys()
+
+ for _, kv := range o.kvs {
+ f(s2b(kv.k), kv.v)
+ }
+}
+
+// Value represents any JSON value.
+//
+// Call Type in order to determine the actual type of the JSON value.
+//
+// Value cannot be used from concurrent goroutines.
+// Use per-goroutine parsers or ParserPool instead.
+type Value struct {
+ o Object
+ a []*Value
+ s string
+ t Type
+}
+
+// MarshalTo appends marshaled v to dst and returns the result.
+func (v *Value) MarshalTo(dst []byte) []byte {
+ switch v.t {
+ case typeRawString:
+ dst = append(dst, '"')
+ dst = append(dst, v.s...)
+ dst = append(dst, '"')
+ return dst
+ case TypeObject:
+ return v.o.MarshalTo(dst)
+ case TypeArray:
+ dst = append(dst, '[')
+ for i, vv := range v.a {
+ dst = vv.MarshalTo(dst)
+ if i != len(v.a)-1 {
+ dst = append(dst, ',')
+ }
+ }
+ dst = append(dst, ']')
+ return dst
+ case TypeString:
+ return escapeString(dst, v.s)
+ case TypeNumber:
+ return append(dst, v.s...)
+ case TypeTrue:
+ return append(dst, "true"...)
+ case TypeFalse:
+ return append(dst, "false"...)
+ case TypeNull:
+ return append(dst, "null"...)
+ default:
+ panic(fmt.Errorf("BUG: unexpected Value type: %d", v.t))
+ }
+}
+
+// String returns string representation of the v.
+//
+// The function is for debugging purposes only. It isn't optimized for speed.
+// See MarshalTo instead.
+//
+// Don't confuse this function with StringBytes, which must be called
+// for obtaining the underlying JSON string for the v.
+func (v *Value) String() string {
+ b := v.MarshalTo(nil)
+ // It is safe converting b to string without allocation, since b is no longer
+ // reachable after this line.
+ return b2s(b)
+}
+
+// Type represents JSON type.
+type Type int
+
+const (
+ // TypeNull is JSON null.
+ TypeNull Type = 0
+
+ // TypeObject is JSON object type.
+ TypeObject Type = 1
+
+ // TypeArray is JSON array type.
+ TypeArray Type = 2
+
+ // TypeString is JSON string type.
+ TypeString Type = 3
+
+ // TypeNumber is JSON number type.
+ TypeNumber Type = 4
+
+ // TypeTrue is JSON true.
+ TypeTrue Type = 5
+
+ // TypeFalse is JSON false.
+ TypeFalse Type = 6
+
+ typeRawString Type = 7
+)
+
+// String returns string representation of t.
+func (t Type) String() string {
+ switch t {
+ case TypeObject:
+ return "object"
+ case TypeArray:
+ return "array"
+ case TypeString:
+ return "string"
+ case TypeNumber:
+ return "number"
+ case TypeTrue:
+ return "true"
+ case TypeFalse:
+ return "false"
+ case TypeNull:
+ return "null"
+
+ // typeRawString is skipped intentionally,
+ // since it shouldn't be visible to user.
+ default:
+ panic(fmt.Errorf("BUG: unknown Value type: %d", t))
+ }
+}
+
+// Type returns the type of the v.
+func (v *Value) Type() Type {
+ if v.t == typeRawString {
+ v.s = unescapeStringBestEffort(v.s)
+ v.t = TypeString
+ }
+ return v.t
+}
+
+// Exists returns true if the field exists for the given keys path.
+//
+// Array indexes may be represented as decimal numbers in keys.
+func (v *Value) Exists(keys ...string) bool {
+ v = v.Get(keys...)
+ return v != nil
+}
+
+// Get returns value by the given keys path.
+//
+// Array indexes may be represented as decimal numbers in keys.
+//
+// nil is returned for non-existing keys path.
+//
+// The returned value is valid until Parse is called on the Parser returned v.
+func (v *Value) Get(keys ...string) *Value {
+ if v == nil {
+ return nil
+ }
+ for _, key := range keys {
+ if v.t == TypeObject {
+ v = v.o.Get(key)
+ if v == nil {
+ return nil
+ }
+ } else if v.t == TypeArray {
+ n, err := strconv.Atoi(key)
+ if err != nil || n < 0 || n >= len(v.a) {
+ return nil
+ }
+ v = v.a[n]
+ } else {
+ return nil
+ }
+ }
+ return v
+}
+
+// GetObject returns object value by the given keys path.
+//
+// Array indexes may be represented as decimal numbers in keys.
+//
+// nil is returned for non-existing keys path or for invalid value type.
+//
+// The returned object is valid until Parse is called on the Parser returned v.
+func (v *Value) GetObject(keys ...string) *Object {
+ v = v.Get(keys...)
+ if v == nil || v.t != TypeObject {
+ return nil
+ }
+ return &v.o
+}
+
+// GetArray returns array value by the given keys path.
+//
+// Array indexes may be represented as decimal numbers in keys.
+//
+// nil is returned for non-existing keys path or for invalid value type.
+//
+// The returned array is valid until Parse is called on the Parser returned v.
+func (v *Value) GetArray(keys ...string) []*Value {
+ v = v.Get(keys...)
+ if v == nil || v.t != TypeArray {
+ return nil
+ }
+ return v.a
+}
+
+// GetFloat64 returns float64 value by the given keys path.
+//
+// Array indexes may be represented as decimal numbers in keys.
+//
+// 0 is returned for non-existing keys path or for invalid value type.
+func (v *Value) GetFloat64(keys ...string) float64 {
+ v = v.Get(keys...)
+ if v == nil || v.Type() != TypeNumber {
+ return 0
+ }
+ return fastfloat.ParseBestEffort(v.s)
+}
+
+// GetInt returns int value by the given keys path.
+//
+// Array indexes may be represented as decimal numbers in keys.
+//
+// 0 is returned for non-existing keys path or for invalid value type.
+func (v *Value) GetInt(keys ...string) int {
+ v = v.Get(keys...)
+ if v == nil || v.Type() != TypeNumber {
+ return 0
+ }
+ n := fastfloat.ParseInt64BestEffort(v.s)
+ nn := int(n)
+ if int64(nn) != n {
+ return 0
+ }
+ return nn
+}
+
+// GetUint returns uint value by the given keys path.
+//
+// Array indexes may be represented as decimal numbers in keys.
+//
+// 0 is returned for non-existing keys path or for invalid value type.
+func (v *Value) GetUint(keys ...string) uint {
+ v = v.Get(keys...)
+ if v == nil || v.Type() != TypeNumber {
+ return 0
+ }
+ n := fastfloat.ParseUint64BestEffort(v.s)
+ nn := uint(n)
+ if uint64(nn) != n {
+ return 0
+ }
+ return nn
+}
+
+// GetInt64 returns int64 value by the given keys path.
+//
+// Array indexes may be represented as decimal numbers in keys.
+//
+// 0 is returned for non-existing keys path or for invalid value type.
+func (v *Value) GetInt64(keys ...string) int64 {
+ v = v.Get(keys...)
+ if v == nil || v.Type() != TypeNumber {
+ return 0
+ }
+ return fastfloat.ParseInt64BestEffort(v.s)
+}
+
+// GetUint64 returns uint64 value by the given keys path.
+//
+// Array indexes may be represented as decimal numbers in keys.
+//
+// 0 is returned for non-existing keys path or for invalid value type.
+func (v *Value) GetUint64(keys ...string) uint64 {
+ v = v.Get(keys...)
+ if v == nil || v.Type() != TypeNumber {
+ return 0
+ }
+ return fastfloat.ParseUint64BestEffort(v.s)
+}
+
+// GetStringBytes returns string value by the given keys path.
+//
+// Array indexes may be represented as decimal numbers in keys.
+//
+// nil is returned for non-existing keys path or for invalid value type.
+//
+// The returned string is valid until Parse is called on the Parser returned v.
+func (v *Value) GetStringBytes(keys ...string) []byte {
+ v = v.Get(keys...)
+ if v == nil || v.Type() != TypeString {
+ return nil
+ }
+ return s2b(v.s)
+}
+
+// GetBool returns bool value by the given keys path.
+//
+// Array indexes may be represented as decimal numbers in keys.
+//
+// false is returned for non-existing keys path or for invalid value type.
+func (v *Value) GetBool(keys ...string) bool {
+ v = v.Get(keys...)
+ if v != nil && v.t == TypeTrue {
+ return true
+ }
+ return false
+}
+
+// Object returns the underlying JSON object for the v.
+//
+// The returned object is valid until Parse is called on the Parser returned v.
+//
+// Use GetObject if you don't need error handling.
+func (v *Value) Object() (*Object, error) {
+ if v.t != TypeObject {
+ return nil, fmt.Errorf("value doesn't contain object; it contains %s", v.Type())
+ }
+ return &v.o, nil
+}
+
+// Array returns the underlying JSON array for the v.
+//
+// The returned array is valid until Parse is called on the Parser returned v.
+//
+// Use GetArray if you don't need error handling.
+func (v *Value) Array() ([]*Value, error) {
+ if v.t != TypeArray {
+ return nil, fmt.Errorf("value doesn't contain array; it contains %s", v.Type())
+ }
+ return v.a, nil
+}
+
+// StringBytes returns the underlying JSON string for the v.
+//
+// The returned string is valid until Parse is called on the Parser returned v.
+//
+// Use GetStringBytes if you don't need error handling.
+func (v *Value) StringBytes() ([]byte, error) {
+ if v.Type() != TypeString {
+ return nil, fmt.Errorf("value doesn't contain string; it contains %s", v.Type())
+ }
+ return s2b(v.s), nil
+}
+
+// Float64 returns the underlying JSON number for the v.
+//
+// Use GetFloat64 if you don't need error handling.
+func (v *Value) Float64() (float64, error) {
+ if v.Type() != TypeNumber {
+ return 0, fmt.Errorf("value doesn't contain number; it contains %s", v.Type())
+ }
+ return fastfloat.Parse(v.s)
+}
+
+// Int returns the underlying JSON int for the v.
+//
+// Use GetInt if you don't need error handling.
+func (v *Value) Int() (int, error) {
+ if v.Type() != TypeNumber {
+ return 0, fmt.Errorf("value doesn't contain number; it contains %s", v.Type())
+ }
+ n, err := fastfloat.ParseInt64(v.s)
+ if err != nil {
+ return 0, err
+ }
+ nn := int(n)
+ if int64(nn) != n {
+ return 0, fmt.Errorf("number %q doesn't fit int", v.s)
+ }
+ return nn, nil
+}
+
+// Uint returns the underlying JSON uint for the v.
+//
+// Use GetInt if you don't need error handling.
+func (v *Value) Uint() (uint, error) {
+ if v.Type() != TypeNumber {
+ return 0, fmt.Errorf("value doesn't contain number; it contains %s", v.Type())
+ }
+ n, err := fastfloat.ParseUint64(v.s)
+ if err != nil {
+ return 0, err
+ }
+ nn := uint(n)
+ if uint64(nn) != n {
+ return 0, fmt.Errorf("number %q doesn't fit uint", v.s)
+ }
+ return nn, nil
+}
+
+// Int64 returns the underlying JSON int64 for the v.
+//
+// Use GetInt64 if you don't need error handling.
+func (v *Value) Int64() (int64, error) {
+ if v.Type() != TypeNumber {
+ return 0, fmt.Errorf("value doesn't contain number; it contains %s", v.Type())
+ }
+ return fastfloat.ParseInt64(v.s)
+}
+
+// Uint64 returns the underlying JSON uint64 for the v.
+//
+// Use GetInt64 if you don't need error handling.
+func (v *Value) Uint64() (uint64, error) {
+ if v.Type() != TypeNumber {
+ return 0, fmt.Errorf("value doesn't contain number; it contains %s", v.Type())
+ }
+ return fastfloat.ParseUint64(v.s)
+}
+
+// Bool returns the underlying JSON bool for the v.
+//
+// Use GetBool if you don't need error handling.
+func (v *Value) Bool() (bool, error) {
+ if v.t == TypeTrue {
+ return true, nil
+ }
+ if v.t == TypeFalse {
+ return false, nil
+ }
+ return false, fmt.Errorf("value doesn't contain bool; it contains %s", v.Type())
+}
+
+var (
+ valueTrue = &Value{t: TypeTrue}
+ valueFalse = &Value{t: TypeFalse}
+ valueNull = &Value{t: TypeNull}
+)
diff --git a/vendor/github.com/valyala/fastjson/pool.go b/vendor/github.com/valyala/fastjson/pool.go
new file mode 100644
index 0000000000..00cfb42fa6
--- /dev/null
+++ b/vendor/github.com/valyala/fastjson/pool.go
@@ -0,0 +1,52 @@
+package fastjson
+
+import (
+ "sync"
+)
+
+// ParserPool may be used for pooling Parsers for similarly typed JSONs.
+type ParserPool struct {
+ pool sync.Pool
+}
+
+// Get returns a Parser from pp.
+//
+// The Parser must be Put to pp after use.
+func (pp *ParserPool) Get() *Parser {
+ v := pp.pool.Get()
+ if v == nil {
+ return &Parser{}
+ }
+ return v.(*Parser)
+}
+
+// Put returns p to pp.
+//
+// p and objects recursively returned from p cannot be used after p
+// is put into pp.
+func (pp *ParserPool) Put(p *Parser) {
+ pp.pool.Put(p)
+}
+
+// ArenaPool may be used for pooling Arenas for similarly typed JSONs.
+type ArenaPool struct {
+ pool sync.Pool
+}
+
+// Get returns an Arena from ap.
+//
+// The Arena must be Put to ap after use.
+func (ap *ArenaPool) Get() *Arena {
+ v := ap.pool.Get()
+ if v == nil {
+ return &Arena{}
+ }
+ return v.(*Arena)
+}
+
+// Put returns a to ap.
+//
+// a and objects created by a cannot be used after a is put into ap.
+func (ap *ArenaPool) Put(a *Arena) {
+ ap.pool.Put(a)
+}
diff --git a/vendor/github.com/valyala/fastjson/scanner.go b/vendor/github.com/valyala/fastjson/scanner.go
new file mode 100644
index 0000000000..89b38816f0
--- /dev/null
+++ b/vendor/github.com/valyala/fastjson/scanner.go
@@ -0,0 +1,94 @@
+package fastjson
+
+import (
+ "errors"
+)
+
+// Scanner scans a series of JSON values. Values may be delimited by whitespace.
+//
+// Scanner may parse JSON lines ( http://jsonlines.org/ ).
+//
+// Scanner may be re-used for subsequent parsing.
+//
+// Scanner cannot be used from concurrent goroutines.
+//
+// Use Parser for parsing only a single JSON value.
+type Scanner struct {
+ // b contains a working copy of json value passed to Init.
+ b []byte
+
+ // s points to the next JSON value to parse.
+ s string
+
+ // err contains the last error.
+ err error
+
+ // v contains the last parsed JSON value.
+ v *Value
+
+ // c is used for caching JSON values.
+ c cache
+}
+
+// Init initializes sc with the given s.
+//
+// s may contain multiple JSON values, which may be delimited by whitespace.
+func (sc *Scanner) Init(s string) {
+ sc.b = append(sc.b[:0], s...)
+ sc.s = b2s(sc.b)
+ sc.err = nil
+ sc.v = nil
+}
+
+// InitBytes initializes sc with the given b.
+//
+// b may contain multiple JSON values, which may be delimited by whitespace.
+func (sc *Scanner) InitBytes(b []byte) {
+ sc.Init(b2s(b))
+}
+
+// Next parses the next JSON value from s passed to Init.
+//
+// Returns true on success. The parsed value is available via Value call.
+//
+// Returns false either on error or on the end of s.
+// Call Error in order to determine the cause of the returned false.
+func (sc *Scanner) Next() bool {
+ if sc.err != nil {
+ return false
+ }
+
+ sc.s = skipWS(sc.s)
+ if len(sc.s) == 0 {
+ sc.err = errEOF
+ return false
+ }
+
+ sc.c.reset()
+ v, tail, err := parseValue(sc.s, &sc.c, 0)
+ if err != nil {
+ sc.err = err
+ return false
+ }
+
+ sc.s = tail
+ sc.v = v
+ return true
+}
+
+// Error returns the last error.
+func (sc *Scanner) Error() error {
+ if sc.err == errEOF {
+ return nil
+ }
+ return sc.err
+}
+
+// Value returns the last parsed value.
+//
+// The value is valid until the Next call.
+func (sc *Scanner) Value() *Value {
+ return sc.v
+}
+
+var errEOF = errors.New("end of s")
diff --git a/vendor/github.com/valyala/fastjson/update.go b/vendor/github.com/valyala/fastjson/update.go
new file mode 100644
index 0000000000..f8099bdbb9
--- /dev/null
+++ b/vendor/github.com/valyala/fastjson/update.go
@@ -0,0 +1,110 @@
+package fastjson
+
+import (
+ "strconv"
+ "strings"
+)
+
+// Del deletes the entry with the given key from o.
+func (o *Object) Del(key string) {
+ if o == nil {
+ return
+ }
+ if !o.keysUnescaped && strings.IndexByte(key, '\\') < 0 {
+ // Fast path - try searching for the key without object keys unescaping.
+ for i, kv := range o.kvs {
+ if kv.k == key {
+ o.kvs = append(o.kvs[:i], o.kvs[i+1:]...)
+ return
+ }
+ }
+ }
+
+ // Slow path - unescape object keys before item search.
+ o.unescapeKeys()
+
+ for i, kv := range o.kvs {
+ if kv.k == key {
+ o.kvs = append(o.kvs[:i], o.kvs[i+1:]...)
+ return
+ }
+ }
+}
+
+// Del deletes the entry with the given key from array or object v.
+func (v *Value) Del(key string) {
+ if v == nil {
+ return
+ }
+ if v.t == TypeObject {
+ v.o.Del(key)
+ return
+ }
+ if v.t == TypeArray {
+ n, err := strconv.Atoi(key)
+ if err != nil || n < 0 || n >= len(v.a) {
+ return
+ }
+ v.a = append(v.a[:n], v.a[n+1:]...)
+ }
+}
+
+// Set sets (key, value) entry in the o.
+//
+// The value must be unchanged during o lifetime.
+func (o *Object) Set(key string, value *Value) {
+ if o == nil {
+ return
+ }
+ if value == nil {
+ value = valueNull
+ }
+ o.unescapeKeys()
+
+ // Try substituting already existing entry with the given key.
+ for i := range o.kvs {
+ kv := &o.kvs[i]
+ if kv.k == key {
+ kv.v = value
+ return
+ }
+ }
+
+ // Add new entry.
+ kv := o.getKV()
+ kv.k = key
+ kv.v = value
+}
+
+// Set sets (key, value) entry in the array or object v.
+//
+// The value must be unchanged during v lifetime.
+func (v *Value) Set(key string, value *Value) {
+ if v == nil {
+ return
+ }
+ if v.t == TypeObject {
+ v.o.Set(key, value)
+ return
+ }
+ if v.t == TypeArray {
+ idx, err := strconv.Atoi(key)
+ if err != nil || idx < 0 {
+ return
+ }
+ v.SetArrayItem(idx, value)
+ }
+}
+
+// SetArrayItem sets the value in the array v at idx position.
+//
+// The value must be unchanged during v lifetime.
+func (v *Value) SetArrayItem(idx int, value *Value) {
+ if v == nil || v.t != TypeArray {
+ return
+ }
+ for idx >= len(v.a) {
+ v.a = append(v.a, valueNull)
+ }
+ v.a[idx] = value
+}
diff --git a/vendor/github.com/valyala/fastjson/util.go b/vendor/github.com/valyala/fastjson/util.go
new file mode 100644
index 0000000000..03a53965a2
--- /dev/null
+++ b/vendor/github.com/valyala/fastjson/util.go
@@ -0,0 +1,30 @@
+package fastjson
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+func b2s(b []byte) string {
+ return *(*string)(unsafe.Pointer(&b))
+}
+
+func s2b(s string) (b []byte) {
+ strh := (*reflect.StringHeader)(unsafe.Pointer(&s))
+ sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ sh.Data = strh.Data
+ sh.Len = strh.Len
+ sh.Cap = strh.Len
+ return b
+}
+
+const maxStartEndStringLen = 80
+
+func startEndString(s string) string {
+ if len(s) <= maxStartEndStringLen {
+ return s
+ }
+ start := s[:40]
+ end := s[len(s)-40:]
+ return start + "..." + end
+}
diff --git a/vendor/github.com/valyala/fastjson/validate.go b/vendor/github.com/valyala/fastjson/validate.go
new file mode 100644
index 0000000000..196f1c3dc6
--- /dev/null
+++ b/vendor/github.com/valyala/fastjson/validate.go
@@ -0,0 +1,308 @@
+package fastjson
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Validate validates JSON s.
+func Validate(s string) error {
+ s = skipWS(s)
+
+ tail, err := validateValue(s)
+ if err != nil {
+ return fmt.Errorf("cannot parse JSON: %s; unparsed tail: %q", err, startEndString(tail))
+ }
+ tail = skipWS(tail)
+ if len(tail) > 0 {
+ return fmt.Errorf("unexpected tail: %q", startEndString(tail))
+ }
+ return nil
+}
+
+// ValidateBytes validates JSON b.
+func ValidateBytes(b []byte) error {
+ return Validate(b2s(b))
+}
+
+func validateValue(s string) (string, error) {
+ if len(s) == 0 {
+ return s, fmt.Errorf("cannot parse empty string")
+ }
+
+ if s[0] == '{' {
+ tail, err := validateObject(s[1:])
+ if err != nil {
+ return tail, fmt.Errorf("cannot parse object: %s", err)
+ }
+ return tail, nil
+ }
+ if s[0] == '[' {
+ tail, err := validateArray(s[1:])
+ if err != nil {
+ return tail, fmt.Errorf("cannot parse array: %s", err)
+ }
+ return tail, nil
+ }
+ if s[0] == '"' {
+ sv, tail, err := validateString(s[1:])
+ if err != nil {
+ return tail, fmt.Errorf("cannot parse string: %s", err)
+ }
+ // Scan the string for control chars.
+ for i := 0; i < len(sv); i++ {
+ if sv[i] < 0x20 {
+ return tail, fmt.Errorf("string cannot contain control char 0x%02X", sv[i])
+ }
+ }
+ return tail, nil
+ }
+ if s[0] == 't' {
+ if len(s) < len("true") || s[:len("true")] != "true" {
+ return s, fmt.Errorf("unexpected value found: %q", s)
+ }
+ return s[len("true"):], nil
+ }
+ if s[0] == 'f' {
+ if len(s) < len("false") || s[:len("false")] != "false" {
+ return s, fmt.Errorf("unexpected value found: %q", s)
+ }
+ return s[len("false"):], nil
+ }
+ if s[0] == 'n' {
+ if len(s) < len("null") || s[:len("null")] != "null" {
+ return s, fmt.Errorf("unexpected value found: %q", s)
+ }
+ return s[len("null"):], nil
+ }
+
+ tail, err := validateNumber(s)
+ if err != nil {
+ return tail, fmt.Errorf("cannot parse number: %s", err)
+ }
+ return tail, nil
+}
+
+func validateArray(s string) (string, error) {
+ s = skipWS(s)
+ if len(s) == 0 {
+ return s, fmt.Errorf("missing ']'")
+ }
+ if s[0] == ']' {
+ return s[1:], nil
+ }
+
+ for {
+ var err error
+
+ s = skipWS(s)
+ s, err = validateValue(s)
+ if err != nil {
+ return s, fmt.Errorf("cannot parse array value: %s", err)
+ }
+
+ s = skipWS(s)
+ if len(s) == 0 {
+ return s, fmt.Errorf("unexpected end of array")
+ }
+ if s[0] == ',' {
+ s = s[1:]
+ continue
+ }
+ if s[0] == ']' {
+ s = s[1:]
+ return s, nil
+ }
+ return s, fmt.Errorf("missing ',' after array value")
+ }
+}
+
+func validateObject(s string) (string, error) {
+ s = skipWS(s)
+ if len(s) == 0 {
+ return s, fmt.Errorf("missing '}'")
+ }
+ if s[0] == '}' {
+ return s[1:], nil
+ }
+
+ for {
+ var err error
+
+ // Parse key.
+ s = skipWS(s)
+ if len(s) == 0 || s[0] != '"' {
+ return s, fmt.Errorf(`cannot find opening '"" for object key`)
+ }
+
+ var key string
+ key, s, err = validateKey(s[1:])
+ if err != nil {
+ return s, fmt.Errorf("cannot parse object key: %s", err)
+ }
+ // Scan the key for control chars.
+ for i := 0; i < len(key); i++ {
+ if key[i] < 0x20 {
+ return s, fmt.Errorf("object key cannot contain control char 0x%02X", key[i])
+ }
+ }
+ s = skipWS(s)
+ if len(s) == 0 || s[0] != ':' {
+ return s, fmt.Errorf("missing ':' after object key")
+ }
+ s = s[1:]
+
+ // Parse value
+ s = skipWS(s)
+ s, err = validateValue(s)
+ if err != nil {
+ return s, fmt.Errorf("cannot parse object value: %s", err)
+ }
+ s = skipWS(s)
+ if len(s) == 0 {
+ return s, fmt.Errorf("unexpected end of object")
+ }
+ if s[0] == ',' {
+ s = s[1:]
+ continue
+ }
+ if s[0] == '}' {
+ return s[1:], nil
+ }
+ return s, fmt.Errorf("missing ',' after object value")
+ }
+}
+
+// validateKey is similar to validateString, but is optimized
+// for typical object keys, which are quite small and have no escape sequences.
+func validateKey(s string) (string, string, error) {
+ for i := 0; i < len(s); i++ {
+ if s[i] == '"' {
+ // Fast path - the key doesn't contain escape sequences.
+ return s[:i], s[i+1:], nil
+ }
+ if s[i] == '\\' {
+ // Slow path - the key contains escape sequences.
+ return validateString(s)
+ }
+ }
+ return "", s, fmt.Errorf(`missing closing '"'`)
+}
+
+func validateString(s string) (string, string, error) {
+ // Try fast path - a string without escape sequences.
+ if n := strings.IndexByte(s, '"'); n >= 0 && strings.IndexByte(s[:n], '\\') < 0 {
+ return s[:n], s[n+1:], nil
+ }
+
+ // Slow path - escape sequences are present.
+ rs, tail, err := parseRawString(s)
+ if err != nil {
+ return rs, tail, err
+ }
+ for {
+ n := strings.IndexByte(rs, '\\')
+ if n < 0 {
+ return rs, tail, nil
+ }
+ n++
+ if n >= len(rs) {
+ return rs, tail, fmt.Errorf("BUG: parseRawString returned invalid string with trailing backslash: %q", rs)
+ }
+ ch := rs[n]
+ rs = rs[n+1:]
+ switch ch {
+ case '"', '\\', '/', 'b', 'f', 'n', 'r', 't':
+ // Valid escape sequences - see http://json.org/
+ break
+ case 'u':
+ if len(rs) < 4 {
+ return rs, tail, fmt.Errorf(`too short escape sequence: \u%s`, rs)
+ }
+ xs := rs[:4]
+ _, err := strconv.ParseUint(xs, 16, 16)
+ if err != nil {
+ return rs, tail, fmt.Errorf(`invalid escape sequence \u%s: %s`, xs, err)
+ }
+ rs = rs[4:]
+ default:
+ return rs, tail, fmt.Errorf(`unknown escape sequence \%c`, ch)
+ }
+ }
+}
+
+func validateNumber(s string) (string, error) {
+ if len(s) == 0 {
+ return s, fmt.Errorf("zero-length number")
+ }
+ if s[0] == '-' {
+ s = s[1:]
+ if len(s) == 0 {
+ return s, fmt.Errorf("missing number after minus")
+ }
+ }
+ i := 0
+ for i < len(s) {
+ if s[i] < '0' || s[i] > '9' {
+ break
+ }
+ i++
+ }
+ if i <= 0 {
+ return s, fmt.Errorf("expecting 0..9 digit, got %c", s[0])
+ }
+ if s[0] == '0' && i != 1 {
+ return s, fmt.Errorf("unexpected number starting from 0")
+ }
+ if i >= len(s) {
+ return "", nil
+ }
+ if s[i] == '.' {
+ // Validate fractional part
+ s = s[i+1:]
+ if len(s) == 0 {
+ return s, fmt.Errorf("missing fractional part")
+ }
+ i = 0
+ for i < len(s) {
+ if s[i] < '0' || s[i] > '9' {
+ break
+ }
+ i++
+ }
+ if i == 0 {
+ return s, fmt.Errorf("expecting 0..9 digit in fractional part, got %c", s[0])
+ }
+ if i >= len(s) {
+ return "", nil
+ }
+ }
+ if s[i] == 'e' || s[i] == 'E' {
+ // Validate exponent part
+ s = s[i+1:]
+ if len(s) == 0 {
+ return s, fmt.Errorf("missing exponent part")
+ }
+ if s[0] == '-' || s[0] == '+' {
+ s = s[1:]
+ if len(s) == 0 {
+ return s, fmt.Errorf("missing exponent part")
+ }
+ }
+ i = 0
+ for i < len(s) {
+ if s[i] < '0' || s[i] > '9' {
+ break
+ }
+ i++
+ }
+ if i == 0 {
+ return s, fmt.Errorf("expecting 0..9 digit in exponent part, got %c", s[0])
+ }
+ if i >= len(s) {
+ return "", nil
+ }
+ }
+ return s[i:], nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/LICENSE b/vendor/github.com/vektah/gqlparser/v2/LICENSE
similarity index 100%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/LICENSE
rename to vendor/github.com/vektah/gqlparser/v2/LICENSE
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/argmap.go b/vendor/github.com/vektah/gqlparser/v2/ast/argmap.go
similarity index 100%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/argmap.go
rename to vendor/github.com/vektah/gqlparser/v2/ast/argmap.go
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/collections.go b/vendor/github.com/vektah/gqlparser/v2/ast/collections.go
similarity index 100%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/collections.go
rename to vendor/github.com/vektah/gqlparser/v2/ast/collections.go
diff --git a/vendor/github.com/vektah/gqlparser/v2/ast/comment.go b/vendor/github.com/vektah/gqlparser/v2/ast/comment.go
new file mode 100644
index 0000000000..8fcfda5813
--- /dev/null
+++ b/vendor/github.com/vektah/gqlparser/v2/ast/comment.go
@@ -0,0 +1,31 @@
+package ast
+
+import (
+ "strconv"
+ "strings"
+)
+
+type Comment struct {
+ Value string
+ Position *Position
+}
+
+func (c *Comment) Text() string {
+ return strings.TrimPrefix(c.Value, "#")
+}
+
+type CommentGroup struct {
+ List []*Comment
+}
+
+func (c *CommentGroup) Dump() string {
+ if len(c.List) == 0 {
+ return ""
+ }
+ var builder strings.Builder
+ for _, comment := range c.List {
+ builder.WriteString(comment.Value)
+ builder.WriteString("\n")
+ }
+ return strconv.Quote(builder.String())
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/decode.go b/vendor/github.com/vektah/gqlparser/v2/ast/decode.go
similarity index 99%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/decode.go
rename to vendor/github.com/vektah/gqlparser/v2/ast/decode.go
index d00920554c..c9966b2440 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/decode.go
+++ b/vendor/github.com/vektah/gqlparser/v2/ast/decode.go
@@ -11,7 +11,7 @@ func UnmarshalSelectionSet(b []byte) (SelectionSet, error) {
return nil, err
}
- var result = make([]Selection, 0)
+ result := make([]Selection, 0)
for _, item := range tmp {
var field Field
if err := json.Unmarshal(item, &field); err == nil {
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/definition.go b/vendor/github.com/vektah/gqlparser/v2/ast/definition.go
similarity index 77%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/definition.go
rename to vendor/github.com/vektah/gqlparser/v2/ast/definition.go
index d203908168..9ceebf1bee 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/definition.go
+++ b/vendor/github.com/vektah/gqlparser/v2/ast/definition.go
@@ -29,8 +29,12 @@ type Definition struct {
Types []string // union
EnumValues EnumValueList // enum
- Position *Position `dump:"-"`
+ Position *Position `dump:"-" json:"-"`
BuiltIn bool `dump:"-"`
+
+ BeforeDescriptionComment *CommentGroup
+ AfterDescriptionComment *CommentGroup
+ EndOfDefinitionComment *CommentGroup
}
func (d *Definition) IsLeafType() bool {
@@ -65,7 +69,10 @@ type FieldDefinition struct {
DefaultValue *Value // only for input objects
Type *Type
Directives DirectiveList
- Position *Position `dump:"-"`
+ Position *Position `dump:"-" json:"-"`
+
+ BeforeDescriptionComment *CommentGroup
+ AfterDescriptionComment *CommentGroup
}
type ArgumentDefinition struct {
@@ -74,14 +81,20 @@ type ArgumentDefinition struct {
DefaultValue *Value
Type *Type
Directives DirectiveList
- Position *Position `dump:"-"`
+ Position *Position `dump:"-" json:"-"`
+
+ BeforeDescriptionComment *CommentGroup
+ AfterDescriptionComment *CommentGroup
}
type EnumValueDefinition struct {
Description string
Name string
Directives DirectiveList
- Position *Position `dump:"-"`
+ Position *Position `dump:"-" json:"-"`
+
+ BeforeDescriptionComment *CommentGroup
+ AfterDescriptionComment *CommentGroup
}
type DirectiveDefinition struct {
@@ -90,5 +103,8 @@ type DirectiveDefinition struct {
Arguments ArgumentDefinitionList
Locations []DirectiveLocation
IsRepeatable bool
- Position *Position `dump:"-"`
+ Position *Position `dump:"-" json:"-"`
+
+ BeforeDescriptionComment *CommentGroup
+ AfterDescriptionComment *CommentGroup
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/directive.go b/vendor/github.com/vektah/gqlparser/v2/ast/directive.go
similarity index 97%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/directive.go
rename to vendor/github.com/vektah/gqlparser/v2/ast/directive.go
index 5f6e8531f5..b11867c2e4 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/directive.go
+++ b/vendor/github.com/vektah/gqlparser/v2/ast/directive.go
@@ -30,7 +30,7 @@ const (
type Directive struct {
Name string
Arguments ArgumentList
- Position *Position `dump:"-"`
+ Position *Position `dump:"-" json:"-"`
// Requires validation
ParentDefinition *Definition
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/document.go b/vendor/github.com/vektah/gqlparser/v2/ast/document.go
similarity index 78%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/document.go
rename to vendor/github.com/vektah/gqlparser/v2/ast/document.go
index 43bfb54ff5..e2520ffb7c 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/document.go
+++ b/vendor/github.com/vektah/gqlparser/v2/ast/document.go
@@ -3,7 +3,8 @@ package ast
type QueryDocument struct {
Operations OperationList
Fragments FragmentDefinitionList
- Position *Position `dump:"-"`
+ Position *Position `dump:"-" json:"-"`
+ Comment *CommentGroup
}
type SchemaDocument struct {
@@ -12,7 +13,8 @@ type SchemaDocument struct {
Directives DirectiveDefinitionList
Definitions DefinitionList
Extensions DefinitionList
- Position *Position `dump:"-"`
+ Position *Position `dump:"-" json:"-"`
+ Comment *CommentGroup
}
func (d *SchemaDocument) Merge(other *SchemaDocument) {
@@ -24,9 +26,10 @@ func (d *SchemaDocument) Merge(other *SchemaDocument) {
}
type Schema struct {
- Query *Definition
- Mutation *Definition
- Subscription *Definition
+ Query *Definition
+ Mutation *Definition
+ Subscription *Definition
+ SchemaDirectives DirectiveList
Types map[string]*Definition
Directives map[string]*DirectiveDefinition
@@ -35,6 +38,8 @@ type Schema struct {
Implements map[string][]*Definition
Description string
+
+ Comment *CommentGroup
}
// AddTypes is the helper to add types definition to the schema
@@ -69,11 +74,16 @@ type SchemaDefinition struct {
Description string
Directives DirectiveList
OperationTypes OperationTypeDefinitionList
- Position *Position `dump:"-"`
+ Position *Position `dump:"-" json:"-"`
+
+ BeforeDescriptionComment *CommentGroup
+ AfterDescriptionComment *CommentGroup
+ EndOfDefinitionComment *CommentGroup
}
type OperationTypeDefinition struct {
Operation Operation
Type string
- Position *Position `dump:"-"`
+ Position *Position `dump:"-" json:"-"`
+ Comment *CommentGroup
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/dumper.go b/vendor/github.com/vektah/gqlparser/v2/ast/dumper.go
similarity index 92%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/dumper.go
rename to vendor/github.com/vektah/gqlparser/v2/ast/dumper.go
index dbb7a7efaf..e9ea88a12a 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/dumper.go
+++ b/vendor/github.com/vektah/gqlparser/v2/ast/dumper.go
@@ -40,13 +40,13 @@ func (d *dumper) dump(v reflect.Value) {
d.WriteString("false")
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- d.WriteString(fmt.Sprintf("%d", v.Int()))
+ fmt.Fprintf(d, "%d", v.Int())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- d.WriteString(fmt.Sprintf("%d", v.Uint()))
+ fmt.Fprintf(d, "%d", v.Uint())
case reflect.Float32, reflect.Float64:
- d.WriteString(fmt.Sprintf("%.2f", v.Float()))
+ fmt.Fprintf(d, "%.2f", v.Float())
case reflect.String:
if v.Type().Name() != "string" {
@@ -70,11 +70,11 @@ func (d *dumper) dump(v reflect.Value) {
}
func (d *dumper) writeIndent() {
- d.Buffer.WriteString(strings.Repeat(" ", d.indent))
+ d.WriteString(strings.Repeat(" ", d.indent))
}
func (d *dumper) nl() {
- d.Buffer.WriteByte('\n')
+ d.WriteByte('\n')
d.writeIndent()
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/fragment.go b/vendor/github.com/vektah/gqlparser/v2/ast/fragment.go
similarity index 78%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/fragment.go
rename to vendor/github.com/vektah/gqlparser/v2/ast/fragment.go
index 57ab56c7c6..05805e1085 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/fragment.go
+++ b/vendor/github.com/vektah/gqlparser/v2/ast/fragment.go
@@ -8,7 +8,8 @@ type FragmentSpread struct {
ObjectDefinition *Definition
Definition *FragmentDefinition
- Position *Position `dump:"-"`
+ Position *Position `dump:"-" json:"-"`
+ Comment *CommentGroup
}
type InlineFragment struct {
@@ -19,7 +20,8 @@ type InlineFragment struct {
// Require validation
ObjectDefinition *Definition
- Position *Position `dump:"-"`
+ Position *Position `dump:"-" json:"-"`
+ Comment *CommentGroup
}
type FragmentDefinition struct {
@@ -34,5 +36,6 @@ type FragmentDefinition struct {
// Require validation
Definition *Definition
- Position *Position `dump:"-"`
+ Position *Position `dump:"-" json:"-"`
+ Comment *CommentGroup
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/operation.go b/vendor/github.com/vektah/gqlparser/v2/ast/operation.go
similarity index 78%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/operation.go
rename to vendor/github.com/vektah/gqlparser/v2/ast/operation.go
index 3b37f81bf3..2efed025ba 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/operation.go
+++ b/vendor/github.com/vektah/gqlparser/v2/ast/operation.go
@@ -14,7 +14,8 @@ type OperationDefinition struct {
VariableDefinitions VariableDefinitionList
Directives DirectiveList
SelectionSet SelectionSet
- Position *Position `dump:"-"`
+ Position *Position `dump:"-" json:"-"`
+ Comment *CommentGroup
}
type VariableDefinition struct {
@@ -22,7 +23,8 @@ type VariableDefinition struct {
Type *Type
DefaultValue *Value
Directives DirectiveList
- Position *Position `dump:"-"`
+ Position *Position `dump:"-" json:"-"`
+ Comment *CommentGroup
// Requires validation
Definition *Definition
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/path.go b/vendor/github.com/vektah/gqlparser/v2/ast/path.go
similarity index 91%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/path.go
rename to vendor/github.com/vektah/gqlparser/v2/ast/path.go
index be1a9e4edb..f40aa953dd 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/path.go
+++ b/vendor/github.com/vektah/gqlparser/v2/ast/path.go
@@ -14,10 +14,15 @@ type PathElement interface {
isPathElement()
}
-var _ PathElement = PathIndex(0)
-var _ PathElement = PathName("")
+var (
+ _ PathElement = PathIndex(0)
+ _ PathElement = PathName("")
+)
func (path Path) String() string {
+ if path == nil {
+ return ""
+ }
var str bytes.Buffer
for i, v := range path {
switch v := v.(type) {
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/selection.go b/vendor/github.com/vektah/gqlparser/v2/ast/selection.go
similarity index 62%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/selection.go
rename to vendor/github.com/vektah/gqlparser/v2/ast/selection.go
index 5ef26c6ab3..1858dc2136 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/selection.go
+++ b/vendor/github.com/vektah/gqlparser/v2/ast/selection.go
@@ -11,9 +11,9 @@ func (*Field) isSelection() {}
func (*FragmentSpread) isSelection() {}
func (*InlineFragment) isSelection() {}
-func (s *Field) GetPosition() *Position { return s.Position }
+func (f *Field) GetPosition() *Position { return f.Position }
func (s *FragmentSpread) GetPosition() *Position { return s.Position }
-func (s *InlineFragment) GetPosition() *Position { return s.Position }
+func (f *InlineFragment) GetPosition() *Position { return f.Position }
type Field struct {
Alias string
@@ -21,7 +21,8 @@ type Field struct {
Arguments ArgumentList
Directives DirectiveList
SelectionSet SelectionSet
- Position *Position `dump:"-"`
+ Position *Position `dump:"-" json:"-"`
+ Comment *CommentGroup
// Require validation
Definition *FieldDefinition
@@ -31,9 +32,10 @@ type Field struct {
type Argument struct {
Name string
Value *Value
- Position *Position `dump:"-"`
+ Position *Position `dump:"-" json:"-"`
+ Comment *CommentGroup
}
-func (s *Field) ArgumentMap(vars map[string]interface{}) map[string]interface{} {
- return arg2map(s.Definition.Arguments, s.Arguments, vars)
+func (f *Field) ArgumentMap(vars map[string]interface{}) map[string]interface{} {
+ return arg2map(f.Definition.Arguments, f.Arguments, vars)
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/source.go b/vendor/github.com/vektah/gqlparser/v2/ast/source.go
similarity index 100%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/source.go
rename to vendor/github.com/vektah/gqlparser/v2/ast/source.go
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/type.go b/vendor/github.com/vektah/gqlparser/v2/ast/type.go
similarity index 96%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/type.go
rename to vendor/github.com/vektah/gqlparser/v2/ast/type.go
index 5f77bc7ce4..669f1da57e 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/type.go
+++ b/vendor/github.com/vektah/gqlparser/v2/ast/type.go
@@ -20,7 +20,7 @@ type Type struct {
NamedType string
Elem *Type
NonNull bool
- Position *Position `dump:"-"`
+ Position *Position `dump:"-" json:"-"`
}
func (t *Type) Name() string {
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/value.go b/vendor/github.com/vektah/gqlparser/v2/ast/value.go
similarity index 94%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/value.go
rename to vendor/github.com/vektah/gqlparser/v2/ast/value.go
index c25ef15059..45fa8016b5 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/value.go
+++ b/vendor/github.com/vektah/gqlparser/v2/ast/value.go
@@ -25,7 +25,8 @@ type Value struct {
Raw string
Children ChildValueList
Kind ValueKind
- Position *Position `dump:"-"`
+ Position *Position `dump:"-" json:"-"`
+ Comment *CommentGroup
// Require validation
Definition *Definition
@@ -36,7 +37,8 @@ type Value struct {
type ChildValue struct {
Name string
Value *Value
- Position *Position `dump:"-"`
+ Position *Position `dump:"-" json:"-"`
+ Comment *CommentGroup
}
func (v *Value) Value(vars map[string]interface{}) (interface{}, error) {
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/gqlerror/error.go b/vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go
similarity index 74%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/gqlerror/error.go
rename to vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go
index 58d1c1bd6c..d9f2028871 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/gqlerror/error.go
+++ b/vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go
@@ -1,17 +1,17 @@
package gqlerror
import (
- "bytes"
"errors"
"fmt"
"strconv"
+ "strings"
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
+ "github.com/vektah/gqlparser/v2/ast"
)
-// Error is the standard graphql error type described in https://facebook.github.io/graphql/draft/#sec-Errors
+// Error is the standard graphql error type described in https://spec.graphql.org/draft/#sec-Errors
type Error struct {
- err error `json:"-"`
+ Err error `json:"-"`
Message string `json:"message"`
Path ast.Path `json:"path,omitempty"`
Locations []Location `json:"locations,omitempty"`
@@ -38,7 +38,7 @@ type Location struct {
type List []*Error
func (err *Error) Error() string {
- var res bytes.Buffer
+ var res strings.Builder
if err == nil {
return ""
}
@@ -66,16 +66,23 @@ func (err *Error) Error() string {
return res.String()
}
-func (err Error) pathString() string {
+func (err *Error) pathString() string {
return err.Path.String()
}
-func (err Error) Unwrap() error {
- return err.err
+func (err *Error) Unwrap() error {
+ return err.Err
+}
+
+func (err *Error) AsError() error {
+ if err == nil {
+ return nil
+ }
+ return err
}
func (errs List) Error() string {
- var buf bytes.Buffer
+ var buf strings.Builder
for _, err := range errs {
buf.WriteString(err.Error())
buf.WriteByte('\n')
@@ -101,14 +108,48 @@ func (errs List) As(target interface{}) bool {
return false
}
+func (errs List) Unwrap() []error {
+ l := make([]error, len(errs))
+ for i, err := range errs {
+ l[i] = err
+ }
+ return l
+}
+
func WrapPath(path ast.Path, err error) *Error {
+ if err == nil {
+ return nil
+ }
return &Error{
- err: err,
+ Err: err,
Message: err.Error(),
Path: path,
}
}
+func Wrap(err error) *Error {
+ if err == nil {
+ return nil
+ }
+ return &Error{
+ Err: err,
+ Message: err.Error(),
+ }
+}
+
+func WrapIfUnwrapped(err error) *Error {
+ if err == nil {
+ return nil
+ }
+ if gqlErr, ok := err.(*Error); ok {
+ return gqlErr
+ }
+ return &Error{
+ Err: err,
+ Message: err.Error(),
+ }
+}
+
func Errorf(message string, args ...interface{}) *Error {
return &Error{
Message: fmt.Sprintf(message, args...),
@@ -123,6 +164,15 @@ func ErrorPathf(path ast.Path, message string, args ...interface{}) *Error {
}
func ErrorPosf(pos *ast.Position, message string, args ...interface{}) *Error {
+ if pos == nil {
+ return ErrorLocf(
+ "",
+ -1,
+ -1,
+ message,
+ args...,
+ )
+ }
return ErrorLocf(
pos.Src.Name,
pos.Line,
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/lexer/blockstring.go b/vendor/github.com/vektah/gqlparser/v2/lexer/blockstring.go
similarity index 100%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/lexer/blockstring.go
rename to vendor/github.com/vektah/gqlparser/v2/lexer/blockstring.go
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/lexer/lexer.go b/vendor/github.com/vektah/gqlparser/v2/lexer/lexer.go
similarity index 91%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/lexer/lexer.go
rename to vendor/github.com/vektah/gqlparser/v2/lexer/lexer.go
index f25555e650..1cbb4a0308 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/lexer/lexer.go
+++ b/vendor/github.com/vektah/gqlparser/v2/lexer/lexer.go
@@ -4,8 +4,8 @@ import (
"bytes"
"unicode/utf8"
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
- "github.com/open-policy-agent/opa/internal/gqlparser/gqlerror"
+ "github.com/vektah/gqlparser/v2/ast"
+ "github.com/vektah/gqlparser/v2/gqlerror"
)
// Lexer turns graphql request and schema strings into tokens
@@ -55,7 +55,7 @@ func (s *Lexer) makeValueToken(kind Type, value string) (Token, error) {
}, nil
}
-func (s *Lexer) makeError(format string, args ...interface{}) (Token, error) {
+func (s *Lexer) makeError(format string, args ...interface{}) (Token, *gqlerror.Error) {
column := s.endRunes - s.lineStartRunes + 1
return Token{
Kind: Invalid,
@@ -66,7 +66,7 @@ func (s *Lexer) makeError(format string, args ...interface{}) (Token, error) {
Column: column,
Src: s.Source,
},
- }, gqlerror.ErrorLocf(s.Source.Name, s.line, column, format, args...)
+ }, gqlerror.ErrorLocf(s.Name, s.line, column, format, args...)
}
// ReadToken gets the next token from the source starting at the given position.
@@ -74,8 +74,7 @@ func (s *Lexer) makeError(format string, args ...interface{}) (Token, error) {
// This skips over whitespace and comments until it finds the next lexable
// token, then lexes punctuators immediately or calls the appropriate helper
// function for more complicated tokens.
-func (s *Lexer) ReadToken() (token Token, err error) {
-
+func (s *Lexer) ReadToken() (Token, error) {
s.ws()
s.start = s.end
s.startRunes = s.endRunes
@@ -121,10 +120,7 @@ func (s *Lexer) ReadToken() (token Token, err error) {
case '|':
return s.makeValueToken(Pipe, "")
case '#':
- if comment, err := s.readComment(); err != nil {
- return comment, err
- }
- return s.ReadToken()
+ return s.readComment()
case '_', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z':
return s.readName()
@@ -258,7 +254,6 @@ func (s *Lexer) readNumber() (Token, error) {
return s.makeToken(Float)
}
return s.makeToken(Int)
-
}
// acceptByte if it matches any of given bytes, returning true if it found anything
@@ -321,8 +316,8 @@ func (s *Lexer) readString() (Token, error) {
}
switch r {
default:
- var char = rune(r)
- var w = 1
+ char := rune(r)
+ w := 1
// skip unicode overhead if we are in the ascii range
if r >= 127 {
@@ -426,17 +421,29 @@ func (s *Lexer) readBlockString() (Token, error) {
r := s.Input[s.end]
// Closing triple quote (""")
- if r == '"' && s.end+3 <= inputLen && s.Input[s.end:s.end+3] == `"""` {
- t, err := s.makeValueToken(BlockString, blockStringValue(buf.String()))
+ if r == '"' {
+ // Count consecutive quotes
+ quoteCount := 1
+ i := s.end + 1
+ for i < inputLen && s.Input[i] == '"' {
+ quoteCount++
+ i++
+ }
- // the token should not include the quotes in its value, but should cover them in its position
- t.Pos.Start -= 3
- t.Pos.End += 3
+ // If we have at least 3 quotes, use the last 3 as the closing quote
+ if quoteCount >= 3 {
+ // Add any extra quotes to the buffer (except the last 3)
+ for j := 0; j < quoteCount-3; j++ {
+ buf.WriteByte('"')
+ }
- // skip the close quote
- s.end += 3
- s.endRunes += 3
- return t, err
+ t, err := s.makeValueToken(BlockString, blockStringValue(buf.String()))
+ t.Pos.Start -= 3
+ t.Pos.End += 3
+ s.end += quoteCount
+ s.endRunes += quoteCount
+ return t, err
+ }
}
// SourceCharacter
@@ -444,11 +451,12 @@ func (s *Lexer) readBlockString() (Token, error) {
return s.makeError(`Invalid character within String: "\u%04d".`, r)
}
- if r == '\\' && s.end+4 <= inputLen && s.Input[s.end:s.end+4] == `\"""` {
+ switch {
+ case r == '\\' && s.end+4 <= inputLen && s.Input[s.end:s.end+4] == `\"""`:
buf.WriteString(`"""`)
s.end += 4
s.endRunes += 4
- } else if r == '\r' {
+ case r == '\r':
if s.end+1 < inputLen && s.Input[s.end+1] == '\n' {
s.end++
s.endRunes++
@@ -459,9 +467,9 @@ func (s *Lexer) readBlockString() (Token, error) {
s.endRunes++
s.line++
s.lineStartRunes = s.endRunes
- } else {
- var char = rune(r)
- var w = 1
+ default:
+ char := rune(r)
+ w := 1
// skip unicode overhead if we are in the ascii range
if r >= 127 {
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/lexer/lexer_test.yml b/vendor/github.com/vektah/gqlparser/v2/lexer/lexer_test.yml
similarity index 91%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/lexer/lexer_test.yml
rename to vendor/github.com/vektah/gqlparser/v2/lexer/lexer_test.yml
index 5c4d5f0ff5..0899f4ca9b 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/lexer/lexer_test.yml
+++ b/vendor/github.com/vektah/gqlparser/v2/lexer/lexer_test.yml
@@ -26,22 +26,38 @@ simple tokens:
column: 3
value: 'foo'
- - name: skips whitespace
- input: "\n\n foo\n\n\n"
+ - name: records line and column with comments
+ input: "\n\n\n#foo\n #bar\n foo\n"
tokens:
+ -
+ kind: COMMENT
+ start: 3
+ end: 7
+ line: 4
+ column: 0
+ value: '#foo'
+ -
+ kind: COMMENT
+ start: 10
+ end: 14
+ line: 5
+ column: 3
+ value: '#bar'
-
kind: NAME
- start: 6
- end: 9
+ start: 17
+ end: 20
+ line: 6
+ column: 3
value: 'foo'
- - name: skips comments
- input: "\n #comment\n foo#comment\n"
+ - name: skips whitespace
+ input: "\n\n foo\n\n\n"
tokens:
-
kind: NAME
- start: 18
- end: 21
+ start: 6
+ end: 9
value: 'foo'
- name: skips commas
@@ -78,6 +94,57 @@ simple tokens:
end: 1
value: a
+lexes comments:
+ - name: basic
+ input: '#simple'
+ tokens:
+ -
+ kind: COMMENT
+ start: 0
+ end: 7
+ value: '#simple'
+
+ - name: two lines
+ input: "#first\n#second"
+ tokens:
+ -
+ kind: COMMENT
+ start: 0
+ end: 6
+ value: "#first"
+ -
+ kind: COMMENT
+ start: 7
+ end: 14
+ value: "#second"
+
+ - name: whitespace
+ input: '# white space '
+ tokens:
+ -
+ kind: COMMENT
+ start: 0
+ end: 14
+ value: '# white space '
+
+ - name: not escaped
+ input: '#not escaped \n\r\b\t\f'
+ tokens:
+ -
+ kind: COMMENT
+ start: 0
+ end: 23
+ value: '#not escaped \n\r\b\t\f'
+
+ - name: slashes
+ input: '#slashes \\ \/'
+ tokens:
+ -
+ kind: COMMENT
+ start: 0
+ end: 14
+ value: '#slashes \\ \/'
+
lexes strings:
- name: basic
input: '"simple"'
@@ -674,7 +741,6 @@ lex reports useful unknown character error:
- name: question mark
input: "?"
error:
- message: 'Cannot parse the unexpected character "?".'
message: 'Cannot parse the unexpected character "?".'
locations: [{ line: 1, column: 1 }]
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/lexer/token.go b/vendor/github.com/vektah/gqlparser/v2/lexer/token.go
similarity index 97%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/lexer/token.go
rename to vendor/github.com/vektah/gqlparser/v2/lexer/token.go
index 79eefd0f4e..8985a7efb7 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/lexer/token.go
+++ b/vendor/github.com/vektah/gqlparser/v2/lexer/token.go
@@ -3,7 +3,7 @@ package lexer
import (
"strconv"
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
+ "github.com/vektah/gqlparser/v2/ast"
)
const (
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/parser.go b/vendor/github.com/vektah/gqlparser/v2/parser/parser.go
similarity index 54%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/parser.go
rename to vendor/github.com/vektah/gqlparser/v2/parser/parser.go
index c0d2b4a3b7..2aba983796 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/parser.go
+++ b/vendor/github.com/vektah/gqlparser/v2/parser/parser.go
@@ -3,9 +3,9 @@ package parser
import (
"strconv"
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
- "github.com/open-policy-agent/opa/internal/gqlparser/gqlerror"
- "github.com/open-policy-agent/opa/internal/gqlparser/lexer"
+ "github.com/vektah/gqlparser/v2/ast"
+ "github.com/vektah/gqlparser/v2/gqlerror"
+ "github.com/vektah/gqlparser/v2/lexer"
)
type parser struct {
@@ -17,6 +17,53 @@ type parser struct {
peekError error
prev lexer.Token
+
+ comment *ast.CommentGroup
+ commentConsuming bool
+
+ tokenCount int
+ maxTokenLimit int
+}
+
+func (p *parser) SetMaxTokenLimit(maxToken int) {
+ p.maxTokenLimit = maxToken
+}
+
+func (p *parser) consumeComment() (*ast.Comment, bool) {
+ if p.err != nil {
+ return nil, false
+ }
+ tok := p.peek()
+ if tok.Kind != lexer.Comment {
+ return nil, false
+ }
+ p.next()
+ return &ast.Comment{
+ Value: tok.Value,
+ Position: &tok.Pos,
+ }, true
+}
+
+func (p *parser) consumeCommentGroup() {
+ if p.err != nil {
+ return
+ }
+ if p.commentConsuming {
+ return
+ }
+ p.commentConsuming = true
+
+ var comments []*ast.Comment
+ for {
+ comment, ok := p.consumeComment()
+ if !ok {
+ break
+ }
+ comments = append(comments, comment)
+ }
+
+ p.comment = &ast.CommentGroup{List: comments}
+ p.commentConsuming = false
}
func (p *parser) peekPos() *ast.Position {
@@ -36,6 +83,9 @@ func (p *parser) peek() lexer.Token {
if !p.peeked {
p.peekToken, p.peekError = p.lexer.ReadToken()
p.peeked = true
+ if p.peekToken.Kind == lexer.Comment {
+ p.consumeCommentGroup()
+ }
}
return p.peekToken
@@ -52,33 +102,45 @@ func (p *parser) next() lexer.Token {
if p.err != nil {
return p.prev
}
+ // Increment the token count before reading the next token
+ p.tokenCount++
+ if p.maxTokenLimit != 0 && p.tokenCount > p.maxTokenLimit {
+ p.err = gqlerror.Errorf("exceeded token limit of %d", p.maxTokenLimit)
+ return p.prev
+ }
if p.peeked {
p.peeked = false
+ p.comment = nil
p.prev, p.err = p.peekToken, p.peekError
} else {
p.prev, p.err = p.lexer.ReadToken()
+ if p.prev.Kind == lexer.Comment {
+ p.consumeCommentGroup()
+ }
}
return p.prev
}
-func (p *parser) expectKeyword(value string) lexer.Token {
+func (p *parser) expectKeyword(value string) (lexer.Token, *ast.CommentGroup) {
tok := p.peek()
+ comment := p.comment
if tok.Kind == lexer.Name && tok.Value == value {
- return p.next()
+ return p.next(), comment
}
p.error(tok, "Expected %s, found %s", strconv.Quote(value), tok.String())
- return tok
+ return tok, comment
}
-func (p *parser) expect(kind lexer.Type) lexer.Token {
+func (p *parser) expect(kind lexer.Type) (lexer.Token, *ast.CommentGroup) {
tok := p.peek()
+ comment := p.comment
if tok.Kind == kind {
- return p.next()
+ return p.next(), comment
}
p.error(tok, "Expected %s, found %s", kind, tok.Kind.String())
- return tok
+ return tok, comment
}
func (p *parser) skip(kind lexer.Type) bool {
@@ -115,10 +177,10 @@ func (p *parser) many(start lexer.Type, end lexer.Type, cb func()) {
p.next()
}
-func (p *parser) some(start lexer.Type, end lexer.Type, cb func()) {
+func (p *parser) some(start lexer.Type, end lexer.Type, cb func()) *ast.CommentGroup {
hasDef := p.skip(start)
if !hasDef {
- return
+ return nil
}
called := false
@@ -129,8 +191,10 @@ func (p *parser) some(start lexer.Type, end lexer.Type, cb func()) {
if !called {
p.error(p.peek(), "expected at least one definition, found %s", p.peek().Kind.String())
- return
+ return nil
}
+ comment := p.comment
p.next()
+ return comment
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/query.go b/vendor/github.com/vektah/gqlparser/v2/parser/query.go
similarity index 85%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/query.go
rename to vendor/github.com/vektah/gqlparser/v2/parser/query.go
index 319425f587..47ac214a91 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/query.go
+++ b/vendor/github.com/vektah/gqlparser/v2/parser/query.go
@@ -1,15 +1,23 @@
package parser
import (
- "github.com/open-policy-agent/opa/internal/gqlparser/lexer"
+ "github.com/vektah/gqlparser/v2/lexer"
- //nolint:revive
- . "github.com/open-policy-agent/opa/internal/gqlparser/ast"
+ . "github.com/vektah/gqlparser/v2/ast" //nolint:staticcheck // bad, yeah
)
func ParseQuery(source *Source) (*QueryDocument, error) {
p := parser{
- lexer: lexer.New(source),
+ lexer: lexer.New(source),
+ maxTokenLimit: 0, // 0 means unlimited
+ }
+ return p.parseQueryDocument(), p.err
+}
+
+func ParseQueryWithTokenLimit(source *Source, maxTokenLimit int) (*QueryDocument, error) {
+ p := parser{
+ lexer: lexer.New(source),
+ maxTokenLimit: maxTokenLimit,
}
return p.parseQueryDocument(), p.err
}
@@ -45,6 +53,7 @@ func (p *parser) parseOperationDefinition() *OperationDefinition {
if p.peek().Kind == lexer.BraceL {
return &OperationDefinition{
Position: p.peekPos(),
+ Comment: p.comment,
Operation: Query,
SelectionSet: p.parseRequiredSelectionSet(),
}
@@ -52,6 +61,7 @@ func (p *parser) parseOperationDefinition() *OperationDefinition {
var od OperationDefinition
od.Position = p.peekPos()
+ od.Comment = p.comment
od.Operation = p.parseOperationType()
if p.peek().Kind == lexer.Name {
@@ -81,7 +91,7 @@ func (p *parser) parseOperationType() Operation {
func (p *parser) parseVariableDefinitions() VariableDefinitionList {
var defs []*VariableDefinition
- p.many(lexer.ParenL, lexer.ParenR, func() {
+ p.some(lexer.ParenL, lexer.ParenR, func() {
defs = append(defs, p.parseVariableDefinition())
})
@@ -91,6 +101,7 @@ func (p *parser) parseVariableDefinitions() VariableDefinitionList {
func (p *parser) parseVariableDefinition() *VariableDefinition {
var def VariableDefinition
def.Position = p.peekPos()
+ def.Comment = p.comment
def.Variable = p.parseVariable()
p.expect(lexer.Colon)
@@ -117,7 +128,7 @@ func (p *parser) parseOptionalSelectionSet() SelectionSet {
selections = append(selections, p.parseSelection())
})
- return SelectionSet(selections)
+ return selections
}
func (p *parser) parseRequiredSelectionSet() SelectionSet {
@@ -131,7 +142,7 @@ func (p *parser) parseRequiredSelectionSet() SelectionSet {
selections = append(selections, p.parseSelection())
})
- return SelectionSet(selections)
+ return selections
}
func (p *parser) parseSelection() Selection {
@@ -144,6 +155,7 @@ func (p *parser) parseSelection() Selection {
func (p *parser) parseField() *Field {
var field Field
field.Position = p.peekPos()
+ field.Comment = p.comment
field.Alias = p.parseName()
if p.skip(lexer.Colon) {
@@ -163,7 +175,7 @@ func (p *parser) parseField() *Field {
func (p *parser) parseArguments(isConst bool) ArgumentList {
var arguments ArgumentList
- p.many(lexer.ParenL, lexer.ParenR, func() {
+ p.some(lexer.ParenL, lexer.ParenR, func() {
arguments = append(arguments, p.parseArgument(isConst))
})
@@ -173,6 +185,7 @@ func (p *parser) parseArguments(isConst bool) ArgumentList {
func (p *parser) parseArgument(isConst bool) *Argument {
arg := Argument{}
arg.Position = p.peekPos()
+ arg.Comment = p.comment
arg.Name = p.parseName()
p.expect(lexer.Colon)
@@ -181,11 +194,12 @@ func (p *parser) parseArgument(isConst bool) *Argument {
}
func (p *parser) parseFragment() Selection {
- p.expect(lexer.Spread)
+ _, comment := p.expect(lexer.Spread)
if peek := p.peek(); peek.Kind == lexer.Name && peek.Value != "on" {
return &FragmentSpread{
Position: p.peekPos(),
+ Comment: comment,
Name: p.parseFragmentName(),
Directives: p.parseDirectives(false),
}
@@ -193,6 +207,7 @@ func (p *parser) parseFragment() Selection {
var def InlineFragment
def.Position = p.peekPos()
+ def.Comment = comment
if p.peek().Value == "on" {
p.next() // "on"
@@ -207,6 +222,7 @@ func (p *parser) parseFragment() Selection {
func (p *parser) parseFragmentDefinition() *FragmentDefinition {
var def FragmentDefinition
def.Position = p.peekPos()
+ def.Comment = p.comment
p.expectKeyword("fragment")
def.Name = p.parseFragmentName()
@@ -243,7 +259,7 @@ func (p *parser) parseValueLiteral(isConst bool) *Value {
p.unexpectedError()
return nil
}
- return &Value{Position: &token.Pos, Raw: p.parseVariable(), Kind: Variable}
+ return &Value{Position: &token.Pos, Comment: p.comment, Raw: p.parseVariable(), Kind: Variable}
case lexer.Int:
kind = IntValue
case lexer.Float:
@@ -268,32 +284,35 @@ func (p *parser) parseValueLiteral(isConst bool) *Value {
p.next()
- return &Value{Position: &token.Pos, Raw: token.Value, Kind: kind}
+ return &Value{Position: &token.Pos, Comment: p.comment, Raw: token.Value, Kind: kind}
}
func (p *parser) parseList(isConst bool) *Value {
var values ChildValueList
pos := p.peekPos()
+ comment := p.comment
p.many(lexer.BracketL, lexer.BracketR, func() {
values = append(values, &ChildValue{Value: p.parseValueLiteral(isConst)})
})
- return &Value{Children: values, Kind: ListValue, Position: pos}
+ return &Value{Children: values, Kind: ListValue, Position: pos, Comment: comment}
}
func (p *parser) parseObject(isConst bool) *Value {
var fields ChildValueList
pos := p.peekPos()
+ comment := p.comment
p.many(lexer.BraceL, lexer.BraceR, func() {
fields = append(fields, p.parseObjectField(isConst))
})
- return &Value{Children: fields, Kind: ObjectValue, Position: pos}
+ return &Value{Children: fields, Kind: ObjectValue, Position: pos, Comment: comment}
}
func (p *parser) parseObjectField(isConst bool) *ChildValue {
field := ChildValue{}
field.Position = p.peekPos()
+ field.Comment = p.comment
field.Name = p.parseName()
p.expect(lexer.Colon)
@@ -343,7 +362,7 @@ func (p *parser) parseTypeReference() *Type {
}
func (p *parser) parseName() string {
- token := p.expect(lexer.Name)
+ token, _ := p.expect(lexer.Name)
return token.Value
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/query_test.yml b/vendor/github.com/vektah/gqlparser/v2/parser/query_test.yml
similarity index 98%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/query_test.yml
rename to vendor/github.com/vektah/gqlparser/v2/parser/query_test.yml
index a46a01e718..ec0580f5fa 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/query_test.yml
+++ b/vendor/github.com/vektah/gqlparser/v2/parser/query_test.yml
@@ -436,6 +436,7 @@ large queries:
-
Alias: "id"
Name: "id"
+ Comment: "# Copyright (c) 2015-present, Facebook, Inc.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n"
-
Operation: Operation("mutation")
Name: "likeStory"
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/schema.go b/vendor/github.com/vektah/gqlparser/v2/parser/schema.go
similarity index 58%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/schema.go
rename to vendor/github.com/vektah/gqlparser/v2/parser/schema.go
index 32c293399b..804f02c9f8 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/schema.go
+++ b/vendor/github.com/vektah/gqlparser/v2/parser/schema.go
@@ -1,40 +1,72 @@
package parser
import (
- //nolint:revive
- . "github.com/open-policy-agent/opa/internal/gqlparser/ast"
- "github.com/open-policy-agent/opa/internal/gqlparser/lexer"
+ . "github.com/vektah/gqlparser/v2/ast" //nolint:staticcheck // bad, yeah
+ "github.com/vektah/gqlparser/v2/lexer"
)
+func ParseSchemas(inputs ...*Source) (*SchemaDocument, error) {
+ sd := &SchemaDocument{}
+ for _, input := range inputs {
+ inputAst, err := ParseSchema(input)
+ if err != nil {
+ return nil, err
+ }
+ sd.Merge(inputAst)
+ }
+ return sd, nil
+}
+
func ParseSchema(source *Source) (*SchemaDocument, error) {
p := parser{
- lexer: lexer.New(source),
+ lexer: lexer.New(source),
+ maxTokenLimit: 0, // default value is unlimited
}
- ast, err := p.parseSchemaDocument(), p.err
+ sd, err := p.parseSchemaDocument(), p.err
if err != nil {
return nil, err
}
- for _, def := range ast.Definitions {
+ for _, def := range sd.Definitions {
def.BuiltIn = source.BuiltIn
}
- for _, def := range ast.Extensions {
+ for _, def := range sd.Extensions {
def.BuiltIn = source.BuiltIn
}
- return ast, nil
+ return sd, nil
}
-func ParseSchemas(inputs ...*Source) (*SchemaDocument, error) {
- ast := &SchemaDocument{}
+func ParseSchemasWithLimit(maxTokenLimit int, inputs ...*Source) (*SchemaDocument, error) {
+ sd := &SchemaDocument{}
for _, input := range inputs {
- inputAst, err := ParseSchema(input)
+ inputAst, err := ParseSchemaWithLimit(input, maxTokenLimit)
if err != nil {
return nil, err
}
- ast.Merge(inputAst)
+ sd.Merge(inputAst)
}
- return ast, nil
+ return sd, nil
+}
+
+func ParseSchemaWithLimit(source *Source, maxTokenLimit int) (*SchemaDocument, error) {
+ p := parser{
+ lexer: lexer.New(source),
+ maxTokenLimit: maxTokenLimit, // 0 is unlimited
+ }
+ sd, err := p.parseSchemaDocument(), p.err
+ if err != nil {
+ return nil, err
+ }
+
+ for _, def := range sd.Definitions {
+ def.BuiltIn = source.BuiltIn
+ }
+ for _, def := range sd.Extensions {
+ def.BuiltIn = source.BuiltIn
+ }
+
+ return sd, nil
}
func (p *parser) parseSchemaDocument() *SchemaDocument {
@@ -45,7 +77,7 @@ func (p *parser) parseSchemaDocument() *SchemaDocument {
return nil
}
- var description string
+ var description descriptionWithComment
if p.peek().Kind == lexer.BlockString || p.peek().Kind == lexer.String {
description = p.parseDescription()
}
@@ -63,7 +95,7 @@ func (p *parser) parseSchemaDocument() *SchemaDocument {
case "directive":
doc.Directives = append(doc.Directives, p.parseDirectiveDefinition(description))
case "extend":
- if description != "" {
+ if description.text != "" {
p.unexpectedToken(p.prev)
}
p.parseTypeSystemExtension(&doc)
@@ -73,20 +105,26 @@ func (p *parser) parseSchemaDocument() *SchemaDocument {
}
}
+ // treat end of file comments
+ doc.Comment = p.comment
+
return &doc
}
-func (p *parser) parseDescription() string {
+func (p *parser) parseDescription() descriptionWithComment {
token := p.peek()
+ var desc descriptionWithComment
if token.Kind != lexer.BlockString && token.Kind != lexer.String {
- return ""
+ return desc
}
- return p.next().Value
+ desc.comment = p.comment
+ desc.text = p.next().Value
+ return desc
}
-func (p *parser) parseTypeSystemDefinition(description string) *Definition {
+func (p *parser) parseTypeSystemDefinition(description descriptionWithComment) *Definition {
tok := p.peek()
if tok.Kind != lexer.Name {
p.unexpectedError()
@@ -112,15 +150,17 @@ func (p *parser) parseTypeSystemDefinition(description string) *Definition {
}
}
-func (p *parser) parseSchemaDefinition(description string) *SchemaDefinition {
- p.expectKeyword("schema")
+func (p *parser) parseSchemaDefinition(description descriptionWithComment) *SchemaDefinition {
+ _, comment := p.expectKeyword("schema")
- def := SchemaDefinition{Description: description}
+ def := SchemaDefinition{}
def.Position = p.peekPos()
- def.Description = description
+ def.BeforeDescriptionComment = description.comment
+ def.Description = description.text
+ def.AfterDescriptionComment = comment
def.Directives = p.parseDirectives(true)
- p.some(lexer.BraceL, lexer.BraceR, func() {
+ def.EndOfDefinitionComment = p.some(lexer.BraceL, lexer.BraceR, func() {
def.OperationTypes = append(def.OperationTypes, p.parseOperationTypeDefinition())
})
return &def
@@ -129,35 +169,40 @@ func (p *parser) parseSchemaDefinition(description string) *SchemaDefinition {
func (p *parser) parseOperationTypeDefinition() *OperationTypeDefinition {
var op OperationTypeDefinition
op.Position = p.peekPos()
+ op.Comment = p.comment
op.Operation = p.parseOperationType()
p.expect(lexer.Colon)
op.Type = p.parseName()
return &op
}
-func (p *parser) parseScalarTypeDefinition(description string) *Definition {
- p.expectKeyword("scalar")
+func (p *parser) parseScalarTypeDefinition(description descriptionWithComment) *Definition {
+ _, comment := p.expectKeyword("scalar")
var def Definition
def.Position = p.peekPos()
+ def.BeforeDescriptionComment = description.comment
+ def.Description = description.text
+ def.AfterDescriptionComment = comment
def.Kind = Scalar
- def.Description = description
def.Name = p.parseName()
def.Directives = p.parseDirectives(true)
return &def
}
-func (p *parser) parseObjectTypeDefinition(description string) *Definition {
- p.expectKeyword("type")
+func (p *parser) parseObjectTypeDefinition(description descriptionWithComment) *Definition {
+ _, comment := p.expectKeyword("type")
var def Definition
def.Position = p.peekPos()
def.Kind = Object
- def.Description = description
+ def.BeforeDescriptionComment = description.comment
+ def.Description = description.text
+ def.AfterDescriptionComment = comment
def.Name = p.parseName()
def.Interfaces = p.parseImplementsInterfaces()
def.Directives = p.parseDirectives(true)
- def.Fields = p.parseFieldsDefinition()
+ def.Fields, def.EndOfDefinitionComment = p.parseFieldsDefinition()
return &def
}
@@ -176,18 +221,26 @@ func (p *parser) parseImplementsInterfaces() []string {
return types
}
-func (p *parser) parseFieldsDefinition() FieldList {
+func (p *parser) parseFieldsDefinition() (FieldList, *CommentGroup) {
var defs FieldList
- p.some(lexer.BraceL, lexer.BraceR, func() {
+ comment := p.some(lexer.BraceL, lexer.BraceR, func() {
defs = append(defs, p.parseFieldDefinition())
})
- return defs
+ return defs, comment
}
func (p *parser) parseFieldDefinition() *FieldDefinition {
var def FieldDefinition
def.Position = p.peekPos()
- def.Description = p.parseDescription()
+
+ desc := p.parseDescription()
+ if desc.text != "" {
+ def.BeforeDescriptionComment = desc.comment
+ def.Description = desc.text
+ }
+
+ p.peek() // peek to set p.comment
+ def.AfterDescriptionComment = p.comment
def.Name = p.parseName()
def.Arguments = p.parseArgumentDefs()
p.expect(lexer.Colon)
@@ -208,7 +261,15 @@ func (p *parser) parseArgumentDefs() ArgumentDefinitionList {
func (p *parser) parseArgumentDef() *ArgumentDefinition {
var def ArgumentDefinition
def.Position = p.peekPos()
- def.Description = p.parseDescription()
+
+ desc := p.parseDescription()
+ if desc.text != "" {
+ def.BeforeDescriptionComment = desc.comment
+ def.Description = desc.text
+ }
+
+ p.peek() // peek to set p.comment
+ def.AfterDescriptionComment = p.comment
def.Name = p.parseName()
p.expect(lexer.Colon)
def.Type = p.parseTypeReference()
@@ -222,7 +283,15 @@ func (p *parser) parseArgumentDef() *ArgumentDefinition {
func (p *parser) parseInputValueDef() *FieldDefinition {
var def FieldDefinition
def.Position = p.peekPos()
- def.Description = p.parseDescription()
+
+ desc := p.parseDescription()
+ if desc.text != "" {
+ def.BeforeDescriptionComment = desc.comment
+ def.Description = desc.text
+ }
+
+ p.peek() // peek to set p.comment
+ def.AfterDescriptionComment = p.comment
def.Name = p.parseName()
p.expect(lexer.Colon)
def.Type = p.parseTypeReference()
@@ -233,27 +302,31 @@ func (p *parser) parseInputValueDef() *FieldDefinition {
return &def
}
-func (p *parser) parseInterfaceTypeDefinition(description string) *Definition {
- p.expectKeyword("interface")
+func (p *parser) parseInterfaceTypeDefinition(description descriptionWithComment) *Definition {
+ _, comment := p.expectKeyword("interface")
var def Definition
def.Position = p.peekPos()
def.Kind = Interface
- def.Description = description
+ def.BeforeDescriptionComment = description.comment
+ def.Description = description.text
+ def.AfterDescriptionComment = comment
def.Name = p.parseName()
def.Interfaces = p.parseImplementsInterfaces()
def.Directives = p.parseDirectives(true)
- def.Fields = p.parseFieldsDefinition()
+ def.Fields, def.EndOfDefinitionComment = p.parseFieldsDefinition()
return &def
}
-func (p *parser) parseUnionTypeDefinition(description string) *Definition {
- p.expectKeyword("union")
+func (p *parser) parseUnionTypeDefinition(description descriptionWithComment) *Definition {
+ _, comment := p.expectKeyword("union")
var def Definition
def.Position = p.peekPos()
def.Kind = Union
- def.Description = description
+ def.BeforeDescriptionComment = description.comment
+ def.Description = description.text
+ def.AfterDescriptionComment = comment
def.Name = p.parseName()
def.Directives = p.parseDirectives(true)
def.Types = p.parseUnionMemberTypes()
@@ -274,87 +347,101 @@ func (p *parser) parseUnionMemberTypes() []string {
return types
}
-func (p *parser) parseEnumTypeDefinition(description string) *Definition {
- p.expectKeyword("enum")
+func (p *parser) parseEnumTypeDefinition(description descriptionWithComment) *Definition {
+ _, comment := p.expectKeyword("enum")
var def Definition
def.Position = p.peekPos()
def.Kind = Enum
- def.Description = description
+ def.BeforeDescriptionComment = description.comment
+ def.Description = description.text
+ def.AfterDescriptionComment = comment
def.Name = p.parseName()
def.Directives = p.parseDirectives(true)
- def.EnumValues = p.parseEnumValuesDefinition()
+ def.EnumValues, def.EndOfDefinitionComment = p.parseEnumValuesDefinition()
return &def
}
-func (p *parser) parseEnumValuesDefinition() EnumValueList {
+func (p *parser) parseEnumValuesDefinition() (EnumValueList, *CommentGroup) {
var values EnumValueList
- p.some(lexer.BraceL, lexer.BraceR, func() {
+ comment := p.some(lexer.BraceL, lexer.BraceR, func() {
values = append(values, p.parseEnumValueDefinition())
})
- return values
+ return values, comment
}
func (p *parser) parseEnumValueDefinition() *EnumValueDefinition {
- return &EnumValueDefinition{
- Position: p.peekPos(),
- Description: p.parseDescription(),
- Name: p.parseName(),
- Directives: p.parseDirectives(true),
+ var def EnumValueDefinition
+ def.Position = p.peekPos()
+ desc := p.parseDescription()
+ if desc.text != "" {
+ def.BeforeDescriptionComment = desc.comment
+ def.Description = desc.text
}
+
+ p.peek() // peek to set p.comment
+ def.AfterDescriptionComment = p.comment
+
+ def.Name = p.parseName()
+ def.Directives = p.parseDirectives(true)
+
+ return &def
}
-func (p *parser) parseInputObjectTypeDefinition(description string) *Definition {
- p.expectKeyword("input")
+func (p *parser) parseInputObjectTypeDefinition(description descriptionWithComment) *Definition {
+ _, comment := p.expectKeyword("input")
var def Definition
def.Position = p.peekPos()
def.Kind = InputObject
- def.Description = description
+ def.BeforeDescriptionComment = description.comment
+ def.Description = description.text
+ def.AfterDescriptionComment = comment
def.Name = p.parseName()
def.Directives = p.parseDirectives(true)
- def.Fields = p.parseInputFieldsDefinition()
+ def.Fields, def.EndOfDefinitionComment = p.parseInputFieldsDefinition()
return &def
}
-func (p *parser) parseInputFieldsDefinition() FieldList {
+func (p *parser) parseInputFieldsDefinition() (FieldList, *CommentGroup) {
var values FieldList
- p.some(lexer.BraceL, lexer.BraceR, func() {
+ comment := p.some(lexer.BraceL, lexer.BraceR, func() {
values = append(values, p.parseInputValueDef())
})
- return values
+ return values, comment
}
func (p *parser) parseTypeSystemExtension(doc *SchemaDocument) {
- p.expectKeyword("extend")
+ _, comment := p.expectKeyword("extend")
switch p.peek().Value {
case "schema":
- doc.SchemaExtension = append(doc.SchemaExtension, p.parseSchemaExtension())
+ doc.SchemaExtension = append(doc.SchemaExtension, p.parseSchemaExtension(comment))
case "scalar":
- doc.Extensions = append(doc.Extensions, p.parseScalarTypeExtension())
+ doc.Extensions = append(doc.Extensions, p.parseScalarTypeExtension(comment))
case "type":
- doc.Extensions = append(doc.Extensions, p.parseObjectTypeExtension())
+ doc.Extensions = append(doc.Extensions, p.parseObjectTypeExtension(comment))
case "interface":
- doc.Extensions = append(doc.Extensions, p.parseInterfaceTypeExtension())
+ doc.Extensions = append(doc.Extensions, p.parseInterfaceTypeExtension(comment))
case "union":
- doc.Extensions = append(doc.Extensions, p.parseUnionTypeExtension())
+ doc.Extensions = append(doc.Extensions, p.parseUnionTypeExtension(comment))
case "enum":
- doc.Extensions = append(doc.Extensions, p.parseEnumTypeExtension())
+ doc.Extensions = append(doc.Extensions, p.parseEnumTypeExtension(comment))
case "input":
- doc.Extensions = append(doc.Extensions, p.parseInputObjectTypeExtension())
+ doc.Extensions = append(doc.Extensions, p.parseInputObjectTypeExtension(comment))
default:
p.unexpectedError()
}
}
-func (p *parser) parseSchemaExtension() *SchemaDefinition {
+func (p *parser) parseSchemaExtension(comment *CommentGroup) *SchemaDefinition {
p.expectKeyword("schema")
var def SchemaDefinition
def.Position = p.peekPos()
+ def.AfterDescriptionComment = comment
def.Directives = p.parseDirectives(true)
- p.some(lexer.BraceL, lexer.BraceR, func() {
+ def.EndOfDefinitionComment = p.some(lexer.BraceL, lexer.BraceR, func() {
def.OperationTypes = append(def.OperationTypes, p.parseOperationTypeDefinition())
})
if len(def.Directives) == 0 && len(def.OperationTypes) == 0 {
@@ -363,11 +450,12 @@ func (p *parser) parseSchemaExtension() *SchemaDefinition {
return &def
}
-func (p *parser) parseScalarTypeExtension() *Definition {
+func (p *parser) parseScalarTypeExtension(comment *CommentGroup) *Definition {
p.expectKeyword("scalar")
var def Definition
def.Position = p.peekPos()
+ def.AfterDescriptionComment = comment
def.Kind = Scalar
def.Name = p.parseName()
def.Directives = p.parseDirectives(true)
@@ -377,42 +465,45 @@ func (p *parser) parseScalarTypeExtension() *Definition {
return &def
}
-func (p *parser) parseObjectTypeExtension() *Definition {
+func (p *parser) parseObjectTypeExtension(comment *CommentGroup) *Definition {
p.expectKeyword("type")
var def Definition
def.Position = p.peekPos()
+ def.AfterDescriptionComment = comment
def.Kind = Object
def.Name = p.parseName()
def.Interfaces = p.parseImplementsInterfaces()
def.Directives = p.parseDirectives(true)
- def.Fields = p.parseFieldsDefinition()
+ def.Fields, def.EndOfDefinitionComment = p.parseFieldsDefinition()
if len(def.Interfaces) == 0 && len(def.Directives) == 0 && len(def.Fields) == 0 {
p.unexpectedError()
}
return &def
}
-func (p *parser) parseInterfaceTypeExtension() *Definition {
+func (p *parser) parseInterfaceTypeExtension(comment *CommentGroup) *Definition {
p.expectKeyword("interface")
var def Definition
def.Position = p.peekPos()
+ def.AfterDescriptionComment = comment
def.Kind = Interface
def.Name = p.parseName()
def.Directives = p.parseDirectives(true)
- def.Fields = p.parseFieldsDefinition()
+ def.Fields, def.EndOfDefinitionComment = p.parseFieldsDefinition()
if len(def.Directives) == 0 && len(def.Fields) == 0 {
p.unexpectedError()
}
return &def
}
-func (p *parser) parseUnionTypeExtension() *Definition {
+func (p *parser) parseUnionTypeExtension(comment *CommentGroup) *Definition {
p.expectKeyword("union")
var def Definition
def.Position = p.peekPos()
+ def.AfterDescriptionComment = comment
def.Kind = Union
def.Name = p.parseName()
def.Directives = p.parseDirectives(true)
@@ -424,43 +515,47 @@ func (p *parser) parseUnionTypeExtension() *Definition {
return &def
}
-func (p *parser) parseEnumTypeExtension() *Definition {
+func (p *parser) parseEnumTypeExtension(comment *CommentGroup) *Definition {
p.expectKeyword("enum")
var def Definition
def.Position = p.peekPos()
+ def.AfterDescriptionComment = comment
def.Kind = Enum
def.Name = p.parseName()
def.Directives = p.parseDirectives(true)
- def.EnumValues = p.parseEnumValuesDefinition()
+ def.EnumValues, def.EndOfDefinitionComment = p.parseEnumValuesDefinition()
if len(def.Directives) == 0 && len(def.EnumValues) == 0 {
p.unexpectedError()
}
return &def
}
-func (p *parser) parseInputObjectTypeExtension() *Definition {
+func (p *parser) parseInputObjectTypeExtension(comment *CommentGroup) *Definition {
p.expectKeyword("input")
var def Definition
def.Position = p.peekPos()
+ def.AfterDescriptionComment = comment
def.Kind = InputObject
def.Name = p.parseName()
def.Directives = p.parseDirectives(false)
- def.Fields = p.parseInputFieldsDefinition()
+ def.Fields, def.EndOfDefinitionComment = p.parseInputFieldsDefinition()
if len(def.Directives) == 0 && len(def.Fields) == 0 {
p.unexpectedError()
}
return &def
}
-func (p *parser) parseDirectiveDefinition(description string) *DirectiveDefinition {
- p.expectKeyword("directive")
+func (p *parser) parseDirectiveDefinition(description descriptionWithComment) *DirectiveDefinition {
+ _, comment := p.expectKeyword("directive")
p.expect(lexer.At)
var def DirectiveDefinition
def.Position = p.peekPos()
- def.Description = description
+ def.BeforeDescriptionComment = description.comment
+ def.Description = description.text
+ def.AfterDescriptionComment = comment
def.Name = p.parseName()
def.Arguments = p.parseArgumentDefs()
@@ -487,7 +582,7 @@ func (p *parser) parseDirectiveLocations() []DirectiveLocation {
}
func (p *parser) parseDirectiveLocation() DirectiveLocation {
- name := p.expect(lexer.Name)
+ name, _ := p.expect(lexer.Name)
switch name.Value {
case `QUERY`:
@@ -533,3 +628,8 @@ func (p *parser) parseDirectiveLocation() DirectiveLocation {
p.unexpectedToken(name)
return ""
}
+
+type descriptionWithComment struct {
+ text string
+ comment *CommentGroup
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/schema_test.yml b/vendor/github.com/vektah/gqlparser/v2/parser/schema_test.yml
similarity index 81%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/schema_test.yml
rename to vendor/github.com/vektah/gqlparser/v2/parser/schema_test.yml
index 8b6a5d0ca3..705514a995 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/schema_test.yml
+++ b/vendor/github.com/vektah/gqlparser/v2/parser/schema_test.yml
@@ -15,6 +15,67 @@ object types:
Name: "world"
Type: String
+ - name: with comments
+ input: |
+ # Hello
+ # Hello another
+ type Hello {
+ # World
+ # World another
+ world: String
+ # end of type comments
+ }
+ # end of file comments
+ ast: |
+
+ Definitions: [Definition]
+ -
+ Kind: DefinitionKind("OBJECT")
+ Name: "Hello"
+ Fields: [FieldDefinition]
+ -
+ Name: "world"
+ Type: String
+ AfterDescriptionComment: "# World\n# World another\n"
+ AfterDescriptionComment: "# Hello\n# Hello another\n"
+ EndOfDefinitionComment: "# end of type comments\n"
+ Comment: "# end of file comments\n"
+
+ - name: with comments and description
+ input: |
+ # Hello
+ # Hello another
+ "type description"
+ # Hello after description
+ # Hello after description another
+ type Hello {
+ # World
+ # World another
+ "field description"
+ # World after description
+ # World after description another
+ world: String
+ # end of definition coments
+ # end of definition comments another
+ }
+ ast: |
+
+ Definitions: [Definition]
+ -
+ Kind: DefinitionKind("OBJECT")
+ Description: "type description"
+ Name: "Hello"
+ Fields: [FieldDefinition]
+ -
+ Description: "field description"
+ Name: "world"
+ Type: String
+ BeforeDescriptionComment: "# World\n# World another\n"
+ AfterDescriptionComment: "# World after description\n# World after description another\n"
+ BeforeDescriptionComment: "# Hello\n# Hello another\n"
+ AfterDescriptionComment: "# Hello after description\n# Hello after description another\n"
+ EndOfDefinitionComment: "# end of definition coments\n# end of definition comments another\n"
+
- name: with description
input: |
"Description"
@@ -35,6 +96,7 @@ object types:
- name: with block description
input: |
+ # Before description comment
"""
Description
"""
@@ -53,6 +115,8 @@ object types:
-
Name: "world"
Type: String
+ BeforeDescriptionComment: "# Before description comment\n"
+ AfterDescriptionComment: "# Even with comments between them\n"
- name: with field arg
input: |
type Hello {
@@ -146,8 +210,11 @@ object types:
type extensions:
- name: Object extension
input: |
+ # comment
extend type Hello {
+ # comment world
world: String
+ # end of definition comment
}
ast: |
@@ -159,6 +226,9 @@ type extensions:
-
Name: "world"
Type: String
+ AfterDescriptionComment: "# comment world\n"
+ AfterDescriptionComment: "# comment\n"
+ EndOfDefinitionComment: "# end of definition comment\n"
- name: without any fields
input: "extend type Hello implements Greeting"
@@ -277,6 +347,30 @@ schema definition:
Operation: Operation("query")
Type: "Query"
+ - name: with comments and description
+ input: |
+ # before description comment
+ "description"
+ # after description comment
+ schema {
+ # before field comment
+ query: Query
+ # after field comment
+ }
+ ast: |
+
+ Schema: [SchemaDefinition]
+ -
+ Description: "description"
+ OperationTypes: [OperationTypeDefinition]
+ -
+ Operation: Operation("query")
+ Type: "Query"
+ Comment: "# before field comment\n"
+ BeforeDescriptionComment: "# before description comment\n"
+ AfterDescriptionComment: "# after description comment\n"
+ EndOfDefinitionComment: "# after field comment\n"
+
schema extensions:
- name: simple
input: |
@@ -292,6 +386,26 @@ schema extensions:
Operation: Operation("mutation")
Type: "Mutation"
+ - name: with comment and description
+ input: |
+ # before extend comment
+ extend schema {
+ # before field comment
+ mutation: Mutation
+ # after field comment
+ }
+ ast: |
+
+ SchemaExtension: [SchemaDefinition]
+ -
+ OperationTypes: [OperationTypeDefinition]
+ -
+ Operation: Operation("mutation")
+ Type: "Mutation"
+ Comment: "# before field comment\n"
+ AfterDescriptionComment: "# before extend comment\n"
+ EndOfDefinitionComment: "# after field comment\n"
+
- name: directive only
input: "extend schema @directive"
ast: |
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/core/core.go b/vendor/github.com/vektah/gqlparser/v2/validator/core/core.go
new file mode 100644
index 0000000000..7a6295c2f9
--- /dev/null
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/core/core.go
@@ -0,0 +1,24 @@
+package core
+
+import (
+ "github.com/vektah/gqlparser/v2/gqlerror"
+)
+
+type AddErrFunc func(options ...ErrorOption)
+
+type RuleFunc func(observers *Events, addError AddErrFunc)
+
+type Rule struct {
+ Name string
+ RuleFunc RuleFunc
+}
+
+// NameSorter sorts Rules by name.
+// usage: sort.Sort(core.NameSorter(specifiedRules))
+type NameSorter []Rule
+
+func (a NameSorter) Len() int { return len(a) }
+func (a NameSorter) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a NameSorter) Less(i, j int) bool { return a[i].Name < a[j].Name }
+
+type ErrorOption func(err *gqlerror.Error)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/core/helpers.go b/vendor/github.com/vektah/gqlparser/v2/validator/core/helpers.go
new file mode 100644
index 0000000000..b395a8402b
--- /dev/null
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/core/helpers.go
@@ -0,0 +1,154 @@
+package core
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "sort"
+ "strings"
+
+ "github.com/agnivade/levenshtein"
+ "github.com/vektah/gqlparser/v2/ast"
+ "github.com/vektah/gqlparser/v2/gqlerror"
+)
+
+func Message(msg string, args ...interface{}) ErrorOption {
+ return func(err *gqlerror.Error) {
+ err.Message += fmt.Sprintf(msg, args...)
+ }
+}
+
+func At(position *ast.Position) ErrorOption {
+ return func(err *gqlerror.Error) {
+ if position == nil {
+ return
+ }
+ err.Locations = append(err.Locations, gqlerror.Location{
+ Line: position.Line,
+ Column: position.Column,
+ })
+ if position.Src.Name != "" {
+ err.SetFile(position.Src.Name)
+ }
+ }
+}
+
+func SuggestListQuoted(prefix string, typed string, suggestions []string) ErrorOption {
+ suggested := SuggestionList(typed, suggestions)
+ return func(err *gqlerror.Error) {
+ if len(suggested) > 0 {
+ err.Message += " " + prefix + " " + QuotedOrList(suggested...) + "?"
+ }
+ }
+}
+
+func SuggestListUnquoted(prefix string, typed string, suggestions []string) ErrorOption {
+ suggested := SuggestionList(typed, suggestions)
+ return func(err *gqlerror.Error) {
+ if len(suggested) > 0 {
+ err.Message += " " + prefix + " " + OrList(suggested...) + "?"
+ }
+ }
+}
+
+func Suggestf(suggestion string, args ...interface{}) ErrorOption {
+ return func(err *gqlerror.Error) {
+ err.Message += " Did you mean " + fmt.Sprintf(suggestion, args...) + "?"
+ }
+}
+
+// Given [ A, B, C ] return '"A", "B", or "C"'.
+func QuotedOrList(items ...string) string {
+ itemsQuoted := make([]string, len(items))
+ for i, item := range items {
+ itemsQuoted[i] = `"` + item + `"`
+ }
+ return OrList(itemsQuoted...)
+}
+
+// Given [ A, B, C ] return 'A, B, or C'.
+func OrList(items ...string) string {
+ var buf bytes.Buffer
+
+ if len(items) > 5 {
+ items = items[:5]
+ }
+ if len(items) == 2 {
+ buf.WriteString(items[0])
+ buf.WriteString(" or ")
+ buf.WriteString(items[1])
+ return buf.String()
+ }
+
+ for i, item := range items {
+ if i != 0 {
+ if i == len(items)-1 {
+ buf.WriteString(", or ")
+ } else {
+ buf.WriteString(", ")
+ }
+ }
+ buf.WriteString(item)
+ }
+ return buf.String()
+}
+
+// Given an invalid input string and a list of valid options, returns a filtered
+// list of valid options sorted based on their similarity with the input.
+func SuggestionList(input string, options []string) []string {
+ var results []string
+ optionsByDistance := map[string]int{}
+
+ for _, option := range options {
+ distance := lexicalDistance(input, option)
+ threshold := calcThreshold(input)
+ if distance <= threshold {
+ results = append(results, option)
+ optionsByDistance[option] = distance
+ }
+ }
+
+ sort.Slice(results, func(i, j int) bool {
+ return optionsByDistance[results[i]] < optionsByDistance[results[j]]
+ })
+ return results
+}
+
+func calcThreshold(a string) (threshold int) {
+ // the logic is copied from here
+ // https://github.com/graphql/graphql-js/blob/47bd8c8897c72d3efc17ecb1599a95cee6bac5e8/src/jsutils/suggestionList.ts#L14
+ threshold = int(math.Floor(float64(len(a))*0.4) + 1)
+
+ if threshold < 1 {
+ threshold = 1
+ }
+ return
+}
+
+// Computes the lexical distance between strings A and B.
+//
+// The "distance" between two strings is given by counting the minimum number
+// of edits needed to transform string A into string B. An edit can be an
+// insertion, deletion, or substitution of a single character, or a swap of two
+// adjacent characters.
+//
+// Includes a custom alteration from Damerau-Levenshtein to treat case changes
+// as a single edit which helps identify mis-cased values with an edit distance
+// of 1.
+//
+// This distance can be useful for detecting typos in input or sorting
+func lexicalDistance(a, b string) int {
+ if a == b {
+ return 0
+ }
+
+ a = strings.ToLower(a)
+ b = strings.ToLower(b)
+
+ // Any case change counts as a single edit
+ if a == b {
+ return 1
+ }
+
+ return levenshtein.ComputeDistance(a, b)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/walk.go b/vendor/github.com/vektah/gqlparser/v2/validator/core/walk.go
similarity index 98%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/walk.go
rename to vendor/github.com/vektah/gqlparser/v2/validator/core/walk.go
index f722871869..09a3016fd4 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/walk.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/core/walk.go
@@ -1,10 +1,10 @@
-package validator
+package core
import (
"context"
"fmt"
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
+ "github.com/vektah/gqlparser/v2/ast"
)
type Events struct {
@@ -22,27 +22,35 @@ type Events struct {
func (o *Events) OnOperation(f func(walker *Walker, operation *ast.OperationDefinition)) {
o.operationVisitor = append(o.operationVisitor, f)
}
+
func (o *Events) OnField(f func(walker *Walker, field *ast.Field)) {
o.field = append(o.field, f)
}
+
func (o *Events) OnFragment(f func(walker *Walker, fragment *ast.FragmentDefinition)) {
o.fragment = append(o.fragment, f)
}
+
func (o *Events) OnInlineFragment(f func(walker *Walker, inlineFragment *ast.InlineFragment)) {
o.inlineFragment = append(o.inlineFragment, f)
}
+
func (o *Events) OnFragmentSpread(f func(walker *Walker, fragmentSpread *ast.FragmentSpread)) {
o.fragmentSpread = append(o.fragmentSpread, f)
}
+
func (o *Events) OnDirective(f func(walker *Walker, directive *ast.Directive)) {
o.directive = append(o.directive, f)
}
+
func (o *Events) OnDirectiveList(f func(walker *Walker, directives []*ast.Directive)) {
o.directiveList = append(o.directiveList, f)
}
+
func (o *Events) OnValue(f func(walker *Walker, value *ast.Value)) {
o.value = append(o.value, f)
}
+
func (o *Events) OnVariable(f func(walker *Walker, variable *ast.VariableDefinition)) {
o.variable = append(o.variable, f)
}
@@ -277,7 +285,7 @@ func (w *Walker) walkSelection(parentDef *ast.Definition, it ast.Selection) {
w.walkDirectives(nextParentDef, it.Directives, ast.LocationFragmentSpread)
if def != nil && !w.validatedFragmentSpreads[def.Name] {
- // prevent inifinite recursion
+ // prevent infinite recursion
w.validatedFragmentSpreads[def.Name] = true
w.walkSelectionSet(nextParentDef, def.SelectionSet)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/imported/prelude.graphql b/vendor/github.com/vektah/gqlparser/v2/validator/imported/prelude.graphql
new file mode 100644
index 0000000000..8be3d2f5b6
--- /dev/null
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/imported/prelude.graphql
@@ -0,0 +1,250 @@
+# This file defines all the implicitly declared types that are required by the graphql spec. It is implicitly included by calls to LoadSchema
+
+"The `Int` scalar type represents non-fractional signed whole numeric values. Int can represent values between -(2^31) and 2^31 - 1."
+scalar Int
+
+"The `Float` scalar type represents signed double-precision fractional values as specified by [IEEE 754](http://en.wikipedia.org/wiki/IEEE_floating_point)."
+scalar Float
+
+"The `String`scalar type represents textual data, represented as UTF-8 character sequences. The String type is most often used by GraphQL to represent free-form human-readable text."
+scalar String
+
+"The `Boolean` scalar type represents `true` or `false`."
+scalar Boolean
+
+"""The `ID` scalar type represents a unique identifier, often used to refetch an object or as key for a cache. The ID type appears in a JSON response as a String; however, it is not intended to be human-readable. When expected as an input type, any string (such as "4") or integer (such as 4) input value will be accepted as an ID."""
+scalar ID
+
+"Directs the executor to defer this fragment when the `if` argument is true or undefined."
+directive @defer(
+ "Deferred when true or undefined."
+ if: Boolean = true,
+ "Unique name"
+ label: String
+) on FRAGMENT_SPREAD | INLINE_FRAGMENT
+
+"""
+Directs the executor to include this field or fragment only when the `if` argument is true.
+"""
+directive @include(
+ """Included when true."""
+ if: Boolean!
+) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
+
+"""
+Directs the executor to skip this field or fragment when the `if` argument is true.
+"""
+directive @skip(
+ """Skipped when true."""
+ if: Boolean!
+) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
+
+"""Marks an element of a GraphQL schema as no longer supported."""
+directive @deprecated(
+ """
+ Explains why this element was deprecated, usually also including a suggestion for how to access supported similar data. Formatted using the Markdown syntax, as specified by [CommonMark](https://commonmark.org/).
+ """
+ reason: String = "No longer supported"
+) on FIELD_DEFINITION | ARGUMENT_DEFINITION | INPUT_FIELD_DEFINITION | ENUM_VALUE
+
+"""Exposes a URL that specifies the behavior of this scalar."""
+directive @specifiedBy(
+ """The URL that specifies the behavior of this scalar."""
+ url: String!
+) on SCALAR
+
+"""
+Indicates exactly one field must be supplied and this field must not be `null`.
+"""
+directive @oneOf on INPUT_OBJECT
+
+"""
+A GraphQL Schema defines the capabilities of a GraphQL server. It exposes all available types and directives on the server, as well as the entry points for query, mutation, and subscription operations.
+"""
+type __Schema {
+ description: String
+
+ """A list of all types supported by this server."""
+ types: [__Type!]!
+
+ """The type that query operations will be rooted at."""
+ queryType: __Type!
+
+ """
+ If this server supports mutation, the type that mutation operations will be rooted at.
+ """
+ mutationType: __Type
+
+ """
+ If this server support subscription, the type that subscription operations will be rooted at.
+ """
+ subscriptionType: __Type
+
+ """A list of all directives supported by this server."""
+ directives: [__Directive!]!
+}
+
+"""
+The fundamental unit of any GraphQL Schema is the type. There are many kinds of types in GraphQL as represented by the `__TypeKind` enum.
+
+Depending on the kind of a type, certain fields describe information about that type. Scalar types provide no information beyond a name, description and optional `specifiedByURL`, while Enum types provide their values. Object and Interface types provide the fields they describe. Abstract types, Union and Interface, provide the Object types possible at runtime. List and NonNull types compose other types.
+"""
+type __Type {
+ kind: __TypeKind!
+ name: String
+ description: String
+ specifiedByURL: String
+ fields(includeDeprecated: Boolean = false): [__Field!]
+ interfaces: [__Type!]
+ possibleTypes: [__Type!]
+ enumValues(includeDeprecated: Boolean = false): [__EnumValue!]
+ inputFields(includeDeprecated: Boolean = false): [__InputValue!]
+ ofType: __Type
+ isOneOf: Boolean
+}
+
+"""An enum describing what kind of type a given `__Type` is."""
+enum __TypeKind {
+ """Indicates this type is a scalar."""
+ SCALAR
+
+ """
+ Indicates this type is an object. `fields` and `interfaces` are valid fields.
+ """
+ OBJECT
+
+ """
+ Indicates this type is an interface. `fields`, `interfaces`, and `possibleTypes` are valid fields.
+ """
+ INTERFACE
+
+ """Indicates this type is a union. `possibleTypes` is a valid field."""
+ UNION
+
+ """Indicates this type is an enum. `enumValues` is a valid field."""
+ ENUM
+
+ """
+ Indicates this type is an input object. `inputFields` is a valid field.
+ """
+ INPUT_OBJECT
+
+ """Indicates this type is a list. `ofType` is a valid field."""
+ LIST
+
+ """Indicates this type is a non-null. `ofType` is a valid field."""
+ NON_NULL
+}
+
+"""
+Object and Interface types are described by a list of Fields, each of which has a name, potentially a list of arguments, and a return type.
+"""
+type __Field {
+ name: String!
+ description: String
+ args(includeDeprecated: Boolean = false): [__InputValue!]!
+ type: __Type!
+ isDeprecated: Boolean!
+ deprecationReason: String
+}
+
+"""
+Arguments provided to Fields or Directives and the input fields of an InputObject are represented as Input Values which describe their type and optionally a default value.
+"""
+type __InputValue {
+ name: String!
+ description: String
+ type: __Type!
+
+ """
+ A GraphQL-formatted string representing the default value for this input value.
+ """
+ defaultValue: String
+ isDeprecated: Boolean!
+ deprecationReason: String
+}
+
+"""
+One possible value for a given Enum. Enum values are unique values, not a placeholder for a string or numeric value. However an Enum value is returned in a JSON response as a string.
+"""
+type __EnumValue {
+ name: String!
+ description: String
+ isDeprecated: Boolean!
+ deprecationReason: String
+}
+
+"""
+A Directive provides a way to describe alternate runtime execution and type validation behavior in a GraphQL document.
+
+In some cases, you need to provide options to alter GraphQL's execution behavior in ways field arguments will not suffice, such as conditionally including or skipping a field. Directives provide this by describing additional information to the executor.
+"""
+type __Directive {
+ name: String!
+ description: String
+ isRepeatable: Boolean!
+ locations: [__DirectiveLocation!]!
+ args(includeDeprecated: Boolean = false): [__InputValue!]!
+}
+
+"""
+A Directive can be adjacent to many parts of the GraphQL language, a __DirectiveLocation describes one such possible adjacencies.
+"""
+enum __DirectiveLocation {
+ """Location adjacent to a query operation."""
+ QUERY
+
+ """Location adjacent to a mutation operation."""
+ MUTATION
+
+ """Location adjacent to a subscription operation."""
+ SUBSCRIPTION
+
+ """Location adjacent to a field."""
+ FIELD
+
+ """Location adjacent to a fragment definition."""
+ FRAGMENT_DEFINITION
+
+ """Location adjacent to a fragment spread."""
+ FRAGMENT_SPREAD
+
+ """Location adjacent to an inline fragment."""
+ INLINE_FRAGMENT
+
+ """Location adjacent to a variable definition."""
+ VARIABLE_DEFINITION
+
+ """Location adjacent to a schema definition."""
+ SCHEMA
+
+ """Location adjacent to a scalar definition."""
+ SCALAR
+
+ """Location adjacent to an object type definition."""
+ OBJECT
+
+ """Location adjacent to a field definition."""
+ FIELD_DEFINITION
+
+ """Location adjacent to an argument definition."""
+ ARGUMENT_DEFINITION
+
+ """Location adjacent to an interface definition."""
+ INTERFACE
+
+ """Location adjacent to a union definition."""
+ UNION
+
+ """Location adjacent to an enum definition."""
+ ENUM
+
+ """Location adjacent to an enum value definition."""
+ ENUM_VALUE
+
+ """Location adjacent to an input object type definition."""
+ INPUT_OBJECT
+
+ """Location adjacent to an input object field definition."""
+ INPUT_FIELD_DEFINITION
+}
\ No newline at end of file
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/prelude.go b/vendor/github.com/vektah/gqlparser/v2/validator/prelude.go
similarity index 66%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/prelude.go
rename to vendor/github.com/vektah/gqlparser/v2/validator/prelude.go
index 86796fab6c..5c88e93b3f 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/prelude.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/prelude.go
@@ -3,10 +3,10 @@ package validator
import (
_ "embed"
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
+ "github.com/vektah/gqlparser/v2/ast"
)
-//go:embed prelude.graphql
+//go:embed imported/prelude.graphql
var preludeGraphql string
var Prelude = &ast.Source{
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fields_on_correct_type.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go
similarity index 57%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fields_on_correct_type.go
rename to vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go
index d536e5e5f4..e4a67eb0cd 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fields_on_correct_type.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go
@@ -1,40 +1,54 @@
-package validator
+package rules
import (
"fmt"
"sort"
"strings"
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
+ "github.com/vektah/gqlparser/v2/ast"
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
)
-func init() {
- AddRule("FieldsOnCorrectType", func(observers *Events, addError AddErrFunc) {
- observers.OnField(func(walker *Walker, field *ast.Field) {
- if field.ObjectDefinition == nil || field.Definition != nil {
- return
- }
+func ruleFuncFieldsOnCorrectType(observers *Events, addError AddErrFunc, disableSuggestion bool) {
+ observers.OnField(func(walker *Walker, field *ast.Field) {
+ if field.ObjectDefinition == nil || field.Definition != nil {
+ return
+ }
- message := fmt.Sprintf(`Cannot query field "%s" on type "%s".`, field.Name, field.ObjectDefinition.Name)
+ message := fmt.Sprintf(`Cannot query field "%s" on type "%s".`, field.Name, field.ObjectDefinition.Name)
+ if !disableSuggestion {
if suggestedTypeNames := getSuggestedTypeNames(walker, field.ObjectDefinition, field.Name); suggestedTypeNames != nil {
message += " Did you mean to use an inline fragment on " + QuotedOrList(suggestedTypeNames...) + "?"
} else if suggestedFieldNames := getSuggestedFieldNames(field.ObjectDefinition, field.Name); suggestedFieldNames != nil {
message += " Did you mean " + QuotedOrList(suggestedFieldNames...) + "?"
}
+ }
- addError(
- Message(message),
- At(field.Position),
- )
- })
+ addError(
+ Message("%s", message),
+ At(field.Position),
+ )
})
}
-// Go through all of the implementations of type, as well as the interfaces
+var FieldsOnCorrectTypeRule = Rule{
+ Name: "FieldsOnCorrectType",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
+ ruleFuncFieldsOnCorrectType(observers, addError, false)
+ },
+}
+
+var FieldsOnCorrectTypeRuleWithoutSuggestions = Rule{
+ Name: "FieldsOnCorrectTypeWithoutSuggestions",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
+ ruleFuncFieldsOnCorrectType(observers, addError, true)
+ },
+}
+
+// Go through all the implementations of type, as well as the interfaces
// that they implement. If any of those types include the provided field,
// suggest them, sorted by how often the type is referenced, starting
// with Interfaces.
@@ -44,7 +58,7 @@ func getSuggestedTypeNames(walker *Walker, parent *ast.Definition, name string)
}
possibleTypes := walker.Schema.GetPossibleTypes(parent)
- var suggestedObjectTypes = make([]string, 0, len(possibleTypes))
+ suggestedObjectTypes := make([]string, 0, len(possibleTypes))
var suggestedInterfaceTypes []string
interfaceUsageCount := map[string]int{}
@@ -67,7 +81,7 @@ func getSuggestedTypeNames(walker *Walker, parent *ast.Definition, name string)
}
}
- suggestedTypes := append(suggestedInterfaceTypes, suggestedObjectTypes...)
+ suggestedTypes := concatSlice(suggestedInterfaceTypes, suggestedObjectTypes)
sort.SliceStable(suggestedTypes, func(i, j int) bool {
typeA, typeB := suggestedTypes[i], suggestedTypes[j]
@@ -81,6 +95,16 @@ func getSuggestedTypeNames(walker *Walker, parent *ast.Definition, name string)
return suggestedTypes
}
+// By employing a full slice expression (slice[low:high:max]),
+// where max is set to the slice’s length,
+// we ensure that appending elements results
+// in a slice backed by a distinct array.
+// This method prevents the shared array issue
+func concatSlice(first []string, second []string) []string {
+ n := len(first)
+ return append(first[:n:n], second...)
+}
+
// For the field name provided, determine if there are any similar field names
// that may be the result of a typo.
func getSuggestedFieldNames(parent *ast.Definition, name string) []string {
@@ -88,7 +112,7 @@ func getSuggestedFieldNames(parent *ast.Definition, name string) []string {
return nil
}
- var possibleFieldNames = make([]string, 0, len(parent.Fields))
+ possibleFieldNames := make([]string, 0, len(parent.Fields))
for _, field := range parent.Fields {
possibleFieldNames = append(possibleFieldNames, field.Name)
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fragments_on_composite_types.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go
similarity index 61%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fragments_on_composite_types.go
rename to vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go
index 66bd348c47..8fb2692589 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fragments_on_composite_types.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go
@@ -1,16 +1,17 @@
-package validator
+package rules
import (
"fmt"
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
+ "github.com/vektah/gqlparser/v2/ast"
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
)
-func init() {
- AddRule("FragmentsOnCompositeTypes", func(observers *Events, addError AddErrFunc) {
+var FragmentsOnCompositeTypesRule = Rule{
+ Name: "FragmentsOnCompositeTypes",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
observers.OnInlineFragment(func(walker *Walker, inlineFragment *ast.InlineFragment) {
fragmentType := walker.Schema.Types[inlineFragment.TypeCondition]
if fragmentType == nil || fragmentType.IsCompositeType() {
@@ -20,12 +21,12 @@ func init() {
message := fmt.Sprintf(`Fragment cannot condition on non composite type "%s".`, inlineFragment.TypeCondition)
addError(
- Message(message),
+ Message("%s", message),
At(inlineFragment.Position),
)
})
- observers.OnFragment(func(_ *Walker, fragment *ast.FragmentDefinition) {
+ observers.OnFragment(func(walker *Walker, fragment *ast.FragmentDefinition) {
if fragment.Definition == nil || fragment.TypeCondition == "" || fragment.Definition.IsCompositeType() {
return
}
@@ -33,9 +34,9 @@ func init() {
message := fmt.Sprintf(`Fragment "%s" cannot condition on non composite type "%s".`, fragment.Name, fragment.TypeCondition)
addError(
- Message(message),
+ Message("%s", message),
At(fragment.Position),
)
})
- })
+ },
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go
new file mode 100644
index 0000000000..4c065a715e
--- /dev/null
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go
@@ -0,0 +1,84 @@
+package rules
+
+import (
+ "github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
+)
+
+func ruleFuncKnownArgumentNames(observers *Events, addError AddErrFunc, disableSuggestion bool) {
+ // A GraphQL field is only valid if all supplied arguments are defined by that field.
+ observers.OnField(func(walker *Walker, field *ast.Field) {
+ if field.Definition == nil || field.ObjectDefinition == nil {
+ return
+ }
+ for _, arg := range field.Arguments {
+ def := field.Definition.Arguments.ForName(arg.Name)
+ if def != nil {
+ continue
+ }
+
+ if disableSuggestion {
+ addError(
+ Message(`Unknown argument "%s" on field "%s.%s".`, arg.Name, field.ObjectDefinition.Name, field.Name),
+ At(field.Position),
+ )
+ } else {
+ var suggestions []string
+ for _, argDef := range field.Definition.Arguments {
+ suggestions = append(suggestions, argDef.Name)
+ }
+ addError(
+ Message(`Unknown argument "%s" on field "%s.%s".`, arg.Name, field.ObjectDefinition.Name, field.Name),
+ SuggestListQuoted("Did you mean", arg.Name, suggestions),
+ At(field.Position),
+ )
+ }
+ }
+ })
+
+ observers.OnDirective(func(walker *Walker, directive *ast.Directive) {
+ if directive.Definition == nil {
+ return
+ }
+ for _, arg := range directive.Arguments {
+ def := directive.Definition.Arguments.ForName(arg.Name)
+ if def != nil {
+ continue
+ }
+
+ if disableSuggestion {
+ addError(
+ Message(`Unknown argument "%s" on directive "@%s".`, arg.Name, directive.Name),
+ At(directive.Position),
+ )
+ } else {
+ var suggestions []string
+ for _, argDef := range directive.Definition.Arguments {
+ suggestions = append(suggestions, argDef.Name)
+ }
+
+ addError(
+ Message(`Unknown argument "%s" on directive "@%s".`, arg.Name, directive.Name),
+ SuggestListQuoted("Did you mean", arg.Name, suggestions),
+ At(directive.Position),
+ )
+ }
+ }
+ })
+}
+
+var KnownArgumentNamesRule = Rule{
+ Name: "KnownArgumentNames",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
+ ruleFuncKnownArgumentNames(observers, addError, false)
+ },
+}
+
+var KnownArgumentNamesRuleWithoutSuggestions = Rule{
+ Name: "KnownArgumentNamesWithoutSuggestions",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
+ ruleFuncKnownArgumentNames(observers, addError, true)
+ },
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_directives.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go
similarity index 64%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_directives.go
rename to vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go
index 9855291e3b..2430d29b23 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_directives.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go
@@ -1,21 +1,22 @@
-package validator
+package rules
import (
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
+ "github.com/vektah/gqlparser/v2/ast"
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
)
-func init() {
- AddRule("KnownDirectives", func(observers *Events, addError AddErrFunc) {
+var KnownDirectivesRule = Rule{
+ Name: "KnownDirectives",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
type mayNotBeUsedDirective struct {
Name string
Line int
Column int
}
- var seen = map[mayNotBeUsedDirective]bool{}
- observers.OnDirective(func(_ *Walker, directive *ast.Directive) {
+ seen := map[mayNotBeUsedDirective]bool{}
+ observers.OnDirective(func(walker *Walker, directive *ast.Directive) {
if directive.Definition == nil {
addError(
Message(`Unknown directive "@%s".`, directive.Name),
@@ -45,5 +46,5 @@ func init() {
seen[tmp] = true
}
})
- })
+ },
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go
new file mode 100644
index 0000000000..c9b9f90d4a
--- /dev/null
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go
@@ -0,0 +1,22 @@
+package rules
+
+import (
+ "github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
+)
+
+var KnownFragmentNamesRule = Rule{
+ Name: "KnownFragmentNames",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
+ observers.OnFragmentSpread(func(walker *Walker, fragmentSpread *ast.FragmentSpread) {
+ if fragmentSpread.Definition == nil {
+ addError(
+ Message(`Unknown fragment "%s".`, fragmentSpread.Name),
+ At(fragmentSpread.Position),
+ )
+ }
+ })
+ },
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_root_type.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go
similarity index 72%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_root_type.go
rename to vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go
index ab97cd9017..b67da68ce7 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_root_type.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go
@@ -1,16 +1,17 @@
-package validator
+package rules
import (
"fmt"
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
+ "github.com/vektah/gqlparser/v2/ast"
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
)
-func init() {
- AddRule("KnownRootType", func(observers *Events, addError AddErrFunc) {
+var KnownRootTypeRule = Rule{
+ Name: "KnownRootType",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
// A query's root must be a valid type. Surprisingly, this isn't
// checked anywhere else!
observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) {
@@ -33,5 +34,5 @@ func init() {
At(operation.Position))
}
})
- })
+ },
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go
new file mode 100644
index 0000000000..a0f10fba75
--- /dev/null
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go
@@ -0,0 +1,80 @@
+package rules
+
+import (
+ "github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
+)
+
+func ruleFuncKnownTypeNames(observers *Events, addError AddErrFunc, disableSuggestion bool) {
+ observers.OnVariable(func(walker *Walker, variable *ast.VariableDefinition) {
+ typeName := variable.Type.Name()
+ typdef := walker.Schema.Types[typeName]
+ if typdef != nil {
+ return
+ }
+
+ addError(
+ Message(`Unknown type "%s".`, typeName),
+ At(variable.Position),
+ )
+ })
+
+ observers.OnInlineFragment(func(walker *Walker, inlineFragment *ast.InlineFragment) {
+ typedName := inlineFragment.TypeCondition
+ if typedName == "" {
+ return
+ }
+
+ def := walker.Schema.Types[typedName]
+ if def != nil {
+ return
+ }
+
+ addError(
+ Message(`Unknown type "%s".`, typedName),
+ At(inlineFragment.Position),
+ )
+ })
+
+ observers.OnFragment(func(walker *Walker, fragment *ast.FragmentDefinition) {
+ typeName := fragment.TypeCondition
+ def := walker.Schema.Types[typeName]
+ if def != nil {
+ return
+ }
+
+ if disableSuggestion {
+ addError(
+ Message(`Unknown type "%s".`, typeName),
+ At(fragment.Position),
+ )
+ } else {
+ var possibleTypes []string
+ for _, t := range walker.Schema.Types {
+ possibleTypes = append(possibleTypes, t.Name)
+ }
+
+ addError(
+ Message(`Unknown type "%s".`, typeName),
+ SuggestListQuoted("Did you mean", typeName, possibleTypes),
+ At(fragment.Position),
+ )
+ }
+ })
+}
+
+var KnownTypeNamesRule = Rule{
+ Name: "KnownTypeNames",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
+ ruleFuncKnownTypeNames(observers, addError, false)
+ },
+}
+
+var KnownTypeNamesRuleWithoutSuggestions = Rule{
+ Name: "KnownTypeNamesWithoutSuggestions",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
+ ruleFuncKnownTypeNames(observers, addError, true)
+ },
+}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go
new file mode 100644
index 0000000000..dfa851c577
--- /dev/null
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go
@@ -0,0 +1,22 @@
+package rules
+
+import (
+ "github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
+)
+
+var LoneAnonymousOperationRule = Rule{
+ Name: "LoneAnonymousOperation",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
+ observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) {
+ if operation.Name == "" && len(walker.Document.Operations) > 1 {
+ addError(
+ Message(`This anonymous operation must be the only defined operation.`),
+ At(operation.Position),
+ )
+ }
+ })
+ },
+}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/max_introspection_depth.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/max_introspection_depth.go
new file mode 100644
index 0000000000..651b23b4e3
--- /dev/null
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/max_introspection_depth.go
@@ -0,0 +1,86 @@
+package rules
+
+import (
+ "github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
+)
+
+const maxListsDepth = 3
+
+var MaxIntrospectionDepth = Rule{
+ Name: "MaxIntrospectionDepth",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
+ // Counts the depth of list fields in "__Type" recursively and
+ // returns `true` if the limit has been reached.
+ observers.OnField(func(walker *Walker, field *ast.Field) {
+ if field.Name == "__schema" || field.Name == "__type" {
+ visitedFragments := make(map[string]bool)
+ if checkDepthField(field, visitedFragments, 0) {
+ addError(
+ Message(`Maximum introspection depth exceeded`),
+ At(field.Position),
+ )
+ }
+ return
+ }
+ })
+ },
+}
+
+func checkDepthSelectionSet(selectionSet ast.SelectionSet, visitedFragments map[string]bool, depth int) bool {
+ for _, child := range selectionSet {
+ if field, ok := child.(*ast.Field); ok {
+ if checkDepthField(field, visitedFragments, depth) {
+ return true
+ }
+ }
+ if fragmentSpread, ok := child.(*ast.FragmentSpread); ok {
+ if checkDepthFragmentSpread(fragmentSpread, visitedFragments, depth) {
+ return true
+ }
+ }
+ if inlineFragment, ok := child.(*ast.InlineFragment); ok {
+ if checkDepthSelectionSet(inlineFragment.SelectionSet, visitedFragments, depth) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func checkDepthField(field *ast.Field, visitedFragments map[string]bool, depth int) bool {
+ if field.Name == "fields" ||
+ field.Name == "interfaces" ||
+ field.Name == "possibleTypes" ||
+ field.Name == "inputFields" {
+ depth++
+ if depth >= maxListsDepth {
+ return true
+ }
+ }
+ return checkDepthSelectionSet(field.SelectionSet, visitedFragments, depth)
+}
+
+func checkDepthFragmentSpread(fragmentSpread *ast.FragmentSpread, visitedFragments map[string]bool, depth int) bool {
+ fragmentName := fragmentSpread.Name
+ if visited, ok := visitedFragments[fragmentName]; ok && visited {
+ // Fragment cycles are handled by `NoFragmentCyclesRule`.
+ return false
+ }
+ fragment := fragmentSpread.Definition
+ if fragment == nil {
+ // Missing fragments checks are handled by `KnownFragmentNamesRule`.
+ return false
+ }
+
+ // Rather than following an immutable programming pattern which has
+ // significant memory and garbage collection overhead, we've opted to
+ // take a mutable approach for efficiency's sake. Importantly visiting a
+ // fragment twice is fine, so long as you don't do one visit inside the
+ // other.
+ visitedFragments[fragmentName] = true
+ defer delete(visitedFragments, fragmentName)
+ return checkDepthSelectionSet(fragment.SelectionSet, visitedFragments, depth)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_fragment_cycles.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go
similarity index 87%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_fragment_cycles.go
rename to vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go
index a7de611f17..fb3ac6ad3c 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_fragment_cycles.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go
@@ -1,17 +1,18 @@
-package validator
+package rules
import (
"fmt"
"strings"
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
+ "github.com/vektah/gqlparser/v2/ast"
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
)
-func init() {
- AddRule("NoFragmentCycles", func(observers *Events, addError AddErrFunc) {
+var NoFragmentCyclesRule = Rule{
+ Name: "NoFragmentCycles",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
visitedFrags := make(map[string]bool)
observers.OnFragment(func(walker *Walker, fragment *ast.FragmentDefinition) {
@@ -67,7 +68,7 @@ func init() {
recursive(fragment)
})
- })
+ },
}
func getFragmentSpreads(node ast.SelectionSet) []*ast.FragmentSpread {
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_undefined_variables.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go
similarity index 61%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_undefined_variables.go
rename to vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go
index e45a5e3d51..562d7f19ce 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_undefined_variables.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go
@@ -1,14 +1,15 @@
-package validator
+package rules
import (
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
+ "github.com/vektah/gqlparser/v2/ast"
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
)
-func init() {
- AddRule("NoUndefinedVariables", func(observers *Events, addError AddErrFunc) {
+var NoUndefinedVariablesRule = Rule{
+ Name: "NoUndefinedVariables",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
observers.OnValue(func(walker *Walker, value *ast.Value) {
if walker.CurrentOperation == nil || value.Kind != ast.Variable || value.VariableDefinition != nil {
return
@@ -26,5 +27,5 @@ func init() {
)
}
})
- })
+ },
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go
new file mode 100644
index 0000000000..6d27e11e9e
--- /dev/null
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go
@@ -0,0 +1,32 @@
+package rules
+
+import (
+ "github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
+)
+
+var NoUnusedFragmentsRule = Rule{
+ Name: "NoUnusedFragments",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
+ inFragmentDefinition := false
+ fragmentNameUsed := make(map[string]bool)
+
+ observers.OnFragmentSpread(func(walker *Walker, fragmentSpread *ast.FragmentSpread) {
+ if !inFragmentDefinition {
+ fragmentNameUsed[fragmentSpread.Name] = true
+ }
+ })
+
+ observers.OnFragment(func(walker *Walker, fragment *ast.FragmentDefinition) {
+ inFragmentDefinition = true
+ if !fragmentNameUsed[fragment.Name] {
+ addError(
+ Message(`Fragment "%s" is never used.`, fragment.Name),
+ At(fragment.Position),
+ )
+ }
+ })
+ },
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_unused_variables.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go
similarity index 52%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_unused_variables.go
rename to vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go
index 163ac895b5..a4ce07090c 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_unused_variables.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go
@@ -1,15 +1,16 @@
-package validator
+package rules
import (
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
+ "github.com/vektah/gqlparser/v2/ast"
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
)
-func init() {
- AddRule("NoUnusedVariables", func(observers *Events, addError AddErrFunc) {
- observers.OnOperation(func(_ *Walker, operation *ast.OperationDefinition) {
+var NoUnusedVariablesRule = Rule{
+ Name: "NoUnusedVariables",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
+ observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) {
for _, varDef := range operation.VariableDefinitions {
if varDef.Used {
continue
@@ -28,5 +29,5 @@ func init() {
}
}
})
- })
+ },
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/overlapping_fields_can_be_merged.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go
similarity index 98%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/overlapping_fields_can_be_merged.go
rename to vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go
index 1e207a43e7..9e843e760b 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/overlapping_fields_can_be_merged.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go
@@ -1,19 +1,19 @@
-package validator
+package rules
import (
"bytes"
"fmt"
"reflect"
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
+ "github.com/vektah/gqlparser/v2/ast"
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
)
-func init() {
-
- AddRule("OverlappingFieldsCanBeMerged", func(observers *Events, addError AddErrFunc) {
+var OverlappingFieldsCanBeMergedRule = Rule{
+ Name: "OverlappingFieldsCanBeMerged",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
/**
* Algorithm:
*
@@ -105,7 +105,7 @@ func init() {
conflict.addFieldsConflictMessage(addError)
}
})
- })
+ },
}
type pairSet struct {
@@ -304,10 +304,8 @@ func (m *overlappingFieldsCanBeMergedManager) collectConflictsBetweenFieldsAndFr
}
func (m *overlappingFieldsCanBeMergedManager) collectConflictsBetweenFragments(conflicts *conflictMessageContainer, areMutuallyExclusive bool, fragmentSpreadA *ast.FragmentSpread, fragmentSpreadB *ast.FragmentSpread) {
-
var check func(fragmentSpreadA *ast.FragmentSpread, fragmentSpreadB *ast.FragmentSpread)
check = func(fragmentSpreadA *ast.FragmentSpread, fragmentSpreadB *ast.FragmentSpread) {
-
if fragmentSpreadA.Name == fragmentSpreadB.Name {
return
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/possible_fragment_spreads.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go
similarity index 84%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/possible_fragment_spreads.go
rename to vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go
index 79cb20c49c..f932ac8c2e 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/possible_fragment_spreads.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go
@@ -1,15 +1,15 @@
-package validator
+package rules
import (
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
+ "github.com/vektah/gqlparser/v2/ast"
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
)
-func init() {
- AddRule("PossibleFragmentSpreads", func(observers *Events, addError AddErrFunc) {
-
+var PossibleFragmentSpreadsRule = Rule{
+ Name: "PossibleFragmentSpreads",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
validate := func(walker *Walker, parentDef *ast.Definition, fragmentName string, emitError func()) {
if parentDef == nil {
return
@@ -66,5 +66,5 @@ func init() {
)
})
})
- })
+ },
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/provided_required_arguments.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/provided_required_arguments.go
similarity index 70%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/provided_required_arguments.go
rename to vendor/github.com/vektah/gqlparser/v2/validator/rules/provided_required_arguments.go
index d6d12c4fd2..fc1a6a476d 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/provided_required_arguments.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/provided_required_arguments.go
@@ -1,15 +1,15 @@
-package validator
+package rules
import (
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
-
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
+ "github.com/vektah/gqlparser/v2/ast"
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
)
-func init() {
- AddRule("ProvidedRequiredArguments", func(observers *Events, addError AddErrFunc) {
- observers.OnField(func(_ *Walker, field *ast.Field) {
+var ProvidedRequiredArgumentsRule = Rule{
+ Name: "ProvidedRequiredArguments",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
+ observers.OnField(func(walker *Walker, field *ast.Field) {
if field.Definition == nil {
return
}
@@ -35,7 +35,7 @@ func init() {
}
})
- observers.OnDirective(func(_ *Walker, directive *ast.Directive) {
+ observers.OnDirective(func(walker *Walker, directive *ast.Directive) {
if directive.Definition == nil {
return
}
@@ -60,5 +60,5 @@ func init() {
)
}
})
- })
+ },
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/rules.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/rules.go
new file mode 100644
index 0000000000..803543ed17
--- /dev/null
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/rules.go
@@ -0,0 +1,119 @@
+package rules
+
+import (
+ "slices"
+
+ "github.com/vektah/gqlparser/v2/validator/core"
+)
+
+// Rules manages GraphQL validation rules.
+type Rules struct {
+ rules map[string]core.RuleFunc
+ ruleNameKeys []string // for deterministic order
+}
+
+// NewRules creates a Rules instance with the specified rules.
+func NewRules(rs ...core.Rule) *Rules {
+ r := &Rules{
+ rules: make(map[string]core.RuleFunc),
+ }
+
+ for _, rule := range rs {
+ r.AddRule(rule.Name, rule.RuleFunc)
+ }
+
+ return r
+}
+
+// NewDefaultRules creates a Rules instance containing the default GraphQL validation rule set.
+func NewDefaultRules() *Rules {
+ rules := []core.Rule{
+ FieldsOnCorrectTypeRule,
+ FragmentsOnCompositeTypesRule,
+ KnownArgumentNamesRule,
+ KnownDirectivesRule,
+ KnownFragmentNamesRule,
+ KnownRootTypeRule,
+ KnownTypeNamesRule,
+ LoneAnonymousOperationRule,
+ MaxIntrospectionDepth,
+ NoFragmentCyclesRule,
+ NoUndefinedVariablesRule,
+ NoUnusedFragmentsRule,
+ NoUnusedVariablesRule,
+ OverlappingFieldsCanBeMergedRule,
+ PossibleFragmentSpreadsRule,
+ ProvidedRequiredArgumentsRule,
+ ScalarLeafsRule,
+ SingleFieldSubscriptionsRule,
+ UniqueArgumentNamesRule,
+ UniqueDirectivesPerLocationRule,
+ UniqueFragmentNamesRule,
+ UniqueInputFieldNamesRule,
+ UniqueOperationNamesRule,
+ UniqueVariableNamesRule,
+ ValuesOfCorrectTypeRule,
+ VariablesAreInputTypesRule,
+ VariablesInAllowedPositionRule,
+ }
+
+ r := NewRules(rules...)
+
+ return r
+}
+
+// AddRule adds a rule with the specified name and rule function to the rule set.
+// If a rule with the same name already exists, it will not be added.
+func (r *Rules) AddRule(name string, ruleFunc core.RuleFunc) {
+ if r.rules == nil {
+ r.rules = make(map[string]core.RuleFunc)
+ }
+
+ if _, exists := r.rules[name]; !exists {
+ r.rules[name] = ruleFunc
+ r.ruleNameKeys = append(r.ruleNameKeys, name)
+ }
+}
+
+// GetInner returns the internal rule map.
+// If the map is not initialized, it returns an empty map.
+func (r *Rules) GetInner() map[string]core.RuleFunc {
+ if r == nil {
+ return nil // impossible nonsense, hopefully
+ }
+ if r.rules == nil {
+ return make(map[string]core.RuleFunc)
+ }
+ return r.rules
+}
+
+// RemoveRule removes a rule with the specified name from the rule set.
+// If no rule with the specified name exists, it does nothing.
+func (r *Rules) RemoveRule(name string) {
+ if r == nil {
+ return // impossible nonsense, hopefully
+ }
+ if r.rules != nil {
+ delete(r.rules, name)
+ }
+
+ if len(r.ruleNameKeys) > 0 {
+ r.ruleNameKeys = slices.DeleteFunc(r.ruleNameKeys, func(s string) bool {
+ return s == name // delete the name rule key
+ })
+ }
+}
+
+// ReplaceRule replaces a rule with the specified name with a new rule function.
+// If no rule with the specified name exists, it does nothing.
+func (r *Rules) ReplaceRule(name string, ruleFunc core.RuleFunc) {
+ if r == nil {
+ return // impossible nonsense, hopefully
+ }
+ if r.rules == nil {
+ r.rules = make(map[string]core.RuleFunc)
+ }
+ if _, exists := r.rules[name]; exists {
+ r.rules[name] = ruleFunc
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/scalar_leafs.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go
similarity index 70%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/scalar_leafs.go
rename to vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go
index cd17b47c87..e4f210d757 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/scalar_leafs.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go
@@ -1,14 +1,15 @@
-package validator
+package rules
import (
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
+ "github.com/vektah/gqlparser/v2/ast"
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
)
-func init() {
- AddRule("ScalarLeafs", func(observers *Events, addError AddErrFunc) {
+var ScalarLeafsRule = Rule{
+ Name: "ScalarLeafs",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
observers.OnField(func(walker *Walker, field *ast.Field) {
if field.Definition == nil {
return
@@ -34,5 +35,5 @@ func init() {
)
}
})
- })
+ },
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/single_field_subscriptions.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go
similarity index 85%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/single_field_subscriptions.go
rename to vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go
index 98cb984b40..feed91d5ce 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/single_field_subscriptions.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go
@@ -1,17 +1,18 @@
-package validator
+package rules
import (
"strconv"
"strings"
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
+ "github.com/vektah/gqlparser/v2/ast"
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
)
-func init() {
- AddRule("SingleFieldSubscriptions", func(observers *Events, addError AddErrFunc) {
+var SingleFieldSubscriptionsRule = Rule{
+ Name: "SingleFieldSubscriptions",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) {
if walker.Schema.Subscription == nil || operation.Operation != ast.Subscription {
return
@@ -40,7 +41,7 @@ func init() {
}
}
})
- })
+ },
}
type topField struct {
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go
new file mode 100644
index 0000000000..2ed1da2b34
--- /dev/null
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go
@@ -0,0 +1,36 @@
+package rules
+
+import (
+ "github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
+)
+
+var UniqueArgumentNamesRule = Rule{
+ Name: "UniqueArgumentNames",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
+ observers.OnField(func(walker *Walker, field *ast.Field) {
+ checkUniqueArgs(field.Arguments, addError)
+ })
+
+ observers.OnDirective(func(walker *Walker, directive *ast.Directive) {
+ checkUniqueArgs(directive.Arguments, addError)
+ })
+ },
+}
+
+func checkUniqueArgs(args ast.ArgumentList, addError AddErrFunc) {
+ knownArgNames := map[string]int{}
+
+ for _, arg := range args {
+ if knownArgNames[arg.Name] == 1 {
+ addError(
+ Message(`There can be only one argument named "%s".`, arg.Name),
+ At(arg.Position),
+ )
+ }
+
+ knownArgNames[arg.Name]++
+ }
+}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go
new file mode 100644
index 0000000000..0f57702814
--- /dev/null
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go
@@ -0,0 +1,27 @@
+package rules
+
+import (
+ "github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
+)
+
+var UniqueDirectivesPerLocationRule = Rule{
+ Name: "UniqueDirectivesPerLocation",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
+ observers.OnDirectiveList(func(walker *Walker, directives []*ast.Directive) {
+ seen := map[string]bool{}
+
+ for _, dir := range directives {
+ if dir.Name != "repeatable" && seen[dir.Name] {
+ addError(
+ Message(`The directive "@%s" can only be used once at this location.`, dir.Name),
+ At(dir.Position),
+ )
+ }
+ seen[dir.Name] = true
+ }
+ })
+ },
+}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go
new file mode 100644
index 0000000000..136b0fdb5a
--- /dev/null
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go
@@ -0,0 +1,25 @@
+package rules
+
+import (
+ "github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
+)
+
+var UniqueFragmentNamesRule = Rule{
+ Name: "UniqueFragmentNames",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
+ seenFragments := map[string]bool{}
+
+ observers.OnFragment(func(walker *Walker, fragment *ast.FragmentDefinition) {
+ if seenFragments[fragment.Name] {
+ addError(
+ Message(`There can be only one fragment named "%s".`, fragment.Name),
+ At(fragment.Position),
+ )
+ }
+ seenFragments[fragment.Name] = true
+ })
+ },
+}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go
new file mode 100644
index 0000000000..41d8d667aa
--- /dev/null
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go
@@ -0,0 +1,30 @@
+package rules
+
+import (
+ "github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
+)
+
+var UniqueInputFieldNamesRule = Rule{
+ Name: "UniqueInputFieldNames",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
+ observers.OnValue(func(walker *Walker, value *ast.Value) {
+ if value.Kind != ast.ObjectValue {
+ return
+ }
+
+ seen := map[string]bool{}
+ for _, field := range value.Children {
+ if seen[field.Name] {
+ addError(
+ Message(`There can be only one input field named "%s".`, field.Name),
+ At(field.Position),
+ )
+ }
+ seen[field.Name] = true
+ }
+ })
+ },
+}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go
new file mode 100644
index 0000000000..ae4c54eede
--- /dev/null
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go
@@ -0,0 +1,25 @@
+package rules
+
+import (
+ "github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
+)
+
+var UniqueOperationNamesRule = Rule{
+ Name: "UniqueOperationNames",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
+ seen := map[string]bool{}
+
+ observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) {
+ if seen[operation.Name] {
+ addError(
+ Message(`There can be only one operation named "%s".`, operation.Name),
+ At(operation.Position),
+ )
+ }
+ seen[operation.Name] = true
+ })
+ },
+}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go
new file mode 100644
index 0000000000..4d4a6a87f7
--- /dev/null
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go
@@ -0,0 +1,27 @@
+package rules
+
+import (
+ "github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
+)
+
+var UniqueVariableNamesRule = Rule{
+ Name: "UniqueVariableNames",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
+ observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) {
+ seen := map[string]int{}
+ for _, def := range operation.VariableDefinitions {
+ // add the same error only once per a variable.
+ if seen[def.Variable] == 1 {
+ addError(
+ Message(`There can be only one variable named "$%s".`, def.Variable),
+ At(def.Position),
+ )
+ }
+ seen[def.Variable]++
+ }
+ })
+ },
+}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go
new file mode 100644
index 0000000000..43c1a1bfb5
--- /dev/null
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go
@@ -0,0 +1,246 @@
+package rules
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+
+ "github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
+)
+
+func ruleFuncValuesOfCorrectType(observers *Events, addError AddErrFunc, disableSuggestion bool) {
+ observers.OnValue(func(walker *Walker, value *ast.Value) {
+ if value.Definition == nil || value.ExpectedType == nil {
+ return
+ }
+
+ if value.Kind == ast.NullValue && value.ExpectedType.NonNull {
+ addError(
+ Message(`Expected value of type "%s", found %s.`, value.ExpectedType.String(), value.String()),
+ At(value.Position),
+ )
+ }
+
+ if value.Definition.Kind == ast.Scalar {
+ // Skip custom validating scalars
+ if !value.Definition.OneOf("Int", "Float", "String", "Boolean", "ID") {
+ return
+ }
+ }
+
+ var possibleEnums []string
+ if value.Definition.Kind == ast.Enum {
+ for _, val := range value.Definition.EnumValues {
+ possibleEnums = append(possibleEnums, val.Name)
+ }
+ }
+
+ rawVal, err := value.Value(nil)
+ if err != nil {
+ unexpectedTypeMessage(addError, value)
+ }
+
+ switch value.Kind {
+ case ast.NullValue:
+ return
+ case ast.ListValue:
+ if value.ExpectedType.Elem == nil {
+ unexpectedTypeMessage(addError, value)
+ return
+ }
+
+ case ast.IntValue:
+ if !value.Definition.OneOf("Int", "Float", "ID") {
+ unexpectedTypeMessage(addError, value)
+ }
+
+ case ast.FloatValue:
+ if !value.Definition.OneOf("Float") {
+ unexpectedTypeMessage(addError, value)
+ }
+
+ case ast.StringValue, ast.BlockValue:
+ if value.Definition.Kind == ast.Enum {
+ if disableSuggestion {
+ addError(
+ Message(`Enum "%s" cannot represent non-enum value: %s.`, value.ExpectedType.String(), value.String()),
+ At(value.Position),
+ )
+ } else {
+ rawValStr := fmt.Sprint(rawVal)
+ addError(
+ Message(`Enum "%s" cannot represent non-enum value: %s.`, value.ExpectedType.String(), value.String()),
+ SuggestListQuoted("Did you mean the enum value", rawValStr, possibleEnums),
+ At(value.Position),
+ )
+ }
+ } else if !value.Definition.OneOf("String", "ID") {
+ unexpectedTypeMessage(addError, value)
+ }
+
+ case ast.EnumValue:
+ if value.Definition.Kind != ast.Enum {
+ if disableSuggestion {
+ addError(
+ unexpectedTypeMessageOnly(value),
+ At(value.Position),
+ )
+ } else {
+ rawValStr := fmt.Sprint(rawVal)
+ addError(
+ unexpectedTypeMessageOnly(value),
+ SuggestListUnquoted("Did you mean the enum value", rawValStr, possibleEnums),
+ At(value.Position),
+ )
+ }
+ } else if value.Definition.EnumValues.ForName(value.Raw) == nil {
+ if disableSuggestion {
+ addError(
+ Message(`Value "%s" does not exist in "%s" enum.`, value.String(), value.ExpectedType.String()),
+ At(value.Position),
+ )
+ } else {
+ rawValStr := fmt.Sprint(rawVal)
+ addError(
+ Message(`Value "%s" does not exist in "%s" enum.`, value.String(), value.ExpectedType.String()),
+ SuggestListQuoted("Did you mean the enum value", rawValStr, possibleEnums),
+ At(value.Position),
+ )
+ }
+ }
+
+ case ast.BooleanValue:
+ if !value.Definition.OneOf("Boolean") {
+ unexpectedTypeMessage(addError, value)
+ }
+
+ case ast.ObjectValue:
+
+ for _, field := range value.Definition.Fields {
+ if field.Type.NonNull {
+ fieldValue := value.Children.ForName(field.Name)
+ if fieldValue == nil && field.DefaultValue == nil {
+ addError(
+ Message(`Field "%s.%s" of required type "%s" was not provided.`, value.Definition.Name, field.Name, field.Type.String()),
+ At(value.Position),
+ )
+ continue
+ }
+ }
+ }
+
+ for _, directive := range value.Definition.Directives {
+ if directive.Name == "oneOf" {
+ func() {
+ if len(value.Children) != 1 {
+ addError(
+ Message(`OneOf Input Object "%s" must specify exactly one key.`, value.Definition.Name),
+ At(value.Position),
+ )
+ return
+ }
+
+ fieldValue := value.Children[0].Value
+ isNullLiteral := fieldValue == nil || fieldValue.Kind == ast.NullValue
+ if isNullLiteral {
+ addError(
+ Message(`Field "%s.%s" must be non-null.`, value.Definition.Name, value.Definition.Fields[0].Name),
+ At(fieldValue.Position),
+ )
+ return
+ }
+
+ isVariable := fieldValue.Kind == ast.Variable
+ if isVariable {
+ variableName := fieldValue.VariableDefinition.Variable
+ isNullableVariable := !fieldValue.VariableDefinition.Type.NonNull
+ if isNullableVariable {
+ addError(
+ Message(`Variable "%s" must be non-nullable to be used for OneOf Input Object "%s".`, variableName, value.Definition.Name),
+ At(fieldValue.Position),
+ )
+ }
+ }
+ }()
+ }
+ }
+
+ for _, fieldValue := range value.Children {
+ if value.Definition.Fields.ForName(fieldValue.Name) == nil {
+ if disableSuggestion {
+ addError(
+ Message(`Field "%s" is not defined by type "%s".`, fieldValue.Name, value.Definition.Name),
+ At(fieldValue.Position),
+ )
+ } else {
+ var suggestions []string
+ for _, fieldValue := range value.Definition.Fields {
+ suggestions = append(suggestions, fieldValue.Name)
+ }
+
+ addError(
+ Message(`Field "%s" is not defined by type "%s".`, fieldValue.Name, value.Definition.Name),
+ SuggestListQuoted("Did you mean", fieldValue.Name, suggestions),
+ At(fieldValue.Position),
+ )
+ }
+ }
+ }
+
+ case ast.Variable:
+ return
+
+ default:
+ panic(fmt.Errorf("unhandled %T", value))
+ }
+ })
+}
+
+var ValuesOfCorrectTypeRule = Rule{
+ Name: "ValuesOfCorrectType",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
+ ruleFuncValuesOfCorrectType(observers, addError, false)
+ },
+}
+
+var ValuesOfCorrectTypeRuleWithoutSuggestions = Rule{
+ Name: "ValuesOfCorrectTypeWithoutSuggestions",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
+ ruleFuncValuesOfCorrectType(observers, addError, true)
+ },
+}
+
+func unexpectedTypeMessage(addError AddErrFunc, v *ast.Value) {
+ addError(
+ unexpectedTypeMessageOnly(v),
+ At(v.Position),
+ )
+}
+
+func unexpectedTypeMessageOnly(v *ast.Value) ErrorOption {
+ switch v.ExpectedType.String() {
+ case "Int", "Int!":
+ if _, err := strconv.ParseInt(v.Raw, 10, 32); err != nil && errors.Is(err, strconv.ErrRange) {
+ return Message(`Int cannot represent non 32-bit signed integer value: %s`, v.String())
+ }
+ return Message(`Int cannot represent non-integer value: %s`, v.String())
+ case "String", "String!", "[String]":
+ return Message(`String cannot represent a non string value: %s`, v.String())
+ case "Boolean", "Boolean!":
+ return Message(`Boolean cannot represent a non boolean value: %s`, v.String())
+ case "Float", "Float!":
+ return Message(`Float cannot represent non numeric value: %s`, v.String())
+ case "ID", "ID!":
+ return Message(`ID cannot represent a non-string and non-integer value: %s`, v.String())
+ // case "Enum":
+ // return Message(`Enum "%s" cannot represent non-enum value: %s`, v.ExpectedType.String(), v.String())
+ default:
+ if v.Definition.Kind == ast.Enum {
+ return Message(`Enum "%s" cannot represent non-enum value: %s.`, v.ExpectedType.String(), v.String())
+ }
+ return Message(`Expected value of type "%s", found %s.`, v.ExpectedType.String(), v.String())
+ }
+}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go
new file mode 100644
index 0000000000..77f116bb50
--- /dev/null
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go
@@ -0,0 +1,31 @@
+package rules
+
+import (
+ "github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
+)
+
+var VariablesAreInputTypesRule = Rule{
+ Name: "VariablesAreInputTypes",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
+ observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) {
+ for _, def := range operation.VariableDefinitions {
+ if def.Definition == nil {
+ continue
+ }
+ if !def.Definition.IsInputType() {
+ addError(
+ Message(
+ `Variable "$%s" cannot be non-input type "%s".`,
+ def.Variable,
+ def.Type.String(),
+ ),
+ At(def.Position),
+ )
+ }
+ }
+ })
+ },
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/variables_in_allowed_position.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go
similarity index 72%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/variables_in_allowed_position.go
rename to vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go
index 08a8e18c09..b2af7e1923 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/variables_in_allowed_position.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go
@@ -1,14 +1,15 @@
-package validator
+package rules
import (
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
+ "github.com/vektah/gqlparser/v2/ast"
- //nolint:revive // Validator rules each use dot imports for convenience.
- . "github.com/open-policy-agent/opa/internal/gqlparser/validator"
+ //nolint:staticcheck // Validator rules each use dot imports for convenience.
+ . "github.com/vektah/gqlparser/v2/validator/core"
)
-func init() {
- AddRule("VariablesInAllowedPosition", func(observers *Events, addError AddErrFunc) {
+var VariablesInAllowedPositionRule = Rule{
+ Name: "VariablesInAllowedPosition",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
observers.OnValue(func(walker *Walker, value *ast.Value) {
if value.Kind != ast.Variable || value.ExpectedType == nil || value.VariableDefinition == nil || walker.CurrentOperation == nil {
return
@@ -36,5 +37,5 @@ func init() {
)
}
})
- })
+ },
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/schema.go b/vendor/github.com/vektah/gqlparser/v2/validator/schema.go
similarity index 86%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/schema.go
rename to vendor/github.com/vektah/gqlparser/v2/validator/schema.go
index c9c542195d..a8754afc2b 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/schema.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/schema.go
@@ -5,21 +5,20 @@ import (
"strconv"
"strings"
- //nolint:revive
- . "github.com/open-policy-agent/opa/internal/gqlparser/ast"
- "github.com/open-policy-agent/opa/internal/gqlparser/gqlerror"
- "github.com/open-policy-agent/opa/internal/gqlparser/parser"
+ . "github.com/vektah/gqlparser/v2/ast" //nolint:staticcheck // bad, yeah
+ "github.com/vektah/gqlparser/v2/gqlerror"
+ "github.com/vektah/gqlparser/v2/parser"
)
func LoadSchema(inputs ...*Source) (*Schema, error) {
- ast, err := parser.ParseSchemas(inputs...)
+ sd, err := parser.ParseSchemas(inputs...)
if err != nil {
- return nil, err
+ return nil, gqlerror.WrapIfUnwrapped(err)
}
- return ValidateSchemaDocument(ast)
+ return ValidateSchemaDocument(sd)
}
-func ValidateSchemaDocument(ast *SchemaDocument) (*Schema, error) {
+func ValidateSchemaDocument(sd *SchemaDocument) (*Schema, error) {
schema := Schema{
Types: map[string]*Definition{},
Directives: map[string]*DirectiveDefinition{},
@@ -27,16 +26,16 @@ func ValidateSchemaDocument(ast *SchemaDocument) (*Schema, error) {
Implements: map[string][]*Definition{},
}
- for i, def := range ast.Definitions {
+ for i, def := range sd.Definitions {
if schema.Types[def.Name] != nil {
return nil, gqlerror.ErrorPosf(def.Position, "Cannot redeclare type %s.", def.Name)
}
- schema.Types[def.Name] = ast.Definitions[i]
+ schema.Types[def.Name] = sd.Definitions[i]
}
- defs := append(DefinitionList{}, ast.Definitions...)
+ defs := append(DefinitionList{}, sd.Definitions...)
- for _, ext := range ast.Extensions {
+ for _, ext := range sd.Extensions {
def := schema.Types[ext.Name]
if def == nil {
schema.Types[ext.Name] = &Definition{
@@ -80,13 +79,13 @@ func ValidateSchemaDocument(ast *SchemaDocument) (*Schema, error) {
}
}
- for i, dir := range ast.Directives {
+ for i, dir := range sd.Directives {
if schema.Directives[dir.Name] != nil {
// While the spec says SDL must not (§3.5) explicitly define builtin
// scalars, it may (§3.13) define builtin directives. Here we check for
// that, and reject doubly-defined directives otherwise.
switch dir.Name {
- case "include", "skip", "deprecated", "specifiedBy": // the builtins
+ case "include", "skip", "deprecated", "specifiedBy", "defer", "oneOf": // the builtins
// In principle here we might want to validate that the
// directives are the same. But they might not be, if the
// server has an older spec than we do. (Plus, validating this
@@ -99,16 +98,16 @@ func ValidateSchemaDocument(ast *SchemaDocument) (*Schema, error) {
return nil, gqlerror.ErrorPosf(dir.Position, "Cannot redeclare directive %s.", dir.Name)
}
}
- schema.Directives[dir.Name] = ast.Directives[i]
+ schema.Directives[dir.Name] = sd.Directives[i]
}
- if len(ast.Schema) > 1 {
- return nil, gqlerror.ErrorPosf(ast.Schema[1].Position, "Cannot have multiple schema entry points, consider schema extensions instead.")
+ if len(sd.Schema) > 1 {
+ return nil, gqlerror.ErrorPosf(sd.Schema[1].Position, "Cannot have multiple schema entry points, consider schema extensions instead.")
}
- if len(ast.Schema) == 1 {
- schema.Description = ast.Schema[0].Description
- for _, entrypoint := range ast.Schema[0].OperationTypes {
+ if len(sd.Schema) == 1 {
+ schema.Description = sd.Schema[0].Description
+ for _, entrypoint := range sd.Schema[0].OperationTypes {
def := schema.Types[entrypoint.Type]
if def == nil {
return nil, gqlerror.ErrorPosf(entrypoint.Position, "Schema root %s refers to a type %s that does not exist.", entrypoint.Operation, entrypoint.Type)
@@ -122,9 +121,13 @@ func ValidateSchemaDocument(ast *SchemaDocument) (*Schema, error) {
schema.Subscription = def
}
}
+ if err := validateDirectives(&schema, sd.Schema[0].Directives, LocationSchema, nil); err != nil {
+ return nil, err
+ }
+ schema.SchemaDirectives = append(schema.SchemaDirectives, sd.Schema[0].Directives...)
}
- for _, ext := range ast.SchemaExtension {
+ for _, ext := range sd.SchemaExtension {
for _, entrypoint := range ext.OperationTypes {
def := schema.Types[entrypoint.Type]
if def == nil {
@@ -139,6 +142,10 @@ func ValidateSchemaDocument(ast *SchemaDocument) (*Schema, error) {
schema.Subscription = def
}
}
+ if err := validateDirectives(&schema, ext.Directives, LocationSchema, nil); err != nil {
+ return nil, err
+ }
+ schema.SchemaDirectives = append(schema.SchemaDirectives, ext.Directives...)
}
if err := validateTypeDefinitions(&schema); err != nil {
@@ -152,7 +159,7 @@ func ValidateSchemaDocument(ast *SchemaDocument) (*Schema, error) {
// Inferred root operation type names should be performed only when a `schema` directive is
// **not** provided, when it is, `Mutation` and `Subscription` becomes valid types and are not
// assigned as a root operation on the schema.
- if len(ast.Schema) == 0 {
+ if len(sd.Schema) == 0 {
if schema.Query == nil && schema.Types["Query"] != nil {
schema.Query = schema.Types["Query"]
}
@@ -284,6 +291,9 @@ func validateDefinition(schema *Schema, def *Definition) *gqlerror.Error {
return gqlerror.ErrorPosf(def.Position, "%s %s: non-enum value %s.", def.Kind, def.Name, value.Name)
}
}
+ if err := validateDirectives(schema, value.Directives, LocationEnumValue, nil); err != nil {
+ return err
+ }
}
case InputObject:
if len(def.Fields) == 0 {
@@ -359,11 +369,12 @@ func validateDirectives(schema *Schema, dirs DirectiveList, location DirectiveLo
if currentDirective != nil && dir.Name == currentDirective.Name {
return gqlerror.ErrorPosf(dir.Position, "Directive %s cannot refer to itself.", currentDirective.Name)
}
- if schema.Directives[dir.Name] == nil {
+ dirDefinition := schema.Directives[dir.Name]
+ if dirDefinition == nil {
return gqlerror.ErrorPosf(dir.Position, "Undefined directive %s.", dir.Name)
}
validKind := false
- for _, dirLocation := range schema.Directives[dir.Name].Locations {
+ for _, dirLocation := range dirDefinition.Locations {
if dirLocation == location {
validKind = true
break
@@ -372,6 +383,18 @@ func validateDirectives(schema *Schema, dirs DirectiveList, location DirectiveLo
if !validKind {
return gqlerror.ErrorPosf(dir.Position, "Directive %s is not applicable on %s.", dir.Name, location)
}
+ for _, arg := range dir.Arguments {
+ if dirDefinition.Arguments.ForName(arg.Name) == nil {
+ return gqlerror.ErrorPosf(arg.Position, "Undefined argument %s for directive %s.", arg.Name, dir.Name)
+ }
+ }
+ for _, schemaArg := range dirDefinition.Arguments {
+ if schemaArg.Type.NonNull && schemaArg.DefaultValue == nil {
+ if arg := dir.Arguments.ForName(schemaArg.Name); arg == nil || arg.Value.Kind == NullValue {
+ return gqlerror.ErrorPosf(dir.Position, "Argument %s for directive %s cannot be null.", schemaArg.Name, dir.Name)
+ }
+ }
+ }
dir.Definition = schema.Directives[dir.Name]
}
return nil
@@ -379,7 +402,7 @@ func validateDirectives(schema *Schema, dirs DirectiveList, location DirectiveLo
func validateImplements(schema *Schema, def *Definition, intfName string) *gqlerror.Error {
// see validation rules at the bottom of
- // https://facebook.github.io/graphql/October2021/#sec-Objects
+ // https://spec.graphql.org/October2021/#sec-Objects
intf := schema.Types[intfName]
if intf == nil {
return gqlerror.ErrorPosf(def.Position, "Undefined type %s.", strconv.Quote(intfName))
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/schema_test.yml b/vendor/github.com/vektah/gqlparser/v2/validator/schema_test.yml
similarity index 92%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/schema_test.yml
rename to vendor/github.com/vektah/gqlparser/v2/validator/schema_test.yml
index 7034a4697c..22f125bec4 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/schema_test.yml
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/schema_test.yml
@@ -80,6 +80,15 @@ object types:
message: 'Name "__id" must not begin with "__", which is reserved by GraphQL introspection.'
locations: [{line: 2, column: 3}]
+ - name: field argument list must not be empty
+ input: |
+ type FooBar {
+ foo(): ID
+ }
+ error:
+ message: 'expected at least one definition, found )'
+ locations: [{line: 2, column: 7}]
+
- name: check reserved names on type field argument
input: |
type FooBar {
@@ -528,7 +537,16 @@ directives:
directive @skip(if: Boolean!) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
directive @skip(if: Boolean!) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
- - name: must be declared
+ - name: must be declared (type)
+ input: |
+ type User @foo {
+ name: String
+ }
+ error:
+ message: "Undefined directive foo."
+ locations: [{line: 1, column: 12}]
+
+ - name: must be declared (field)
input: |
type User {
name: String @foo
@@ -537,6 +555,15 @@ directives:
message: "Undefined directive foo."
locations: [{line: 2, column: 17}]
+ - name: must be declared (enum)
+ input: |
+ enum Unit {
+ METER @foo
+ }
+ error:
+ message: "Undefined directive foo."
+ locations: [{line: 2, column: 10}]
+
- name: cannot be self-referential
input: |
directive @A(foo: Int! @A) on FIELD_DEFINITION
@@ -604,6 +631,32 @@ directives:
type P { name: String @testField }
interface I { id: ID @testField }
+ - name: Invalid directive argument not allowed
+ input: |
+ directive @foo(bla: Int!) on FIELD_DEFINITION
+ type P {f: Int @foo(foobla: 11)}
+
+ error:
+ message: 'Undefined argument foobla for directive foo.'
+ locations: [{line: 2, column: 21}]
+
+ - name: non-null argument must be provided
+ input: |
+ directive @foo(bla: Int!) on FIELD_DEFINITION
+ type P {f: Int @foo }
+
+ error:
+ message: 'Argument bla for directive foo cannot be null.'
+ locations: [{line: 2, column: 17}]
+
+ - name: non-null argument must not be null
+ input: |
+ directive @foo(bla: Int!) on FIELD_DEFINITION
+ type P {f: Int @foo(bla: null) }
+
+ error:
+ message: 'Argument bla for directive foo cannot be null.'
+ locations: [{line: 2, column: 17}]
entry points:
- name: multiple schema entry points
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/validator.go b/vendor/github.com/vektah/gqlparser/v2/validator/validator.go
new file mode 100644
index 0000000000..1214ed16e6
--- /dev/null
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/validator.go
@@ -0,0 +1,158 @@
+package validator
+
+import (
+ "sort"
+ //nolint:staticcheck // bad, yeah
+ . "github.com/vektah/gqlparser/v2/ast"
+ "github.com/vektah/gqlparser/v2/gqlerror"
+ "github.com/vektah/gqlparser/v2/validator/core"
+ validatorrules "github.com/vektah/gqlparser/v2/validator/rules"
+)
+
+type (
+ AddErrFunc = core.AddErrFunc
+ RuleFunc = core.RuleFunc
+ Rule = core.Rule
+ Events = core.Events
+ ErrorOption = core.ErrorOption
+ Walker = core.Walker
+)
+
+var (
+ Message = core.Message
+ QuotedOrList = core.QuotedOrList
+ OrList = core.OrList
+)
+
+// Walk is an alias for core.Walk
+func Walk(schema *Schema, document *QueryDocument, observers *Events) {
+ core.Walk(schema, document, observers)
+}
+
+var specifiedRules []Rule
+
+func init() {
+ // Initialize specifiedRules with default rules
+ defaultRules := validatorrules.NewDefaultRules()
+ for name, ruleFunc := range defaultRules.GetInner() {
+ specifiedRules = append(specifiedRules, Rule{Name: name, RuleFunc: ruleFunc})
+ // ensure initial default is in deterministic order
+ sort.Sort(core.NameSorter(specifiedRules))
+ }
+}
+
+// AddRule adds a rule to the rule set.
+// ruleFunc is called once each time `Validate` is executed.
+func AddRule(name string, ruleFunc RuleFunc) {
+ specifiedRules = append(specifiedRules, Rule{Name: name, RuleFunc: ruleFunc})
+}
+
+// RemoveRule removes an existing rule from the rule set
+// if one of the same name exists.
+// The rule set is global, so it is not safe for concurrent changes
+func RemoveRule(name string) {
+ var result []Rule // nolint:prealloc // using initialized with len(rules) produces a race condition
+ for _, r := range specifiedRules {
+ if r.Name == name {
+ continue
+ }
+ result = append(result, r)
+ }
+ specifiedRules = result
+}
+
+// ReplaceRule replaces an existing rule from the rule set
+// if one of the same name exists.
+// If no match is found, it will add a new rule to the rule set.
+// The rule set is global, so it is not safe for concurrent changes
+func ReplaceRule(name string, ruleFunc RuleFunc) {
+ var found bool
+ var result []Rule // nolint:prealloc // using initialized with len(rules) produces a race condition
+ for _, r := range specifiedRules {
+ if r.Name == name {
+ found = true
+ result = append(result, Rule{Name: name, RuleFunc: ruleFunc})
+ continue
+ }
+ result = append(result, r)
+ }
+ if !found {
+ specifiedRules = append(specifiedRules, Rule{Name: name, RuleFunc: ruleFunc})
+ return
+ }
+ specifiedRules = result
+}
+
+// Deprecated: use ValidateWithRules instead.
+func Validate(schema *Schema, doc *QueryDocument, rules ...Rule) gqlerror.List {
+ if rules == nil {
+ rules = specifiedRules
+ }
+
+ var errs gqlerror.List
+ if schema == nil {
+ errs = append(errs, gqlerror.Errorf("cannot validate as Schema is nil"))
+ }
+ if doc == nil {
+ errs = append(errs, gqlerror.Errorf("cannot validate as QueryDocument is nil"))
+ }
+ if len(errs) > 0 {
+ return errs
+ }
+ observers := &core.Events{}
+ for i := range rules {
+ rule := rules[i]
+ rule.RuleFunc(observers, func(options ...ErrorOption) {
+ err := &gqlerror.Error{
+ Rule: rule.Name,
+ }
+ for _, o := range options {
+ o(err)
+ }
+ errs = append(errs, err)
+ })
+ }
+
+ Walk(schema, doc, observers)
+ return errs
+}
+
+func ValidateWithRules(schema *Schema, doc *QueryDocument, rules *validatorrules.Rules) gqlerror.List {
+ if rules == nil {
+ rules = validatorrules.NewDefaultRules()
+ }
+
+ var errs gqlerror.List
+ if schema == nil {
+ errs = append(errs, gqlerror.Errorf("cannot validate as Schema is nil"))
+ }
+ if doc == nil {
+ errs = append(errs, gqlerror.Errorf("cannot validate as QueryDocument is nil"))
+ }
+ if len(errs) > 0 {
+ return errs
+ }
+ observers := &core.Events{}
+
+ var currentRules []Rule // nolint:prealloc // would require extra local refs for len
+ for name, ruleFunc := range rules.GetInner() {
+ currentRules = append(currentRules, Rule{Name: name, RuleFunc: ruleFunc})
+ // ensure deterministic order evaluation
+ sort.Sort(core.NameSorter(currentRules))
+ }
+
+ for _, currentRule := range currentRules {
+ currentRule.RuleFunc(observers, func(options ...ErrorOption) {
+ err := &gqlerror.Error{
+ Rule: currentRule.Name,
+ }
+ for _, o := range options {
+ o(err)
+ }
+ errs = append(errs, err)
+ })
+ }
+
+ Walk(schema, doc, observers)
+ return errs
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/vars.go b/vendor/github.com/vektah/gqlparser/v2/validator/vars.go
similarity index 95%
rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/vars.go
rename to vendor/github.com/vektah/gqlparser/v2/validator/vars.go
index 86be7c4df2..205a7fb516 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/vars.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/vars.go
@@ -7,10 +7,11 @@ import (
"strconv"
"strings"
- "github.com/open-policy-agent/opa/internal/gqlparser/ast"
- "github.com/open-policy-agent/opa/internal/gqlparser/gqlerror"
+ "github.com/vektah/gqlparser/v2/ast"
+ "github.com/vektah/gqlparser/v2/gqlerror"
)
+//nolint:staticcheck // We do not care about capitalized error strings
var ErrUnexpectedType = fmt.Errorf("Unexpected Type")
// VariableValues coerces and validates variable values
@@ -55,19 +56,19 @@ func VariableValues(schema *ast.Schema, op *ast.OperationDefinition, variables m
jsonNumber, isJSONNumber := val.(json.Number)
if isJSONNumber {
- if v.Type.NamedType == "Int" {
+ switch v.Type.NamedType {
+ case "Int":
n, err := jsonNumber.Int64()
if err != nil {
return nil, gqlerror.ErrorPathf(validator.path, "cannot use value %d as %s", n, v.Type.NamedType)
}
rv = reflect.ValueOf(n)
- } else if v.Type.NamedType == "Float" {
+ case "Float":
f, err := jsonNumber.Float64()
if err != nil {
return nil, gqlerror.ErrorPathf(validator.path, "cannot use value %f as %s", f, v.Type.NamedType)
}
rv = reflect.ValueOf(f)
-
}
}
if rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Interface {
@@ -181,7 +182,7 @@ func (v *varValidator) validateVarType(typ *ast.Type, val reflect.Value) (reflec
return val, gqlerror.ErrorPathf(v.path, "cannot use %s as %s", kind.String(), typ.NamedType)
case ast.InputObject:
if val.Kind() != reflect.Map {
- return val, gqlerror.ErrorPathf(v.path, "must be a %s", def.Name)
+ return val, gqlerror.ErrorPathf(v.path, "must be a %s, not a %s", def.Name, val.Kind())
}
// check for unknown fields
@@ -222,7 +223,7 @@ func (v *varValidator) validateVarType(typ *ast.Type, val reflect.Value) (reflec
if fieldDef.Type.NonNull && field.IsNil() {
return val, gqlerror.ErrorPathf(v.path, "cannot be null")
}
- //allow null object field and skip it
+ // allow null object field and skip it
if !fieldDef.Type.NonNull && field.IsNil() {
continue
}
diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore
index 6bf3abc41e..2b53a25e1e 100644
--- a/vendor/go.opentelemetry.io/otel/.codespellignore
+++ b/vendor/go.opentelemetry.io/otel/.codespellignore
@@ -7,3 +7,4 @@ ans
nam
valu
thirdparty
+addOpt
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
index 5f69cc027c..b01762ffcc 100644
--- a/vendor/go.opentelemetry.io/otel/.golangci.yml
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -10,6 +10,7 @@ linters:
- depguard
- errcheck
- errorlint
+ - gocritic
- godot
- gosec
- govet
@@ -86,6 +87,18 @@ linters:
deny:
- pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal
desc: Do not use cross-module internal packages.
+ gocritic:
+ disabled-checks:
+ - appendAssign
+ - commentedOutCode
+ - dupArg
+ - hugeParam
+ - importShadow
+ - preferDecodeRune
+ - rangeValCopy
+ - unnamedResult
+ - whyNoLint
+ enable-all: true
godot:
exclude:
# Exclude links.
@@ -167,7 +180,10 @@ linters:
- fmt.Print
- fmt.Printf
- fmt.Println
+ - name: unused-parameter
+ - name: unused-receiver
- name: unnecessary-stmt
+ - name: use-any
- name: useless-break
- name: var-declaration
- name: var-naming
@@ -224,10 +240,6 @@ linters:
- linters:
- gosec
text: 'G402: TLS MinVersion too low.'
- paths:
- - third_party$
- - builtin$
- - examples$
issues:
max-issues-per-linter: 0
max-same-issues: 0
@@ -237,14 +249,12 @@ formatters:
- goimports
- golines
settings:
+ gofumpt:
+ extra-rules: true
goimports:
local-prefixes:
- - go.opentelemetry.io
+ - go.opentelemetry.io/otel
golines:
max-len: 120
exclusions:
generated: lax
- paths:
- - third_party$
- - builtin$
- - examples$
diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore
index 40d62fa2eb..5328505888 100644
--- a/vendor/go.opentelemetry.io/otel/.lycheeignore
+++ b/vendor/go.opentelemetry.io/otel/.lycheeignore
@@ -2,5 +2,8 @@ http://localhost
http://jaeger-collector
https://github.com/open-telemetry/opentelemetry-go/milestone/
https://github.com/open-telemetry/opentelemetry-go/projects
+# Weaver model URL for semantic-conventions repository.
+https?:\/\/github\.com\/open-telemetry\/semantic-conventions\/archive\/refs\/tags\/[^.]+\.zip\[[^]]+]
file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries
file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual
+http://4.3.2.1:78/user/123
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
index 4acc75701b..f3abcfdc2e 100644
--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -11,6 +11,93 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
+## [1.38.0/0.60.0/0.14.0/0.0.13] 2025-08-29
+
+This release is the last to support [Go 1.23].
+The next release will require at least [Go 1.24].
+
+### Added
+
+- Add native histogram exemplar support in `go.opentelemetry.io/otel/exporters/prometheus`. (#6772)
+- Add template attribute functions to the `go.opentelmetry.io/otel/semconv/v1.34.0` package. (#6939)
+ - `ContainerLabel`
+ - `DBOperationParameter`
+ - `DBSystemParameter`
+ - `HTTPRequestHeader`
+ - `HTTPResponseHeader`
+ - `K8SCronJobAnnotation`
+ - `K8SCronJobLabel`
+ - `K8SDaemonSetAnnotation`
+ - `K8SDaemonSetLabel`
+ - `K8SDeploymentAnnotation`
+ - `K8SDeploymentLabel`
+ - `K8SJobAnnotation`
+ - `K8SJobLabel`
+ - `K8SNamespaceAnnotation`
+ - `K8SNamespaceLabel`
+ - `K8SNodeAnnotation`
+ - `K8SNodeLabel`
+ - `K8SPodAnnotation`
+ - `K8SPodLabel`
+ - `K8SReplicaSetAnnotation`
+ - `K8SReplicaSetLabel`
+ - `K8SStatefulSetAnnotation`
+ - `K8SStatefulSetLabel`
+ - `ProcessEnvironmentVariable`
+ - `RPCConnectRPCRequestMetadata`
+ - `RPCConnectRPCResponseMetadata`
+ - `RPCGRPCRequestMetadata`
+ - `RPCGRPCResponseMetadata`
+- Add `ErrorType` attribute helper function to the `go.opentelmetry.io/otel/semconv/v1.34.0` package. (#6962)
+- Add `WithAllowKeyDuplication` in `go.opentelemetry.io/otel/sdk/log` which can be used to disable deduplication for log records. (#6968)
+- Add `WithCardinalityLimit` option to configure the cardinality limit in `go.opentelemetry.io/otel/sdk/metric`. (#6996, #7065, #7081, #7164, #7165, #7179)
+- Add `Clone` method to `Record` in `go.opentelemetry.io/otel/log` that returns a copy of the record with no shared state. (#7001)
+- Add experimental self-observability span and batch span processor metrics in `go.opentelemetry.io/otel/sdk/trace`.
+ Check the `go.opentelemetry.io/otel/sdk/trace/internal/x` package documentation for more information. (#7027, #6393, #7209)
+- The `go.opentelemetry.io/otel/semconv/v1.36.0` package.
+ The package contains semantic conventions from the `v1.36.0` version of the OpenTelemetry Semantic Conventions.
+ See the [migration documentation](./semconv/v1.36.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.34.0.`(#7032, #7041)
+- Add support for configuring Prometheus name translation using `WithTranslationStrategy` option in `go.opentelemetry.io/otel/exporters/prometheus`. The current default translation strategy when UTF-8 mode is enabled is `NoUTF8EscapingWithSuffixes`, but a future release will change the default strategy to `UnderscoreEscapingWithSuffixes` for compliance with the specification. (#7111)
+- Add experimental self-observability log metrics in `go.opentelemetry.io/otel/sdk/log`.
+ Check the `go.opentelemetry.io/otel/sdk/log/internal/x` package documentation for more information. (#7121)
+- Add experimental self-observability trace exporter metrics in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`.
+ Check the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x` package documentation for more information. (#7133)
+- Support testing of [Go 1.25]. (#7187)
+- The `go.opentelemetry.io/otel/semconv/v1.37.0` package.
+ The package contains semantic conventions from the `v1.37.0` version of the OpenTelemetry Semantic Conventions.
+ See the [migration documentation](./semconv/v1.37.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.36.0.`(#7254)
+
+### Changed
+
+- Optimize `TraceIDFromHex` and `SpanIDFromHex` in `go.opentelemetry.io/otel/sdk/trace`. (#6791)
+- Change `AssertEqual` in `go.opentelemetry.io/otel/log/logtest` to accept `TestingT` in order to support benchmarks and fuzz tests. (#6908)
+- Change `DefaultExemplarReservoirProviderSelector` in `go.opentelemetry.io/otel/sdk/metric` to use `runtime.GOMAXPROCS(0)` instead of `runtime.NumCPU()` for the `FixedSizeReservoirProvider` default size. (#7094)
+
+### Fixed
+
+- `SetBody` method of `Record` in `go.opentelemetry.io/otel/sdk/log` now deduplicates key-value collections (`log.Value` of `log.KindMap` from `go.opentelemetry.io/otel/log`). (#7002)
+- Fix `go.opentelemetry.io/otel/exporters/prometheus` to not append a suffix if it's already present in metric name. (#7088)
+- Fix the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` self-observability component type and name. (#7195)
+- Fix partial export count metric in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7199)
+
+### Deprecated
+
+- Deprecate `WithoutUnits` and `WithoutCounterSuffixes` options, preferring `WithTranslationStrategy` instead. (#7111)
+- Deprecate support for `OTEL_GO_X_CARDINALITY_LIMIT` environment variable in `go.opentelemetry.io/otel/sdk/metric`. Use `WithCardinalityLimit` option instead. (#7166)
+
+## [0.59.1] 2025-07-21
+
+### Changed
+
+- Retract `v0.59.0` release of `go.opentelemetry.io/otel/exporters/prometheus` module which appends incorrect unit suffixes. (#7046)
+- Change `go.opentelemetry.io/otel/exporters/prometheus` to no longer deduplicate suffixes when UTF8 is enabled.
+ It is recommended to disable unit and counter suffixes in the exporter, and manually add suffixes if you rely on the existing behavior. (#7044)
+
+### Fixed
+
+- Fix `go.opentelemetry.io/otel/exporters/prometheus` to properly handle unit suffixes when the unit is in brackets.
+ E.g. `{spans}`. (#7044)
+
## [1.37.0/0.59.0/0.13.0] 2025-06-25
### Added
@@ -3343,7 +3430,8 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.37.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.38.0...HEAD
+[1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0
[1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0
[0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2
[0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1
@@ -3439,6 +3527,7 @@ It contains api and sdk for trace and meter.
+[Go 1.25]: https://go.dev/doc/go1.25
[Go 1.24]: https://go.dev/doc/go1.24
[Go 1.23]: https://go.dev/doc/go1.23
[Go 1.22]: https://go.dev/doc/go1.22
diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS
index 945a07d2b0..26a03aed1d 100644
--- a/vendor/go.opentelemetry.io/otel/CODEOWNERS
+++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS
@@ -12,6 +12,6 @@
# https://help.github.com/en/articles/about-code-owners
#
-* @MrAlias @XSAM @dashpole @pellared @dmathieu
+* @MrAlias @XSAM @dashpole @pellared @dmathieu @flc1125
CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
index f9ddc281fc..0b3ae855c1 100644
--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -192,6 +192,35 @@ should have `go test -bench` output in their description.
should have [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat)
output in their description.
+## Dependencies
+
+This project uses [Go Modules] for dependency management. All modules will use
+`go.mod` to explicitly list all direct and indirect dependencies, ensuring a
+clear dependency graph. The `go.sum` file for each module will be committed to
+the repository and used to verify the integrity of downloaded modules,
+preventing malicious tampering.
+
+This project uses automated dependency update tools (i.e. dependabot,
+renovatebot) to manage updates to dependencies. This ensures that dependencies
+are kept up-to-date with the latest security patches and features and are
+reviewed before being merged. If you would like to propose a change to a
+dependency it should be done through a pull request that updates the `go.mod`
+file and includes a description of the change.
+
+See the [versioning and compatibility](./VERSIONING.md) policy for more details
+about dependency compatibility.
+
+[Go Modules]: https://pkg.go.dev/cmd/go#hdr-Modules__module_versions__and_more
+
+### Environment Dependencies
+
+This project does not partition dependencies based on the environment (i.e.
+`development`, `staging`, `production`).
+
+Only the dependencies explicitly included in the released modules have be
+tested and verified to work with the released code. No other guarantee is made
+about the compatibility of other dependencies.
+
## Documentation
Each (non-internal, non-test) package must be documented using
@@ -233,6 +262,10 @@ For a non-comprehensive but foundational overview of these best practices
the [Effective Go](https://golang.org/doc/effective_go.html) documentation
is an excellent starting place.
+We also recommend following the
+[Go Code Review Comments](https://go.dev/wiki/CodeReviewComments)
+that collects common comments made during reviews of Go code.
+
As a convenience for developers building this project the `make precommit`
will format, lint, validate, and in some cases fix the changes you plan to
submit. This check will need to pass for your changes to be able to be
@@ -586,6 +619,10 @@ See also:
### Testing
+We allow using [`testify`](https://github.com/stretchr/testify) even though
+it is seen as non-idiomatic according to
+the [Go Test Comments](https://go.dev/wiki/TestComments#assert-libraries) page.
+
The tests should never leak goroutines.
Use the term `ConcurrentSafe` in the test name when it aims to verify the
@@ -640,13 +677,6 @@ should be canceled.
## Approvers and Maintainers
-### Triagers
-
-- [Alex Kats](https://github.com/akats7), Capital One
-- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent
-
-### Approvers
-
### Maintainers
- [Damien Mathieu](https://github.com/dmathieu), Elastic ([GPG](https://keys.openpgp.org/search?q=5A126B972A81A6CE443E5E1B408B8E44F0873832))
@@ -655,6 +685,21 @@ should be canceled.
- [Sam Xie](https://github.com/XSAM), Splunk ([GPG](https://keys.openpgp.org/search?q=AEA033782371ABB18EE39188B8044925D6FEEBEA))
- [Tyler Yahn](https://github.com/MrAlias), Splunk ([GPG](https://keys.openpgp.org/search?q=0x46B0F3E1A8B1BA5A))
+For more information about the maintainer role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#maintainer).
+
+### Approvers
+
+- [Flc](https://github.com/flc1125), Independent
+
+For more information about the approver role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#approver).
+
+### Triagers
+
+- [Alex Kats](https://github.com/akats7), Capital One
+- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent
+
+For more information about the triager role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#triager).
+
### Emeritus
- [Aaron Clawson](https://github.com/MadVikingGod)
@@ -665,6 +710,8 @@ should be canceled.
- [Josh MacDonald](https://github.com/jmacd)
- [Liz Fong-Jones](https://github.com/lizthegrey)
+For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager).
+
### Become an Approver or a Maintainer
See the [community membership document in OpenTelemetry community
diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE
index 261eeb9e9f..f1aee0f110 100644
--- a/vendor/go.opentelemetry.io/otel/LICENSE
+++ b/vendor/go.opentelemetry.io/otel/LICENSE
@@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+--------------------------------------------------------------------------------
+
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
index 4fa423ca02..bc0f1f92d1 100644
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -34,9 +34,6 @@ $(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS)
MULTIMOD = $(TOOLS)/multimod
$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod
-SEMCONVGEN = $(TOOLS)/semconvgen
-$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen
-
CROSSLINK = $(TOOLS)/crosslink
$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink
@@ -71,7 +68,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck
$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
.PHONY: tools
-tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
+tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
# Virtualized python tools via docker
@@ -284,7 +281,7 @@ semconv-generate: $(SEMCONVKIT)
docker run --rm \
-u $(DOCKER_USER) \
--env HOME=/tmp/weaver \
- --mount 'type=bind,source=$(PWD)/semconv,target=/home/weaver/templates/registry/go,readonly' \
+ --mount 'type=bind,source=$(PWD)/semconv/templates,target=/home/weaver/templates,readonly' \
--mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \
--mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \
$(WEAVER_IMAGE) registry generate \
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
index 5fa1b75c60..6b7ab5f219 100644
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -53,18 +53,25 @@ Currently, this project supports the following environments.
| OS | Go Version | Architecture |
|----------|------------|--------------|
+| Ubuntu | 1.25 | amd64 |
| Ubuntu | 1.24 | amd64 |
| Ubuntu | 1.23 | amd64 |
+| Ubuntu | 1.25 | 386 |
| Ubuntu | 1.24 | 386 |
| Ubuntu | 1.23 | 386 |
+| Ubuntu | 1.25 | arm64 |
| Ubuntu | 1.24 | arm64 |
| Ubuntu | 1.23 | arm64 |
+| macOS 13 | 1.25 | amd64 |
| macOS 13 | 1.24 | amd64 |
| macOS 13 | 1.23 | amd64 |
+| macOS | 1.25 | arm64 |
| macOS | 1.24 | arm64 |
| macOS | 1.23 | arm64 |
+| Windows | 1.25 | amd64 |
| Windows | 1.24 | amd64 |
| Windows | 1.23 | amd64 |
+| Windows | 1.25 | 386 |
| Windows | 1.24 | 386 |
| Windows | 1.23 | 386 |
diff --git a/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml b/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml
new file mode 100644
index 0000000000..8041fc62e4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml
@@ -0,0 +1,203 @@
+header:
+ schema-version: "1.0.0"
+ expiration-date: "2026-08-04T00:00:00.000Z"
+ last-updated: "2025-08-04"
+ last-reviewed: "2025-08-04"
+ commit-hash: 69e81088ad40f45a0764597326722dea8f3f00a8
+ project-url: https://github.com/open-telemetry/opentelemetry-go
+ project-release: "v1.37.0"
+ changelog: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CHANGELOG.md
+ license: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/LICENSE
+
+project-lifecycle:
+ status: active
+ bug-fixes-only: false
+ core-maintainers:
+ - https://github.com/dmathieu
+ - https://github.com/dashpole
+ - https://github.com/pellared
+ - https://github.com/XSAM
+ - https://github.com/MrAlias
+ release-process: |
+ See https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/RELEASING.md
+
+contribution-policy:
+ accepts-pull-requests: true
+ accepts-automated-pull-requests: true
+ automated-tools-list:
+ - automated-tool: dependabot
+ action: allowed
+ comment: Automated dependency updates are accepted.
+ - automated-tool: renovatebot
+ action: allowed
+ comment: Automated dependency updates are accepted.
+ - automated-tool: opentelemetrybot
+ action: allowed
+ comment: Automated OpenTelemetry actions are accepted.
+ contributing-policy: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md
+ code-of-conduct: https://github.com/open-telemetry/.github/blob/ffa15f76b65ec7bcc41f6a0b277edbb74f832206/CODE_OF_CONDUCT.md
+
+documentation:
+ - https://pkg.go.dev/go.opentelemetry.io/otel
+ - https://opentelemetry.io/docs/instrumentation/go/
+
+distribution-points:
+ - pkg:golang/go.opentelemetry.io/otel
+ - pkg:golang/go.opentelemetry.io/otel/bridge/opencensus
+ - pkg:golang/go.opentelemetry.io/otel/bridge/opencensus/test
+ - pkg:golang/go.opentelemetry.io/otel/bridge/opentracing
+ - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
+ - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
+ - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace
+ - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
+ - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
+ - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
+ - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdouttrace
+ - pkg:golang/go.opentelemetry.io/otel/exporters/zipkin
+ - pkg:golang/go.opentelemetry.io/otel/metric
+ - pkg:golang/go.opentelemetry.io/otel/sdk
+ - pkg:golang/go.opentelemetry.io/otel/sdk/metric
+ - pkg:golang/go.opentelemetry.io/otel/trace
+ - pkg:golang/go.opentelemetry.io/otel/exporters/prometheus
+ - pkg:golang/go.opentelemetry.io/otel/log
+ - pkg:golang/go.opentelemetry.io/otel/log/logtest
+ - pkg:golang/go.opentelemetry.io/otel/sdk/log
+ - pkg:golang/go.opentelemetry.io/otel/sdk/log/logtest
+ - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
+ - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
+ - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutlog
+ - pkg:golang/go.opentelemetry.io/otel/schema
+
+security-artifacts:
+ threat-model:
+ threat-model-created: false
+ comment: |
+ No formal threat model created yet.
+ self-assessment:
+ self-assessment-created: false
+ comment: |
+ No formal self-assessment yet.
+
+security-testing:
+ - tool-type: sca
+ tool-name: Dependabot
+ tool-version: latest
+ tool-url: https://github.com/dependabot
+ tool-rulesets:
+ - built-in
+ integration:
+ ad-hoc: false
+ ci: true
+ before-release: true
+ comment: |
+ Automated dependency updates.
+ - tool-type: sast
+ tool-name: golangci-lint
+ tool-version: latest
+ tool-url: https://github.com/golangci/golangci-lint
+ tool-rulesets:
+ - built-in
+ integration:
+ ad-hoc: false
+ ci: true
+ before-release: true
+ comment: |
+ Static analysis in CI.
+ - tool-type: fuzzing
+ tool-name: OSS-Fuzz
+ tool-version: latest
+ tool-url: https://github.com/google/oss-fuzz
+ tool-rulesets:
+ - default
+ integration:
+ ad-hoc: false
+ ci: false
+ before-release: false
+ comment: |
+ OpenTelemetry Go is integrated with OSS-Fuzz for continuous fuzz testing. See https://github.com/google/oss-fuzz/tree/f0f9b221190c6063a773bea606d192ebfc3d00cf/projects/opentelemetry-go for more details.
+ - tool-type: sast
+ tool-name: CodeQL
+ tool-version: latest
+ tool-url: https://github.com/github/codeql
+ tool-rulesets:
+ - default
+ integration:
+ ad-hoc: false
+ ci: true
+ before-release: true
+ comment: |
+ CodeQL static analysis is run in CI for all commits and pull requests to detect security vulnerabilities in the Go source code. See https://github.com/open-telemetry/opentelemetry-go/blob/d5b5b059849720144a03ca5c87561bfbdb940119/.github/workflows/codeql-analysis.yml for workflow details.
+ - tool-type: sca
+ tool-name: govulncheck
+ tool-version: latest
+ tool-url: https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck
+ tool-rulesets:
+ - default
+ integration:
+ ad-hoc: false
+ ci: true
+ before-release: true
+ comment: |
+ govulncheck is run in CI to detect known vulnerabilities in Go modules and code paths. See https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/.github/workflows/ci.yml for workflow configuration.
+
+security-assessments:
+ - auditor-name: 7ASecurity
+ auditor-url: https://7asecurity.com
+ auditor-report: https://7asecurity.com/reports/pentest-report-opentelemetry.pdf
+ report-year: 2023
+ comment: |
+ This independent penetration test by 7ASecurity covered OpenTelemetry repositories including opentelemetry-go. The assessment focused on codebase review, threat modeling, and vulnerability identification. See the report for details of findings and recommendations applicable to opentelemetry-go. No critical vulnerabilities were found for this repository.
+
+security-contacts:
+ - type: email
+ value: cncf-opentelemetry-security@lists.cncf.io
+ primary: true
+ - type: website
+ value: https://github.com/open-telemetry/opentelemetry-go/security/policy
+ primary: false
+
+vulnerability-reporting:
+ accepts-vulnerability-reports: true
+ email-contact: cncf-opentelemetry-security@lists.cncf.io
+ security-policy: https://github.com/open-telemetry/opentelemetry-go/security/policy
+ comment: |
+ Security issues should be reported via email or GitHub security policy page.
+
+dependencies:
+ third-party-packages: true
+ dependencies-lists:
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/test/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opentracing/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploggrpc/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploghttp/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracegrpc/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracehttp/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/prometheus/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutlog/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutmetric/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdouttrace/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/zipkin/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/internal/tools/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/logtest/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/metric/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/schema/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/logtest/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/metric/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/internal/telemetry/test/go.mod
+ dependencies-lifecycle:
+ policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md
+ comment: |
+ Dependency lifecycle managed via go.mod and renovatebot.
+ env-dependencies-policy:
+ policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md
+ comment: |
+ See contributing policy for environment usage.
diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
index 318e42fcab..6333d34b31 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
@@ -78,7 +78,7 @@ func DefaultEncoder() Encoder {
defaultEncoderOnce.Do(func() {
defaultEncoderInstance = &defaultAttrEncoder{
pool: sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &bytes.Buffer{}
},
},
@@ -96,11 +96,11 @@ func (d *defaultAttrEncoder) Encode(iter Iterator) string {
for iter.Next() {
i, keyValue := iter.IndexedAttribute()
if i > 0 {
- _, _ = buf.WriteRune(',')
+ _ = buf.WriteByte(',')
}
copyAndEscape(buf, string(keyValue.Key))
- _, _ = buf.WriteRune('=')
+ _ = buf.WriteByte('=')
if keyValue.Value.Type() == STRING {
copyAndEscape(buf, keyValue.Value.AsString())
@@ -122,14 +122,14 @@ func copyAndEscape(buf *bytes.Buffer, val string) {
for _, ch := range val {
switch ch {
case '=', ',', escapeChar:
- _, _ = buf.WriteRune(escapeChar)
+ _ = buf.WriteByte(escapeChar)
}
_, _ = buf.WriteRune(ch)
}
}
-// Valid returns true if this encoder ID was allocated by
-// `NewEncoderID`. Invalid encoder IDs will not be cached.
+// Valid reports whether this encoder ID was allocated by
+// [NewEncoderID]. Invalid encoder IDs will not be cached.
func (id EncoderID) Valid() bool {
return id.value != 0
}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go
index 3eeaa5d442..624ebbe381 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/filter.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go
@@ -15,8 +15,8 @@ type Filter func(KeyValue) bool
//
// If keys is empty a deny-all filter is returned.
func NewAllowKeysFilter(keys ...Key) Filter {
- if len(keys) <= 0 {
- return func(kv KeyValue) bool { return false }
+ if len(keys) == 0 {
+ return func(KeyValue) bool { return false }
}
allowed := make(map[Key]struct{}, len(keys))
@@ -34,8 +34,8 @@ func NewAllowKeysFilter(keys ...Key) Filter {
//
// If keys is empty an allow-all filter is returned.
func NewDenyKeysFilter(keys ...Key) Filter {
- if len(keys) <= 0 {
- return func(kv KeyValue) bool { return true }
+ if len(keys) == 0 {
+ return func(KeyValue) bool { return true }
}
forbid := make(map[Key]struct{}, len(keys))
diff --git a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go
index b76d2bbfdb..0875504302 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go
@@ -12,7 +12,7 @@ import (
)
// BoolSliceValue converts a bool slice into an array with same elements as slice.
-func BoolSliceValue(v []bool) interface{} {
+func BoolSliceValue(v []bool) any {
var zero bool
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
@@ -20,7 +20,7 @@ func BoolSliceValue(v []bool) interface{} {
}
// Int64SliceValue converts an int64 slice into an array with same elements as slice.
-func Int64SliceValue(v []int64) interface{} {
+func Int64SliceValue(v []int64) any {
var zero int64
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
@@ -28,7 +28,7 @@ func Int64SliceValue(v []int64) interface{} {
}
// Float64SliceValue converts a float64 slice into an array with same elements as slice.
-func Float64SliceValue(v []float64) interface{} {
+func Float64SliceValue(v []float64) any {
var zero float64
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
@@ -36,7 +36,7 @@ func Float64SliceValue(v []float64) interface{} {
}
// StringSliceValue converts a string slice into an array with same elements as slice.
-func StringSliceValue(v []string) interface{} {
+func StringSliceValue(v []string) any {
var zero string
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
@@ -44,7 +44,7 @@ func StringSliceValue(v []string) interface{} {
}
// AsBoolSlice converts a bool array into a slice into with same elements as array.
-func AsBoolSlice(v interface{}) []bool {
+func AsBoolSlice(v any) []bool {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array {
return nil
@@ -57,7 +57,7 @@ func AsBoolSlice(v interface{}) []bool {
}
// AsInt64Slice converts an int64 array into a slice into with same elements as array.
-func AsInt64Slice(v interface{}) []int64 {
+func AsInt64Slice(v any) []int64 {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array {
return nil
@@ -70,7 +70,7 @@ func AsInt64Slice(v interface{}) []int64 {
}
// AsFloat64Slice converts a float64 array into a slice into with same elements as array.
-func AsFloat64Slice(v interface{}) []float64 {
+func AsFloat64Slice(v any) []float64 {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array {
return nil
@@ -83,7 +83,7 @@ func AsFloat64Slice(v interface{}) []float64 {
}
// AsStringSlice converts a string array into a slice into with same elements as array.
-func AsStringSlice(v interface{}) []string {
+func AsStringSlice(v any) []string {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array {
return nil
diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go
index f2ba89ce4b..8df6249f02 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go
@@ -25,8 +25,8 @@ type oneIterator struct {
attr KeyValue
}
-// Next moves the iterator to the next position. Returns false if there are no
-// more attributes.
+// Next moves the iterator to the next position.
+// Next reports whether there are more attributes.
func (i *Iterator) Next() bool {
i.idx++
return i.idx < i.Len()
@@ -106,7 +106,8 @@ func (oi *oneIterator) advance() {
}
}
-// Next returns true if there is another attribute available.
+// Next moves the iterator to the next position.
+// Next reports whether there is another attribute available.
func (m *MergeIterator) Next() bool {
if m.one.done && m.two.done {
return false
diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go
index d9a22c6502..80a9e5643f 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/key.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/key.go
@@ -117,7 +117,7 @@ func (k Key) StringSlice(v []string) KeyValue {
}
}
-// Defined returns true for non-empty keys.
+// Defined reports whether the key is not empty.
func (k Key) Defined() bool {
return len(k) != 0
}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go
index 3028f9a40f..8c6928ca79 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/kv.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go
@@ -13,7 +13,7 @@ type KeyValue struct {
Value Value
}
-// Valid returns if kv is a valid OpenTelemetry attribute.
+// Valid reports whether kv is a valid OpenTelemetry attribute.
func (kv KeyValue) Valid() bool {
return kv.Key.Defined() && kv.Value.Type() != INVALID
}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
index 6cbefceadf..64735d382e 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/set.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -31,11 +31,11 @@ type (
// Distinct is a unique identifier of a Set.
//
- // Distinct is designed to be ensures equivalence stability: comparisons
- // will return the save value across versions. For this reason, Distinct
- // should always be used as a map key instead of a Set.
+ // Distinct is designed to ensure equivalence stability: comparisons will
+ // return the same value across versions. For this reason, Distinct should
+ // always be used as a map key instead of a Set.
Distinct struct {
- iface interface{}
+ iface any
}
// Sortable implements sort.Interface, used for sorting KeyValue.
@@ -70,7 +70,7 @@ func (d Distinct) reflectValue() reflect.Value {
return reflect.ValueOf(d.iface)
}
-// Valid returns true if this value refers to a valid Set.
+// Valid reports whether this value refers to a valid Set.
func (d Distinct) Valid() bool {
return d.iface != nil
}
@@ -120,7 +120,7 @@ func (l *Set) Value(k Key) (Value, bool) {
return Value{}, false
}
-// HasValue tests whether a key is defined in this set.
+// HasValue reports whether a key is defined in this set.
func (l *Set) HasValue(k Key) bool {
if l == nil {
return false
@@ -155,7 +155,7 @@ func (l *Set) Equivalent() Distinct {
return l.equivalent
}
-// Equals returns true if the argument set is equivalent to this set.
+// Equals reports whether the argument set is equivalent to this set.
func (l *Set) Equals(o *Set) bool {
return l.Equivalent() == o.Equivalent()
}
@@ -344,7 +344,7 @@ func computeDistinct(kvs []KeyValue) Distinct {
// computeDistinctFixed computes a Distinct for small slices. It returns nil
// if the input is too large for this code path.
-func computeDistinctFixed(kvs []KeyValue) interface{} {
+func computeDistinctFixed(kvs []KeyValue) any {
switch len(kvs) {
case 1:
return [1]KeyValue(kvs)
@@ -373,7 +373,7 @@ func computeDistinctFixed(kvs []KeyValue) interface{} {
// computeDistinctReflect computes a Distinct using reflection, works for any
// size input.
-func computeDistinctReflect(kvs []KeyValue) interface{} {
+func computeDistinctReflect(kvs []KeyValue) any {
at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
for i, keyValue := range kvs {
*(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
@@ -387,7 +387,7 @@ func (l *Set) MarshalJSON() ([]byte, error) {
}
// MarshalLog is the marshaling function used by the logging system to represent this Set.
-func (l Set) MarshalLog() interface{} {
+func (l Set) MarshalLog() any {
kvs := make(map[string]string)
for _, kv := range l.ToSlice() {
kvs[string(kv.Key)] = kv.Value.Emit()
diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go
index 817eecacf1..653c33a861 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/value.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/value.go
@@ -22,7 +22,7 @@ type Value struct {
vtype Type
numeric uint64
stringly string
- slice interface{}
+ slice any
}
const (
@@ -199,8 +199,8 @@ func (v Value) asStringSlice() []string {
type unknownValueType struct{}
-// AsInterface returns Value's data as interface{}.
-func (v Value) AsInterface() interface{} {
+// AsInterface returns Value's data as any.
+func (v Value) AsInterface() any {
switch v.Type() {
case BOOL:
return v.AsBool()
@@ -262,7 +262,7 @@ func (v Value) Emit() string {
func (v Value) MarshalJSON() ([]byte, error) {
var jsonVal struct {
Type string
- Value interface{}
+ Value any
}
jsonVal.Type = v.Type().String()
jsonVal.Value = v.AsInterface()
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
index 0e1fe24220..f83a448ec6 100644
--- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -812,7 +812,7 @@ var safeKeyCharset = [utf8.RuneSelf]bool{
// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name.
// Baggage name is a valid, non-empty UTF-8 string.
func validateBaggageName(s string) bool {
- if len(s) == 0 {
+ if s == "" {
return false
}
@@ -828,7 +828,7 @@ func validateBaggageValue(s string) bool {
// validateKey checks if the string is a valid W3C Baggage key.
func validateKey(s string) bool {
- if len(s) == 0 {
+ if s == "" {
return false
}
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
index 49a35b1225..d48847ed86 100644
--- a/vendor/go.opentelemetry.io/otel/codes/codes.go
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -67,7 +67,7 @@ func (c *Code) UnmarshalJSON(b []byte) error {
return errors.New("nil receiver passed to UnmarshalJSON")
}
- var x interface{}
+ var x any
if err := json.Unmarshal(b, &x); err != nil {
return err
}
@@ -102,5 +102,5 @@ func (c *Code) MarshalJSON() ([]byte, error) {
if !ok {
return nil, fmt.Errorf("invalid code: %d", *c)
}
- return []byte(fmt.Sprintf("%q", str)), nil
+ return fmt.Appendf(nil, "%q", str), nil
}
diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
index 935bd48763..a311fbb483 100644
--- a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
+++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
@@ -1,4 +1,4 @@
# This is a renovate-friendly source of Docker images.
-FROM python:3.13.5-slim-bullseye@sha256:5b9fc0d8ef79cfb5f300e61cb516e0c668067bbf77646762c38c94107e230dbc AS python
-FROM otel/weaver:v0.15.2@sha256:b13acea09f721774daba36344861f689ac4bb8d6ecd94c4600b4d590c8fb34b9 AS weaver
+FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python
+FROM otel/weaver:v0.17.1@sha256:32523b5e44fb44418786347e9f7dde187d8797adb6d57a2ee99c245346c3cdfe AS weaver
FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
index adbca7d347..86d7f4ba08 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
@@ -41,22 +41,22 @@ func GetLogger() logr.Logger {
// Info prints messages about the general state of the API or SDK.
// This should usually be less than 5 messages a minute.
-func Info(msg string, keysAndValues ...interface{}) {
+func Info(msg string, keysAndValues ...any) {
GetLogger().V(4).Info(msg, keysAndValues...)
}
// Error prints messages about exceptional states of the API or SDK.
-func Error(err error, msg string, keysAndValues ...interface{}) {
+func Error(err error, msg string, keysAndValues ...any) {
GetLogger().Error(err, msg, keysAndValues...)
}
// Debug prints messages about all internal changes in the API or SDK.
-func Debug(msg string, keysAndValues ...interface{}) {
+func Debug(msg string, keysAndValues ...any) {
GetLogger().V(8).Info(msg, keysAndValues...)
}
// Warn prints messages about warnings in the API or SDK.
// Not an error but is likely more important than an informational event.
-func Warn(msg string, keysAndValues ...interface{}) {
+func Warn(msg string, keysAndValues ...any) {
GetLogger().V(1).Info(msg, keysAndValues...)
}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
index 49e4ac4faa..bf5cf3119b 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
@@ -26,6 +26,7 @@ import (
"sync/atomic"
"go.opentelemetry.io/auto/sdk"
+
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace"
diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE
index 261eeb9e9f..f1aee0f110 100644
--- a/vendor/go.opentelemetry.io/otel/metric/LICENSE
+++ b/vendor/go.opentelemetry.io/otel/metric/LICENSE
@@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+--------------------------------------------------------------------------------
+
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/README.md b/vendor/go.opentelemetry.io/otel/metric/noop/README.md
new file mode 100644
index 0000000000..bb89694356
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/noop/README.md
@@ -0,0 +1,3 @@
+# Metric Noop
+
+[](https://pkg.go.dev/go.opentelemetry.io/otel/metric/noop)
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
new file mode 100644
index 0000000000..9afb69e583
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
@@ -0,0 +1,296 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package noop provides an implementation of the OpenTelemetry metric API that
+// produces no telemetry and minimizes used computation resources.
+//
+// Using this package to implement the OpenTelemetry metric API will
+// effectively disable OpenTelemetry.
+//
+// This implementation can be embedded in other implementations of the
+// OpenTelemetry metric API. Doing so will mean the implementation defaults to
+// no operation for methods it does not implement.
+package noop // import "go.opentelemetry.io/otel/metric/noop"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+var (
+ // Compile-time check this implements the OpenTelemetry API.
+
+ _ metric.MeterProvider = MeterProvider{}
+ _ metric.Meter = Meter{}
+ _ metric.Observer = Observer{}
+ _ metric.Registration = Registration{}
+ _ metric.Int64Counter = Int64Counter{}
+ _ metric.Float64Counter = Float64Counter{}
+ _ metric.Int64UpDownCounter = Int64UpDownCounter{}
+ _ metric.Float64UpDownCounter = Float64UpDownCounter{}
+ _ metric.Int64Histogram = Int64Histogram{}
+ _ metric.Float64Histogram = Float64Histogram{}
+ _ metric.Int64Gauge = Int64Gauge{}
+ _ metric.Float64Gauge = Float64Gauge{}
+ _ metric.Int64ObservableCounter = Int64ObservableCounter{}
+ _ metric.Float64ObservableCounter = Float64ObservableCounter{}
+ _ metric.Int64ObservableGauge = Int64ObservableGauge{}
+ _ metric.Float64ObservableGauge = Float64ObservableGauge{}
+ _ metric.Int64ObservableUpDownCounter = Int64ObservableUpDownCounter{}
+ _ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{}
+ _ metric.Int64Observer = Int64Observer{}
+ _ metric.Float64Observer = Float64Observer{}
+)
+
+// MeterProvider is an OpenTelemetry No-Op MeterProvider.
+type MeterProvider struct{ embedded.MeterProvider }
+
+// NewMeterProvider returns a MeterProvider that does not record any telemetry.
+func NewMeterProvider() MeterProvider {
+ return MeterProvider{}
+}
+
+// Meter returns an OpenTelemetry Meter that does not record any telemetry.
+func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter {
+ return Meter{}
+}
+
+// Meter is an OpenTelemetry No-Op Meter.
+type Meter struct{ embedded.Meter }
+
+// Int64Counter returns a Counter used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) {
+ return Int64Counter{}, nil
+}
+
+// Int64UpDownCounter returns an UpDownCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
+ return Int64UpDownCounter{}, nil
+}
+
+// Int64Histogram returns a Histogram used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
+ return Int64Histogram{}, nil
+}
+
+// Int64Gauge returns a Gauge used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
+ return Int64Gauge{}, nil
+}
+
+// Int64ObservableCounter returns an ObservableCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableCounter(
+ string,
+ ...metric.Int64ObservableCounterOption,
+) (metric.Int64ObservableCounter, error) {
+ return Int64ObservableCounter{}, nil
+}
+
+// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record int64 measurements that produces no telemetry.
+func (Meter) Int64ObservableUpDownCounter(
+ string,
+ ...metric.Int64ObservableUpDownCounterOption,
+) (metric.Int64ObservableUpDownCounter, error) {
+ return Int64ObservableUpDownCounter{}, nil
+}
+
+// Int64ObservableGauge returns an ObservableGauge used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
+ return Int64ObservableGauge{}, nil
+}
+
+// Float64Counter returns a Counter used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+ return Float64Counter{}, nil
+}
+
+// Float64UpDownCounter returns an UpDownCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
+ return Float64UpDownCounter{}, nil
+}
+
+// Float64Histogram returns a Histogram used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
+ return Float64Histogram{}, nil
+}
+
+// Float64Gauge returns a Gauge used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
+ return Float64Gauge{}, nil
+}
+
+// Float64ObservableCounter returns an ObservableCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableCounter(
+ string,
+ ...metric.Float64ObservableCounterOption,
+) (metric.Float64ObservableCounter, error) {
+ return Float64ObservableCounter{}, nil
+}
+
+// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record int64 measurements that produces no telemetry.
+func (Meter) Float64ObservableUpDownCounter(
+ string,
+ ...metric.Float64ObservableUpDownCounterOption,
+) (metric.Float64ObservableUpDownCounter, error) {
+ return Float64ObservableUpDownCounter{}, nil
+}
+
+// Float64ObservableGauge returns an ObservableGauge used to record int64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableGauge(
+ string,
+ ...metric.Float64ObservableGaugeOption,
+) (metric.Float64ObservableGauge, error) {
+ return Float64ObservableGauge{}, nil
+}
+
+// RegisterCallback performs no operation.
+func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) {
+ return Registration{}, nil
+}
+
+// Observer acts as a recorder of measurements for multiple instruments in a
+// Callback, it performing no operation.
+type Observer struct{ embedded.Observer }
+
+// ObserveFloat64 performs no operation.
+func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) {
+}
+
+// ObserveInt64 performs no operation.
+func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) {
+}
+
+// Registration is the registration of a Callback with a No-Op Meter.
+type Registration struct{ embedded.Registration }
+
+// Unregister unregisters the Callback the Registration represents with the
+// No-Op Meter. This will always return nil because the No-Op Meter performs no
+// operation, including hold any record of registrations.
+func (Registration) Unregister() error { return nil }
+
+// Int64Counter is an OpenTelemetry Counter used to record int64 measurements.
+// It produces no telemetry.
+type Int64Counter struct{ embedded.Int64Counter }
+
+// Add performs no operation.
+func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64Counter is an OpenTelemetry Counter used to record float64
+// measurements. It produces no telemetry.
+type Float64Counter struct{ embedded.Float64Counter }
+
+// Add performs no operation.
+func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64
+// measurements. It produces no telemetry.
+type Int64UpDownCounter struct{ embedded.Int64UpDownCounter }
+
+// Add performs no operation.
+func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record
+// float64 measurements. It produces no telemetry.
+type Float64UpDownCounter struct{ embedded.Float64UpDownCounter }
+
+// Add performs no operation.
+func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64Histogram is an OpenTelemetry Histogram used to record int64
+// measurements. It produces no telemetry.
+type Int64Histogram struct{ embedded.Int64Histogram }
+
+// Record performs no operation.
+func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {}
+
+// Float64Histogram is an OpenTelemetry Histogram used to record float64
+// measurements. It produces no telemetry.
+type Float64Histogram struct{ embedded.Float64Histogram }
+
+// Record performs no operation.
+func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {}
+
+// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64
+// measurements. It produces no telemetry.
+type Int64Gauge struct{ embedded.Int64Gauge }
+
+// Record performs no operation.
+func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {}
+
+// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64
+// measurements. It produces no telemetry.
+type Float64Gauge struct{ embedded.Float64Gauge }
+
+// Record performs no operation.
+func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {}
+
+// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record
+// int64 measurements. It produces no telemetry.
+type Int64ObservableCounter struct {
+ metric.Int64Observable
+ embedded.Int64ObservableCounter
+}
+
+// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record
+// float64 measurements. It produces no telemetry.
+type Float64ObservableCounter struct {
+ metric.Float64Observable
+ embedded.Float64ObservableCounter
+}
+
+// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record
+// int64 measurements. It produces no telemetry.
+type Int64ObservableGauge struct {
+ metric.Int64Observable
+ embedded.Int64ObservableGauge
+}
+
+// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record
+// float64 measurements. It produces no telemetry.
+type Float64ObservableGauge struct {
+ metric.Float64Observable
+ embedded.Float64ObservableGauge
+}
+
+// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
+// used to record int64 measurements. It produces no telemetry.
+type Int64ObservableUpDownCounter struct {
+ metric.Int64Observable
+ embedded.Int64ObservableUpDownCounter
+}
+
+// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
+// used to record float64 measurements. It produces no telemetry.
+type Float64ObservableUpDownCounter struct {
+ metric.Float64Observable
+ embedded.Float64ObservableUpDownCounter
+}
+
+// Int64Observer is a recorder of int64 measurements that performs no operation.
+type Int64Observer struct{ embedded.Int64Observer }
+
+// Observe performs no operation.
+func (Int64Observer) Observe(int64, ...metric.ObserveOption) {}
+
+// Float64Observer is a recorder of float64 measurements that performs no
+// operation.
+type Float64Observer struct{ embedded.Float64Observer }
+
+// Observe performs no operation.
+func (Float64Observer) Observe(float64, ...metric.ObserveOption) {}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
index ebda5026d6..0518826020 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
@@ -20,7 +20,7 @@ type Baggage struct{}
var _ TextMapPropagator = Baggage{}
// Inject sets baggage key-values from ctx into the carrier.
-func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
+func (Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
bStr := baggage.FromContext(ctx).String()
if bStr != "" {
carrier.Set(baggageHeader, bStr)
@@ -30,7 +30,7 @@ func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
// Extract returns a copy of parent with the baggage from the carrier added.
// If carrier implements [ValuesGetter] (e.g. [HeaderCarrier]), Values is invoked
// for multiple values extraction. Otherwise, Get is called.
-func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
+func (Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
if multiCarrier, ok := carrier.(ValuesGetter); ok {
return extractMultiBaggage(parent, multiCarrier)
}
@@ -38,7 +38,7 @@ func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context
}
// Fields returns the keys who's values are set with Inject.
-func (b Baggage) Fields() []string {
+func (Baggage) Fields() []string {
return []string{baggageHeader}
}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
index 5c8c26ea2e..0a32c59aa3 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
@@ -20,7 +20,7 @@ type TextMapCarrier interface {
// must never be done outside of a new major release.
// Set stores the key-value pair.
- Set(key string, value string)
+ Set(key, value string)
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
@@ -88,7 +88,7 @@ func (hc HeaderCarrier) Values(key string) []string {
}
// Set stores the key-value pair.
-func (hc HeaderCarrier) Set(key string, value string) {
+func (hc HeaderCarrier) Set(key, value string) {
http.Header(hc).Set(key, value)
}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
index 6870e316dc..6692d2665d 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -36,7 +36,7 @@ var (
)
// Inject injects the trace context from ctx into carrier.
-func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
+func (TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
sc := trace.SpanContextFromContext(ctx)
if !sc.IsValid() {
return
@@ -77,7 +77,7 @@ func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) cont
return trace.ContextWithRemoteSpanContext(ctx, sc)
}
-func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
+func (TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
h := carrier.Get(traceparentHeader)
if h == "" {
return trace.SpanContext{}
@@ -151,6 +151,6 @@ func extractPart(dst []byte, h *string, n int) bool {
}
// Fields returns the keys who's values are set with Inject.
-func (tc TraceContext) Fields() []string {
+func (TraceContext) Fields() []string {
return []string{traceparentHeader, tracestateHeader}
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/LICENSE
index 261eeb9e9f..f1aee0f110 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/LICENSE
+++ b/vendor/go.opentelemetry.io/otel/sdk/LICENSE
@@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+--------------------------------------------------------------------------------
+
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
index 68d296cbed..1be472e917 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
@@ -19,7 +19,7 @@ import (
// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
// will also enable this).
var Resource = newFeature("RESOURCE", func(v string) (string, bool) {
- if strings.ToLower(v) == "true" {
+ if strings.EqualFold(v, "true") {
return v, true
}
return "", false
@@ -59,7 +59,7 @@ func (f Feature[T]) Lookup() (v T, ok bool) {
return f.parse(vRaw)
}
-// Enabled returns if the feature is enabled.
+// Enabled reports whether the feature is enabled.
func (f Feature[T]) Enabled() bool {
_, ok := f.Lookup()
return ok
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
index cefe4ab914..3f20eb7a56 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
@@ -13,7 +13,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk"
- semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)
type (
@@ -72,7 +72,7 @@ func StringDetector(schemaURL string, k attribute.Key, f func() (string, error))
// Detect returns a *Resource that describes the string as a value
// corresponding to attribute.Key as well as the specific schemaURL.
-func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) {
+func (sd stringDetector) Detect(context.Context) (*Resource, error) {
value, err := sd.F()
if err != nil {
return nil, fmt.Errorf("%s: %w", string(sd.K), err)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
index 0d8619715e..bbe142d203 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
@@ -11,7 +11,7 @@ import (
"os"
"regexp"
- semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)
type containerIDProvider func() (string, error)
@@ -27,7 +27,7 @@ const cgroupPath = "/proc/self/cgroup"
// Detect returns a *Resource that describes the id of the container.
// If no container id found, an empty resource will be returned.
-func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) {
+func (cgroupContainerIDDetector) Detect(context.Context) (*Resource, error) {
containerID, err := containerID()
if err != nil {
return nil, err
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
index 16a062ad8c..4a1b017eea 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -12,7 +12,7 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)
const (
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
index 7819039238..5fed33d4fb 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
@@ -8,7 +8,7 @@ import (
"errors"
"strings"
- semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)
type hostIDProvider func() (string, error)
@@ -96,7 +96,7 @@ func (r *hostIDReaderLinux) read() (string, error) {
type hostIDDetector struct{}
// Detect returns a *Resource containing the platform specific host id.
-func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) {
+func (hostIDDetector) Detect(context.Context) (*Resource, error) {
hostID, err := hostID()
if err != nil {
return nil, err
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
index 01b4d27a03..51da76e807 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
@@ -8,7 +8,7 @@ import (
"strings"
"go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)
type osDescriptionProvider func() (string, error)
@@ -32,7 +32,7 @@ type (
// Detect returns a *Resource that describes the operating system type the
// service is running on.
-func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) {
+func (osTypeDetector) Detect(context.Context) (*Resource, error) {
osType := runtimeOS()
osTypeAttribute := mapRuntimeOSToSemconvOSType(osType)
@@ -45,7 +45,7 @@ func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) {
// Detect returns a *Resource that describes the operating system the
// service is running on.
-func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
+func (osDescriptionDetector) Detect(context.Context) (*Resource, error) {
description, err := osDescription()
if err != nil {
return nil, err
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
index f537e5ca5c..7252af79fc 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
@@ -63,12 +63,12 @@ func parseOSReleaseFile(file io.Reader) map[string]string {
return values
}
-// skip returns true if the line is blank or starts with a '#' character, and
+// skip reports whether the line is blank or starts with a '#' character, and
// therefore should be skipped from processing.
func skip(line string) bool {
line = strings.TrimSpace(line)
- return len(line) == 0 || strings.HasPrefix(line, "#")
+ return line == "" || strings.HasPrefix(line, "#")
}
// parse attempts to split the provided line on the first '=' character, and then
@@ -76,7 +76,7 @@ func skip(line string) bool {
func parse(line string) (string, string, bool) {
k, v, found := strings.Cut(line, "=")
- if !found || len(k) == 0 {
+ if !found || k == "" {
return "", "", false
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
index 6712ce80d5..138e57721b 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
@@ -11,7 +11,7 @@ import (
"path/filepath"
"runtime"
- semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)
type (
@@ -112,19 +112,19 @@ type (
// Detect returns a *Resource that describes the process identifier (PID) of the
// executing process.
-func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) {
+func (processPIDDetector) Detect(context.Context) (*Resource, error) {
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil
}
// Detect returns a *Resource that describes the name of the process executable.
-func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) {
+func (processExecutableNameDetector) Detect(context.Context) (*Resource, error) {
executableName := filepath.Base(commandArgs()[0])
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil
}
// Detect returns a *Resource that describes the full path of the process executable.
-func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) {
+func (processExecutablePathDetector) Detect(context.Context) (*Resource, error) {
executablePath, err := executablePath()
if err != nil {
return nil, err
@@ -135,13 +135,13 @@ func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, err
// Detect returns a *Resource that describes all the command arguments as received
// by the process.
-func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) {
+func (processCommandArgsDetector) Detect(context.Context) (*Resource, error) {
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil
}
// Detect returns a *Resource that describes the username of the user that owns the
// process.
-func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) {
+func (processOwnerDetector) Detect(context.Context) (*Resource, error) {
owner, err := owner()
if err != nil {
return nil, err
@@ -152,17 +152,17 @@ func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) {
// Detect returns a *Resource that describes the name of the compiler used to compile
// this process image.
-func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) {
+func (processRuntimeNameDetector) Detect(context.Context) (*Resource, error) {
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil
}
// Detect returns a *Resource that describes the version of the runtime of this process.
-func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) {
+func (processRuntimeVersionDetector) Detect(context.Context) (*Resource, error) {
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil
}
// Detect returns a *Resource that describes the runtime of this process.
-func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
+func (processRuntimeDescriptionDetector) Detect(context.Context) (*Resource, error) {
runtimeDescription := fmt.Sprintf(
"go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch())
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
index 09b91e1e1b..28e1e4f7eb 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
@@ -112,7 +112,7 @@ func (r *Resource) String() string {
}
// MarshalLog is the marshaling function used by the logging system to represent this Resource.
-func (r *Resource) MarshalLog() interface{} {
+func (r *Resource) MarshalLog() any {
return struct {
Attributes attribute.Set
SchemaURL string
@@ -148,7 +148,7 @@ func (r *Resource) Iter() attribute.Iterator {
return r.attrs.Iter()
}
-// Equal returns whether r and o represent the same resource. Two resources can
+// Equal reports whether r and o represent the same resource. Two resources can
// be equal even if they have different schema URLs.
//
// See the documentation on the [Resource] type for the pitfalls of using ==
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
index 6966ed861e..9bc3e525d1 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
@@ -6,24 +6,35 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace"
import (
"context"
"errors"
+ "fmt"
"sync"
"sync/atomic"
"time"
"go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/sdk"
"go.opentelemetry.io/otel/sdk/internal/env"
+ "go.opentelemetry.io/otel/sdk/trace/internal/x"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+ "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
"go.opentelemetry.io/otel/trace"
)
// Defaults for BatchSpanProcessorOptions.
const (
- DefaultMaxQueueSize = 2048
- DefaultScheduleDelay = 5000
+ DefaultMaxQueueSize = 2048
+ // DefaultScheduleDelay is the delay interval between two consecutive exports, in milliseconds.
+ DefaultScheduleDelay = 5000
+ // DefaultExportTimeout is the duration after which an export is cancelled, in milliseconds.
DefaultExportTimeout = 30000
DefaultMaxExportBatchSize = 512
)
+var queueFull = otelconv.ErrorTypeAttr("queue_full")
+
// BatchSpanProcessorOption configures a BatchSpanProcessor.
type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions)
@@ -67,6 +78,11 @@ type batchSpanProcessor struct {
queue chan ReadOnlySpan
dropped uint32
+ selfObservabilityEnabled bool
+ callbackRegistration metric.Registration
+ spansProcessedCounter otelconv.SDKProcessorSpanProcessed
+ componentNameAttr attribute.KeyValue
+
batch []ReadOnlySpan
batchMutex sync.Mutex
timer *time.Timer
@@ -87,11 +103,7 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO
maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize)
if maxExportBatchSize > maxQueueSize {
- if DefaultMaxExportBatchSize > maxQueueSize {
- maxExportBatchSize = maxQueueSize
- } else {
- maxExportBatchSize = DefaultMaxExportBatchSize
- }
+ maxExportBatchSize = min(DefaultMaxExportBatchSize, maxQueueSize)
}
o := BatchSpanProcessorOptions{
@@ -112,6 +124,21 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO
stopCh: make(chan struct{}),
}
+ if x.SelfObservability.Enabled() {
+ bsp.selfObservabilityEnabled = true
+ bsp.componentNameAttr = componentName()
+
+ var err error
+ bsp.spansProcessedCounter, bsp.callbackRegistration, err = newBSPObs(
+ bsp.componentNameAttr,
+ func() int64 { return int64(len(bsp.queue)) },
+ int64(bsp.o.MaxQueueSize),
+ )
+ if err != nil {
+ otel.Handle(err)
+ }
+ }
+
bsp.stopWait.Add(1)
go func() {
defer bsp.stopWait.Done()
@@ -122,8 +149,61 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO
return bsp
}
+var processorIDCounter atomic.Int64
+
+// nextProcessorID returns an identifier for this batch span processor,
+// starting with 0 and incrementing by 1 each time it is called.
+func nextProcessorID() int64 {
+ return processorIDCounter.Add(1) - 1
+}
+
+func componentName() attribute.KeyValue {
+ id := nextProcessorID()
+ name := fmt.Sprintf("%s/%d", otelconv.ComponentTypeBatchingSpanProcessor, id)
+ return semconv.OTelComponentName(name)
+}
+
+// newBSPObs creates and returns a new set of metrics instruments and a
+// registration for a BatchSpanProcessor. It is the caller's responsibility
+// to unregister the registration when it is no longer needed.
+func newBSPObs(
+ cmpnt attribute.KeyValue,
+ qLen func() int64,
+ qMax int64,
+) (otelconv.SDKProcessorSpanProcessed, metric.Registration, error) {
+ meter := otel.GetMeterProvider().Meter(
+ selfObsScopeName,
+ metric.WithInstrumentationVersion(sdk.Version()),
+ metric.WithSchemaURL(semconv.SchemaURL),
+ )
+
+ qCap, err := otelconv.NewSDKProcessorSpanQueueCapacity(meter)
+
+ qSize, e := otelconv.NewSDKProcessorSpanQueueSize(meter)
+ err = errors.Join(err, e)
+
+ spansProcessed, e := otelconv.NewSDKProcessorSpanProcessed(meter)
+ err = errors.Join(err, e)
+
+ cmpntT := semconv.OTelComponentTypeBatchingSpanProcessor
+ attrs := metric.WithAttributes(cmpnt, cmpntT)
+
+ reg, e := meter.RegisterCallback(
+ func(_ context.Context, o metric.Observer) error {
+ o.ObserveInt64(qSize.Inst(), qLen(), attrs)
+ o.ObserveInt64(qCap.Inst(), qMax, attrs)
+ return nil
+ },
+ qSize.Inst(),
+ qCap.Inst(),
+ )
+ err = errors.Join(err, e)
+
+ return spansProcessed, reg, err
+}
+
// OnStart method does nothing.
-func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {}
+func (*batchSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
// OnEnd method enqueues a ReadOnlySpan for later processing.
func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) {
@@ -162,6 +242,9 @@ func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error {
case <-ctx.Done():
err = ctx.Err()
}
+ if bsp.selfObservabilityEnabled {
+ err = errors.Join(err, bsp.callbackRegistration.Unregister())
+ }
})
return err
}
@@ -171,7 +254,7 @@ type forceFlushSpan struct {
flushed chan struct{}
}
-func (f forceFlushSpan) SpanContext() trace.SpanContext {
+func (forceFlushSpan) SpanContext() trace.SpanContext {
return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled})
}
@@ -274,6 +357,11 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error {
if l := len(bsp.batch); l > 0 {
global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped))
+ if bsp.selfObservabilityEnabled {
+ bsp.spansProcessedCounter.Add(ctx, int64(l),
+ bsp.componentNameAttr,
+ bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor))
+ }
err := bsp.e.ExportSpans(ctx, bsp.batch)
// A new batch is always created after exporting, even if the batch failed to be exported.
@@ -382,11 +470,17 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R
case bsp.queue <- sd:
return true
case <-ctx.Done():
+ if bsp.selfObservabilityEnabled {
+ bsp.spansProcessedCounter.Add(ctx, 1,
+ bsp.componentNameAttr,
+ bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor),
+ bsp.spansProcessedCounter.AttrErrorType(queueFull))
+ }
return false
}
}
-func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool {
+func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool {
if !sd.SpanContext().IsSampled() {
return false
}
@@ -396,12 +490,18 @@ func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) b
return true
default:
atomic.AddUint32(&bsp.dropped, 1)
+ if bsp.selfObservabilityEnabled {
+ bsp.spansProcessedCounter.Add(ctx, 1,
+ bsp.componentNameAttr,
+ bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor),
+ bsp.spansProcessedCounter.AttrErrorType(queueFull))
+ }
}
return false
}
// MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
-func (bsp *batchSpanProcessor) MarshalLog() interface{} {
+func (bsp *batchSpanProcessor) MarshalLog() any {
return struct {
Type string
SpanExporter SpanExporter
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
index 1f60524e3e..e58e7f6ed7 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
@@ -6,5 +6,8 @@ Package trace contains support for OpenTelemetry distributed tracing.
The following assumes a basic familiarity with OpenTelemetry concepts.
See https://opentelemetry.io.
+
+See [go.opentelemetry.io/otel/sdk/trace/internal/x] for information about
+the experimental features.
*/
package trace // import "go.opentelemetry.io/otel/sdk/trace"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
index c8d3fb7e3c..3649322a6e 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
@@ -32,7 +32,7 @@ type randomIDGenerator struct{}
var _ IDGenerator = &randomIDGenerator{}
// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
-func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID {
+func (*randomIDGenerator) NewSpanID(context.Context, trace.TraceID) trace.SpanID {
sid := trace.SpanID{}
for {
binary.NativeEndian.PutUint64(sid[:], rand.Uint64())
@@ -45,7 +45,7 @@ func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.Trace
// NewIDs returns a non-zero trace ID and a non-zero span ID from a
// randomly-chosen sequence.
-func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
+func (*randomIDGenerator) NewIDs(context.Context) (trace.TraceID, trace.SpanID) {
tid := trace.TraceID{}
sid := trace.SpanID{}
for {
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md
new file mode 100644
index 0000000000..feec16fa64
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md
@@ -0,0 +1,35 @@
+# Experimental Features
+
+The Trace SDK contains features that have not yet stabilized in the OpenTelemetry specification.
+These features are added to the OpenTelemetry Go Trace SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
+
+These features may change in backwards incompatible ways as feedback is applied.
+See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
+
+## Features
+
+- [Self-Observability](#self-observability)
+
+### Self-Observability
+
+The SDK provides a self-observability feature that allows you to monitor the SDK itself.
+
+To opt-in, set the environment variable `OTEL_GO_X_SELF_OBSERVABILITY` to `true`.
+
+When enabled, the SDK will create the following metrics using the global `MeterProvider`:
+
+- `otel.sdk.span.live`
+- `otel.sdk.span.started`
+
+Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics.
+
+[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/otel/sdk-metrics.md
+
+## Compatibility and Stability
+
+Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md).
+These features may be removed or modified in successive version releases, including patch versions.
+
+When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
+There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
+If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go
new file mode 100644
index 0000000000..2fcbbcc66e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go
@@ -0,0 +1,63 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package x documents experimental features for [go.opentelemetry.io/otel/sdk/trace].
+package x // import "go.opentelemetry.io/otel/sdk/trace/internal/x"
+
+import (
+ "os"
+ "strings"
+)
+
+// SelfObservability is an experimental feature flag that determines if SDK
+// self-observability metrics are enabled.
+//
+// To enable this feature set the OTEL_GO_X_SELF_OBSERVABILITY environment variable
+// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
+// will also enable this).
+var SelfObservability = newFeature("SELF_OBSERVABILITY", func(v string) (string, bool) {
+ if strings.EqualFold(v, "true") {
+ return v, true
+ }
+ return "", false
+})
+
+// Feature is an experimental feature control flag. It provides a uniform way
+// to interact with these feature flags and parse their values.
+type Feature[T any] struct {
+ key string
+ parse func(v string) (T, bool)
+}
+
+func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
+ const envKeyRoot = "OTEL_GO_X_"
+ return Feature[T]{
+ key: envKeyRoot + suffix,
+ parse: parse,
+ }
+}
+
+// Key returns the environment variable key that needs to be set to enable the
+// feature.
+func (f Feature[T]) Key() string { return f.key }
+
+// Lookup returns the user configured value for the feature and true if the
+// user has enabled the feature. Otherwise, if the feature is not enabled, a
+// zero-value and false are returned.
+func (f Feature[T]) Lookup() (v T, ok bool) {
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value
+ //
+ // > The SDK MUST interpret an empty value of an environment variable the
+ // > same way as when the variable is unset.
+ vRaw := os.Getenv(f.key)
+ if vRaw == "" {
+ return v, ok
+ }
+ return f.parse(vRaw)
+}
+
+// Enabled reports whether the feature is enabled.
+func (f Feature[T]) Enabled() bool {
+ _, ok := f.Lookup()
+ return ok
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
index 0e2a2e7c60..37ce2ac876 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
@@ -5,14 +5,20 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace"
import (
"context"
+ "errors"
"fmt"
"sync"
"sync/atomic"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/sdk"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/resource"
+ "go.opentelemetry.io/otel/sdk/trace/internal/x"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+ "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
"go.opentelemetry.io/otel/trace/noop"
@@ -20,6 +26,7 @@ import (
const (
defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer"
+ selfObsScopeName = "go.opentelemetry.io/otel/sdk/trace"
)
// tracerProviderConfig.
@@ -45,7 +52,7 @@ type tracerProviderConfig struct {
}
// MarshalLog is the marshaling function used by the logging system to represent this Provider.
-func (cfg tracerProviderConfig) MarshalLog() interface{} {
+func (cfg tracerProviderConfig) MarshalLog() any {
return struct {
SpanProcessors []SpanProcessor
SamplerType string
@@ -156,8 +163,18 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
t, ok := p.namedTracer[is]
if !ok {
t = &tracer{
- provider: p,
- instrumentationScope: is,
+ provider: p,
+ instrumentationScope: is,
+ selfObservabilityEnabled: x.SelfObservability.Enabled(),
+ }
+ if t.selfObservabilityEnabled {
+ var err error
+ t.spanLiveMetric, t.spanStartedMetric, err = newInst()
+ if err != nil {
+ msg := "failed to create self-observability metrics for tracer: %w"
+ err := fmt.Errorf(msg, err)
+ otel.Handle(err)
+ }
}
p.namedTracer[is] = t
}
@@ -184,6 +201,23 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
return t
}
+func newInst() (otelconv.SDKSpanLive, otelconv.SDKSpanStarted, error) {
+ m := otel.GetMeterProvider().Meter(
+ selfObsScopeName,
+ metric.WithInstrumentationVersion(sdk.Version()),
+ metric.WithSchemaURL(semconv.SchemaURL),
+ )
+
+ var err error
+ spanLiveMetric, e := otelconv.NewSDKSpanLive(m)
+ err = errors.Join(err, e)
+
+ spanStartedMetric, e := otelconv.NewSDKSpanStarted(m)
+ err = errors.Join(err, e)
+
+ return spanLiveMetric, spanStartedMetric, err
+}
+
// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors.
func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) {
// This check prevents calls during a shutdown.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
index aa7b262d0d..689663d48b 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
@@ -110,14 +110,14 @@ func TraceIDRatioBased(fraction float64) Sampler {
type alwaysOnSampler struct{}
-func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult {
+func (alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult {
return SamplingResult{
Decision: RecordAndSample,
Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
}
}
-func (as alwaysOnSampler) Description() string {
+func (alwaysOnSampler) Description() string {
return "AlwaysOnSampler"
}
@@ -131,14 +131,14 @@ func AlwaysSample() Sampler {
type alwaysOffSampler struct{}
-func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult {
+func (alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult {
return SamplingResult{
Decision: Drop,
Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
}
}
-func (as alwaysOffSampler) Description() string {
+func (alwaysOffSampler) Description() string {
return "AlwaysOffSampler"
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
index 664e13e03f..411d9ccdd7 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
@@ -39,7 +39,7 @@ func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor {
}
// OnStart does nothing.
-func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
+func (*simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
// OnEnd immediately exports a ReadOnlySpan.
func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) {
@@ -104,13 +104,13 @@ func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error {
}
// ForceFlush does nothing as there is no data to flush.
-func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error {
+func (*simpleSpanProcessor) ForceFlush(context.Context) error {
return nil
}
// MarshalLog is the marshaling function used by the logging system to represent
// this Span Processor.
-func (ssp *simpleSpanProcessor) MarshalLog() interface{} {
+func (ssp *simpleSpanProcessor) MarshalLog() any {
return struct {
Type string
Exporter SpanExporter
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
index d511d0f271..63aa337800 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
@@ -35,7 +35,7 @@ type snapshot struct {
var _ ReadOnlySpan = snapshot{}
-func (s snapshot) private() {}
+func (snapshot) private() {}
// Name returns the name of the span.
func (s snapshot) Name() string {
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
index 1785a4bbb0..b376051fbb 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
@@ -20,7 +20,7 @@ import (
"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/resource"
- semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
)
@@ -61,6 +61,7 @@ type ReadOnlySpan interface {
InstrumentationScope() instrumentation.Scope
// InstrumentationLibrary returns information about the instrumentation
// library that created the span.
+ //
// Deprecated: please use InstrumentationScope instead.
InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be define for backwards compatibility
// Resource returns information about the entity that produced the span.
@@ -165,7 +166,7 @@ func (s *recordingSpan) SpanContext() trace.SpanContext {
return s.spanContext
}
-// IsRecording returns if this span is being recorded. If this span has ended
+// IsRecording reports whether this span is being recorded. If this span has ended
// this will return false.
func (s *recordingSpan) IsRecording() bool {
if s == nil {
@@ -177,7 +178,7 @@ func (s *recordingSpan) IsRecording() bool {
return s.isRecording()
}
-// isRecording returns if this span is being recorded. If this span has ended
+// isRecording reports whether this span is being recorded. If this span has ended
// this will return false.
//
// This method assumes s.mu.Lock is held by the caller.
@@ -495,6 +496,16 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
}
s.mu.Unlock()
+ if s.tracer.selfObservabilityEnabled {
+ defer func() {
+ // Add the span to the context to ensure the metric is recorded
+ // with the correct span context.
+ ctx := trace.ContextWithSpan(context.Background(), s)
+ set := spanLiveSet(s.spanContext.IsSampled())
+ s.tracer.spanLiveMetric.AddSet(ctx, -1, set)
+ }()
+ }
+
sps := s.tracer.provider.getSpanProcessors()
if len(sps) == 0 {
return
@@ -545,7 +556,7 @@ func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) {
s.addEvent(semconv.ExceptionEventName, opts...)
}
-func typeStr(i interface{}) string {
+func typeStr(i any) string {
t := reflect.TypeOf(i)
if t.PkgPath() == "" && t.Name() == "" {
// Likely a builtin type.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
index 0b65ae9ab7..e965c4cce8 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
@@ -7,7 +7,9 @@ import (
"context"
"time"
+ "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
)
@@ -17,6 +19,10 @@ type tracer struct {
provider *TracerProvider
instrumentationScope instrumentation.Scope
+
+ selfObservabilityEnabled bool
+ spanLiveMetric otelconv.SDKSpanLive
+ spanStartedMetric otelconv.SDKSpanStarted
}
var _ trace.Tracer = &tracer{}
@@ -46,17 +52,25 @@ func (tr *tracer) Start(
}
s := tr.newSpan(ctx, name, &config)
+ newCtx := trace.ContextWithSpan(ctx, s)
+ if tr.selfObservabilityEnabled {
+ psc := trace.SpanContextFromContext(ctx)
+ set := spanStartedSet(psc, s)
+ tr.spanStartedMetric.AddSet(newCtx, 1, set)
+ }
+
if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() {
sps := tr.provider.getSpanProcessors()
for _, sp := range sps {
+ // Use original context.
sp.sp.OnStart(ctx, rw)
}
}
if rtt, ok := s.(runtimeTracer); ok {
- ctx = rtt.runtimeTrace(ctx)
+ newCtx = rtt.runtimeTrace(newCtx)
}
- return trace.ContextWithSpan(ctx, s), s
+ return newCtx, s
}
type runtimeTracer interface {
@@ -112,11 +126,12 @@ func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanCo
if !isRecording(samplingResult) {
return tr.newNonRecordingSpan(sc)
}
- return tr.newRecordingSpan(psc, sc, name, samplingResult, config)
+ return tr.newRecordingSpan(ctx, psc, sc, name, samplingResult, config)
}
// newRecordingSpan returns a new configured recordingSpan.
func (tr *tracer) newRecordingSpan(
+ ctx context.Context,
psc, sc trace.SpanContext,
name string,
sr SamplingResult,
@@ -153,6 +168,14 @@ func (tr *tracer) newRecordingSpan(
s.SetAttributes(sr.Attributes...)
s.SetAttributes(config.Attributes()...)
+ if tr.selfObservabilityEnabled {
+ // Propagate any existing values from the context with the new span to
+ // the measurement context.
+ ctx = trace.ContextWithSpan(ctx, s)
+ set := spanLiveSet(s.spanContext.IsSampled())
+ tr.spanLiveMetric.AddSet(ctx, 1, set)
+ }
+
return s
}
@@ -160,3 +183,112 @@ func (tr *tracer) newRecordingSpan(
func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan {
return nonRecordingSpan{tracer: tr, sc: sc}
}
+
+type parentState int
+
+const (
+ parentStateNoParent parentState = iota
+ parentStateLocalParent
+ parentStateRemoteParent
+)
+
+type samplingState int
+
+const (
+ samplingStateDrop samplingState = iota
+ samplingStateRecordOnly
+ samplingStateRecordAndSample
+)
+
+type spanStartedSetKey struct {
+ parent parentState
+ sampling samplingState
+}
+
+var spanStartedSetCache = map[spanStartedSetKey]attribute.Set{
+ {parentStateNoParent, samplingStateDrop}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop),
+ ),
+ {parentStateLocalParent, samplingStateDrop}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop),
+ ),
+ {parentStateRemoteParent, samplingStateDrop}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop),
+ ),
+
+ {parentStateNoParent, samplingStateRecordOnly}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly),
+ ),
+ {parentStateLocalParent, samplingStateRecordOnly}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly),
+ ),
+ {parentStateRemoteParent, samplingStateRecordOnly}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly),
+ ),
+
+ {parentStateNoParent, samplingStateRecordAndSample}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample),
+ ),
+ {parentStateLocalParent, samplingStateRecordAndSample}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample),
+ ),
+ {parentStateRemoteParent, samplingStateRecordAndSample}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample),
+ ),
+}
+
+func spanStartedSet(psc trace.SpanContext, span trace.Span) attribute.Set {
+ key := spanStartedSetKey{
+ parent: parentStateNoParent,
+ sampling: samplingStateDrop,
+ }
+
+ if psc.IsValid() {
+ if psc.IsRemote() {
+ key.parent = parentStateRemoteParent
+ } else {
+ key.parent = parentStateLocalParent
+ }
+ }
+
+ if span.IsRecording() {
+ if span.SpanContext().IsSampled() {
+ key.sampling = samplingStateRecordAndSample
+ } else {
+ key.sampling = samplingStateRecordOnly
+ }
+ }
+
+ return spanStartedSetCache[key]
+}
+
+type spanLiveSetKey struct {
+ sampled bool
+}
+
+var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{
+ {true}: attribute.NewSet(
+ otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
+ otelconv.SpanSamplingResultRecordAndSample,
+ ),
+ ),
+ {false}: attribute.NewSet(
+ otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
+ otelconv.SpanSamplingResultRecordOnly,
+ ),
+ ),
+}
+
+func spanLiveSet(sampled bool) attribute.Set {
+ key := spanLiveSetKey{sampled: sampled}
+ return spanLiveSetCache[key]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
deleted file mode 100644
index b84dd2c5ee..0000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/sdk/trace"
-
-// version is the current release version of the metric SDK in use.
-func version() string {
- return "1.16.0-rc.1"
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go
index c0217af6b9..7f97cc31e5 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/version.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/version.go
@@ -6,5 +6,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk"
// Version is the current release version of the OpenTelemetry SDK in use.
func Version() string {
- return "1.37.0"
+ return "1.38.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md
deleted file mode 100644
index 02b56115e3..0000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md
+++ /dev/null
@@ -1,4 +0,0 @@
-
-# Migration from v1.33.0 to v1.34.0
-
-The `go.opentelemetry.io/otel/semconv/v1.34.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.33.0`.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md
deleted file mode 100644
index fab06c9752..0000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Semconv v1.34.0
-
-[](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.34.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md
new file mode 100644
index 0000000000..2480547895
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md
@@ -0,0 +1,41 @@
+
+# Migration from v1.36.0 to v1.37.0
+
+The `go.opentelemetry.io/otel/semconv/v1.37.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.36.0` with the following exceptions.
+
+## Removed
+
+The following declarations have been removed.
+Refer to the [OpenTelemetry Semantic Conventions documentation] for deprecation instructions.
+
+If the type is not listed in the documentation as deprecated, it has been removed in this version due to lack of applicability or use.
+If you use any of these non-deprecated declarations in your Go application, please [open an issue] describing your use-case.
+
+- `ContainerRuntime`
+- `ContainerRuntimeKey`
+- `GenAIOpenAIRequestServiceTierAuto`
+- `GenAIOpenAIRequestServiceTierDefault`
+- `GenAIOpenAIRequestServiceTierKey`
+- `GenAIOpenAIResponseServiceTier`
+- `GenAIOpenAIResponseServiceTierKey`
+- `GenAIOpenAIResponseSystemFingerprint`
+- `GenAIOpenAIResponseSystemFingerprintKey`
+- `GenAISystemAWSBedrock`
+- `GenAISystemAnthropic`
+- `GenAISystemAzureAIInference`
+- `GenAISystemAzureAIOpenAI`
+- `GenAISystemCohere`
+- `GenAISystemDeepseek`
+- `GenAISystemGCPGemini`
+- `GenAISystemGCPGenAI`
+- `GenAISystemGCPVertexAI`
+- `GenAISystemGroq`
+- `GenAISystemIBMWatsonxAI`
+- `GenAISystemKey`
+- `GenAISystemMistralAI`
+- `GenAISystemOpenAI`
+- `GenAISystemPerplexity`
+- `GenAISystemXai`
+
+[OpenTelemetry Semantic Conventions documentation]: https://github.com/open-telemetry/semantic-conventions
+[open an issue]: https://github.com/open-telemetry/opentelemetry-go/issues/new?template=Blank+issue
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md
new file mode 100644
index 0000000000..d795247f32
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.37.0
+
+[](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.37.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go
similarity index 89%
rename from vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go
rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go
index 5b56662573..b6b27498f2 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go
@@ -3,7 +3,7 @@
// Code generated from semantic convention specification. DO NOT EDIT.
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0"
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
import "go.opentelemetry.io/otel/attribute"
@@ -28,7 +28,8 @@ const (
// AndroidOSAPILevelKey is the attribute Key conforming to the
// "android.os.api_level" semantic conventions. It represents the uniquely
// identifies the framework API revision offered by a version (`os.version`) of
- // the android operating system. More information can be found [here].
+ // the android operating system. More information can be found in the
+ // [Android API levels documentation].
//
// Type: string
// RequirementLevel: Recommended
@@ -36,16 +37,17 @@ const (
//
// Examples: "33", "32"
//
- // [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
+ // [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
)
// AndroidOSAPILevel returns an attribute KeyValue conforming to the
// "android.os.api_level" semantic conventions. It represents the uniquely
// identifies the framework API revision offered by a version (`os.version`) of
-// the android operating system. More information can be found [here].
+// the android operating system. More information can be found in the
+// [Android API levels documentation].
//
-// [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
+// [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
func AndroidOSAPILevel(val string) attribute.KeyValue {
return AndroidOSAPILevelKey.String(val)
}
@@ -73,6 +75,18 @@ var (
// Namespace: app
const (
+ // AppBuildIDKey is the attribute Key conforming to the "app.build_id" semantic
+ // conventions. It represents the unique identifier for a particular build or
+ // compilation of the application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "6cff0a7e-cefc-4668-96f5-1273d8b334d0",
+ // "9f2b833506aa6973a92fde9733e6271f", "my-app-1.0.0-code-123"
+ AppBuildIDKey = attribute.Key("app.build_id")
+
// AppInstallationIDKey is the attribute Key conforming to the
// "app.installation.id" semantic conventions. It represents a unique identifier
// representing the installation of an application on a specific device.
@@ -106,16 +120,51 @@ const (
// - [App set ID].
// - [`Settings.getString(Settings.Secure.ANDROID_ID)`].
//
- // More information about Android identifier best practices can be found [here]
- // .
+ // More information about Android identifier best practices can be found in the
+ // [Android user data IDs guide].
//
// [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor
// [Firebase Installation ID]: https://firebase.google.com/docs/projects/manage-installations
// [App set ID]: https://developer.android.com/identity/app-set-id
// [`Settings.getString(Settings.Secure.ANDROID_ID)`]: https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID
- // [here]: https://developer.android.com/training/articles/user-data-ids
+ // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids
AppInstallationIDKey = attribute.Key("app.installation.id")
+ // AppJankFrameCountKey is the attribute Key conforming to the
+ // "app.jank.frame_count" semantic conventions. It represents a number of frame
+ // renders that experienced jank.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 9, 42
+ // Note: Depending on platform limitations, the value provided MAY be
+ // approximation.
+ AppJankFrameCountKey = attribute.Key("app.jank.frame_count")
+
+ // AppJankPeriodKey is the attribute Key conforming to the "app.jank.period"
+ // semantic conventions. It represents the time period, in seconds, for which
+ // this jank is being reported.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0, 5.0, 10.24
+ AppJankPeriodKey = attribute.Key("app.jank.period")
+
+ // AppJankThresholdKey is the attribute Key conforming to the
+ // "app.jank.threshold" semantic conventions. It represents the minimum
+ // rendering threshold for this jank, in seconds.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.016, 0.7, 1.024
+ AppJankThresholdKey = attribute.Key("app.jank.threshold")
+
// AppScreenCoordinateXKey is the attribute Key conforming to the
// "app.screen.coordinate.x" semantic conventions. It represents the x
// (horizontal) coordinate of a screen coordinate, in screen pixels.
@@ -164,6 +213,13 @@ const (
AppWidgetNameKey = attribute.Key("app.widget.name")
)
+// AppBuildID returns an attribute KeyValue conforming to the "app.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the application.
+func AppBuildID(val string) attribute.KeyValue {
+ return AppBuildIDKey.String(val)
+}
+
// AppInstallationID returns an attribute KeyValue conforming to the
// "app.installation.id" semantic conventions. It represents a unique identifier
// representing the installation of an application on a specific device.
@@ -171,6 +227,27 @@ func AppInstallationID(val string) attribute.KeyValue {
return AppInstallationIDKey.String(val)
}
+// AppJankFrameCount returns an attribute KeyValue conforming to the
+// "app.jank.frame_count" semantic conventions. It represents a number of frame
+// renders that experienced jank.
+func AppJankFrameCount(val int) attribute.KeyValue {
+ return AppJankFrameCountKey.Int(val)
+}
+
+// AppJankPeriod returns an attribute KeyValue conforming to the
+// "app.jank.period" semantic conventions. It represents the time period, in
+// seconds, for which this jank is being reported.
+func AppJankPeriod(val float64) attribute.KeyValue {
+ return AppJankPeriodKey.Float64(val)
+}
+
+// AppJankThreshold returns an attribute KeyValue conforming to the
+// "app.jank.threshold" semantic conventions. It represents the minimum rendering
+// threshold for this jank, in seconds.
+func AppJankThreshold(val float64) attribute.KeyValue {
+ return AppJankThresholdKey.Float64(val)
+}
+
// AppScreenCoordinateX returns an attribute KeyValue conforming to the
// "app.screen.coordinate.x" semantic conventions. It represents the x
// (horizontal) coordinate of a screen coordinate, in screen pixels.
@@ -1525,59 +1602,14 @@ func AWSStepFunctionsStateMachineARN(val string) attribute.KeyValue {
// Enum values for aws.ecs.launchtype
var (
- // ec2
+ // Amazon EC2
// Stability: development
AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
- // fargate
+ // Amazon Fargate
// Stability: development
AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
)
-// Namespace: az
-const (
- // AzNamespaceKey is the attribute Key conforming to the "az.namespace" semantic
- // conventions. It represents the [Azure Resource Provider Namespace] as
- // recognized by the client.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus"
- //
- // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers
- AzNamespaceKey = attribute.Key("az.namespace")
-
- // AzServiceRequestIDKey is the attribute Key conforming to the
- // "az.service_request_id" semantic conventions. It represents the unique
- // identifier of the service request. It's generated by the Azure service and
- // returned with the response.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "00000000-0000-0000-0000-000000000000"
- AzServiceRequestIDKey = attribute.Key("az.service_request_id")
-)
-
-// AzNamespace returns an attribute KeyValue conforming to the "az.namespace"
-// semantic conventions. It represents the [Azure Resource Provider Namespace] as
-// recognized by the client.
-//
-// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers
-func AzNamespace(val string) attribute.KeyValue {
- return AzNamespaceKey.String(val)
-}
-
-// AzServiceRequestID returns an attribute KeyValue conforming to the
-// "az.service_request_id" semantic conventions. It represents the unique
-// identifier of the service request. It's generated by the Azure service and
-// returned with the response.
-func AzServiceRequestID(val string) attribute.KeyValue {
- return AzServiceRequestIDKey.String(val)
-}
-
// Namespace: azure
const (
// AzureClientIDKey is the attribute Key conforming to the "azure.client.id"
@@ -1665,6 +1697,31 @@ const (
//
// Examples: 1000, 1002
AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code")
+
+ // AzureResourceProviderNamespaceKey is the attribute Key conforming to the
+ // "azure.resource_provider.namespace" semantic conventions. It represents the
+ // [Azure Resource Provider Namespace] as recognized by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus"
+ //
+ // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers
+ AzureResourceProviderNamespaceKey = attribute.Key("azure.resource_provider.namespace")
+
+ // AzureServiceRequestIDKey is the attribute Key conforming to the
+ // "azure.service.request.id" semantic conventions. It represents the unique
+ // identifier of the service request. It's generated by the Azure service and
+ // returned with the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "00000000-0000-0000-0000-000000000000"
+ AzureServiceRequestIDKey = attribute.Key("azure.service.request.id")
)
// AzureClientID returns an attribute KeyValue conforming to the
@@ -1705,6 +1762,23 @@ func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue {
return AzureCosmosDBResponseSubStatusCodeKey.Int(val)
}
+// AzureResourceProviderNamespace returns an attribute KeyValue conforming to the
+// "azure.resource_provider.namespace" semantic conventions. It represents the
+// [Azure Resource Provider Namespace] as recognized by the client.
+//
+// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers
+func AzureResourceProviderNamespace(val string) attribute.KeyValue {
+ return AzureResourceProviderNamespaceKey.String(val)
+}
+
+// AzureServiceRequestID returns an attribute KeyValue conforming to the
+// "azure.service.request.id" semantic conventions. It represents the unique
+// identifier of the service request. It's generated by the Azure service and
+// returned with the response.
+func AzureServiceRequestID(val string) attribute.KeyValue {
+ return AzureServiceRequestIDKey.String(val)
+}
+
// Enum values for azure.cosmosdb.connection.mode
var (
// Gateway (HTTP) connection.
@@ -1717,19 +1791,19 @@ var (
// Enum values for azure.cosmosdb.consistency.level
var (
- // strong
+ // Strong
// Stability: development
AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong")
- // bounded_staleness
+ // Bounded Staleness
// Stability: development
AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness")
- // session
+ // Session
// Stability: development
AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session")
- // eventual
+ // Eventual
// Stability: development
AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual")
- // consistent_prefix
+ // Consistent Prefix
// Stability: development
AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix")
)
@@ -1944,37 +2018,37 @@ func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
// Enum values for cassandra.consistency.level
var (
- // all
+ // All
// Stability: development
CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all")
- // each_quorum
+ // Each Quorum
// Stability: development
CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum")
- // quorum
+ // Quorum
// Stability: development
CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum")
- // local_quorum
+ // Local Quorum
// Stability: development
CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum")
- // one
+ // One
// Stability: development
CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one")
- // two
+ // Two
// Stability: development
CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two")
- // three
+ // Three
// Stability: development
CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three")
- // local_one
+ // Local One
// Stability: development
CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one")
- // any
+ // Any
// Stability: development
CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any")
- // serial
+ // Serial
// Stability: development
CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial")
- // local_serial
+ // Local Serial
// Stability: development
CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial")
)
@@ -2527,7 +2601,7 @@ const (
// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
// [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html
// [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names
- // [Fully Qualified Resource ID]: https://docs.microsoft.com/rest/api/resources/resources/get-by-id
+ // [Fully Qualified Resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
CloudResourceIDKey = attribute.Key("cloud.resource_id")
)
@@ -2604,25 +2678,25 @@ var (
CloudPlatformAWSOpenShift = CloudPlatformKey.String("aws_openshift")
// Azure Virtual Machines
// Stability: development
- CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure.vm")
// Azure Container Apps
// Stability: development
- CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps")
+ CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure.container_apps")
// Azure Container Instances
// Stability: development
- CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure.container_instances")
// Azure Kubernetes Service
// Stability: development
- CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure.aks")
// Azure Functions
// Stability: development
- CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure.functions")
// Azure App Service
// Stability: development
- CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure.app_service")
// Azure Red Hat OpenShift
// Stability: development
- CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure_openshift")
+ CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure.openshift")
// Google Bare Metal Solution (BMS)
// Stability: development
CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
@@ -3374,16 +3448,40 @@ const (
// Examples: "opentelemetry-autoconf"
ContainerNameKey = attribute.Key("container.name")
- // ContainerRuntimeKey is the attribute Key conforming to the
- // "container.runtime" semantic conventions. It represents the container runtime
- // managing this container.
+ // ContainerRuntimeDescriptionKey is the attribute Key conforming to the
+ // "container.runtime.description" semantic conventions. It represents a
+ // description about the runtime which could include, for example details about
+ // the CRI/API version being used or other customisations.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "docker://19.3.1 - CRI: 1.22.0"
+ ContainerRuntimeDescriptionKey = attribute.Key("container.runtime.description")
+
+ // ContainerRuntimeNameKey is the attribute Key conforming to the
+ // "container.runtime.name" semantic conventions. It represents the container
+ // runtime managing this container.
//
// Type: string
// RequirementLevel: Recommended
// Stability: Development
//
// Examples: "docker", "containerd", "rkt"
- ContainerRuntimeKey = attribute.Key("container.runtime")
+ ContainerRuntimeNameKey = attribute.Key("container.runtime.name")
+
+ // ContainerRuntimeVersionKey is the attribute Key conforming to the
+ // "container.runtime.version" semantic conventions. It represents the version
+ // of the runtime of this process, as returned by the runtime without
+ // modification.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0.0
+ ContainerRuntimeVersionKey = attribute.Key("container.runtime.version")
)
// ContainerCommand returns an attribute KeyValue conforming to the
@@ -3467,6 +3565,13 @@ func ContainerImageTags(val ...string) attribute.KeyValue {
return ContainerImageTagsKey.StringSlice(val)
}
+// ContainerLabel returns an attribute KeyValue conforming to the
+// "container.label" semantic conventions. It represents the container labels,
+// `` being the label name, the value being the label value.
+func ContainerLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("container.label."+key, val)
+}
+
// ContainerName returns an attribute KeyValue conforming to the "container.name"
// semantic conventions. It represents the container name used by container
// runtime.
@@ -3474,11 +3579,26 @@ func ContainerName(val string) attribute.KeyValue {
return ContainerNameKey.String(val)
}
-// ContainerRuntime returns an attribute KeyValue conforming to the
-// "container.runtime" semantic conventions. It represents the container runtime
-// managing this container.
-func ContainerRuntime(val string) attribute.KeyValue {
- return ContainerRuntimeKey.String(val)
+// ContainerRuntimeDescription returns an attribute KeyValue conforming to the
+// "container.runtime.description" semantic conventions. It represents a
+// description about the runtime which could include, for example details about
+// the CRI/API version being used or other customisations.
+func ContainerRuntimeDescription(val string) attribute.KeyValue {
+ return ContainerRuntimeDescriptionKey.String(val)
+}
+
+// ContainerRuntimeName returns an attribute KeyValue conforming to the
+// "container.runtime.name" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntimeName(val string) attribute.KeyValue {
+ return ContainerRuntimeNameKey.String(val)
+}
+
+// ContainerRuntimeVersion returns an attribute KeyValue conforming to the
+// "container.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without modification.
+func ContainerRuntimeVersion(val string) attribute.KeyValue {
+ return ContainerRuntimeVersionKey.String(val)
}
// Namespace: cpu
@@ -3514,28 +3634,28 @@ func CPULogicalNumber(val int) attribute.KeyValue {
// Enum values for cpu.mode
var (
- // user
+ // User
// Stability: development
CPUModeUser = CPUModeKey.String("user")
- // system
+ // System
// Stability: development
CPUModeSystem = CPUModeKey.String("system")
- // nice
+ // Nice
// Stability: development
CPUModeNice = CPUModeKey.String("nice")
- // idle
+ // Idle
// Stability: development
CPUModeIdle = CPUModeKey.String("idle")
- // iowait
+ // IO Wait
// Stability: development
CPUModeIOWait = CPUModeKey.String("iowait")
- // interrupt
+ // Interrupt
// Stability: development
CPUModeInterrupt = CPUModeKey.String("interrupt")
- // steal
+ // Steal
// Stability: development
CPUModeSteal = CPUModeKey.String("steal")
- // kernel
+ // Kernel
// Stability: development
CPUModeKernel = CPUModeKey.String("kernel")
)
@@ -3794,6 +3914,22 @@ func DBOperationName(val string) attribute.KeyValue {
return DBOperationNameKey.String(val)
}
+// DBOperationParameter returns an attribute KeyValue conforming to the
+// "db.operation.parameter" semantic conventions. It represents a database
+// operation parameter, with `` being the parameter name, and the attribute
+// value being a string representation of the parameter value.
+func DBOperationParameter(key string, val string) attribute.KeyValue {
+ return attribute.String("db.operation.parameter."+key, val)
+}
+
+// DBQueryParameter returns an attribute KeyValue conforming to the
+// "db.query.parameter" semantic conventions. It represents a database query
+// parameter, with `` being the parameter name, and the attribute value
+// being a string representation of the parameter value.
+func DBQueryParameter(key string, val string) attribute.KeyValue {
+ return attribute.String("db.query.parameter."+key, val)
+}
+
// DBQuerySummary returns an attribute KeyValue conforming to the
// "db.query.summary" semantic conventions. It represents the low cardinality
// summary of a database query.
@@ -4194,8 +4330,8 @@ const (
// Hardware IDs (e.g. vendor-specific serial number, IMEI or MAC address) MAY be
// used as values.
//
- // More information about Android identifier best practices can be found [here]
- // .
+ // More information about Android identifier best practices can be found in the
+ // [Android user data IDs guide].
//
// > [!WARNING]> This attribute may contain sensitive (PII) information. Caution
// > should be taken when storing personal data or anything which can identify a
@@ -4210,7 +4346,7 @@ const (
// > opt-in feature.> See [`app.installation.id`]> for a more
// > privacy-preserving alternative.
//
- // [here]: https://developer.android.com/training/articles/user-data-ids
+ // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids
// [`app.installation.id`]: /docs/registry/attributes/app.md#app-installation-id
DeviceIDKey = attribute.Key("device.id")
@@ -4308,6 +4444,17 @@ var (
// Namespace: dns
const (
+ // DNSAnswersKey is the attribute Key conforming to the "dns.answers" semantic
+ // conventions. It represents the list of IPv4 or IPv6 addresses resolved during
+ // DNS lookup.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10.0.0.1", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
+ DNSAnswersKey = attribute.Key("dns.answers")
+
// DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name"
// semantic conventions. It represents the name being queried.
//
@@ -4323,6 +4470,13 @@ const (
DNSQuestionNameKey = attribute.Key("dns.question.name")
)
+// DNSAnswers returns an attribute KeyValue conforming to the "dns.answers"
+// semantic conventions. It represents the list of IPv4 or IPv6 addresses
+// resolved during DNS lookup.
+func DNSAnswers(val ...string) attribute.KeyValue {
+ return DNSAnswersKey.StringSlice(val)
+}
+
// DNSQuestionName returns an attribute KeyValue conforming to the
// "dns.question.name" semantic conventions. It represents the name being
// queried.
@@ -4941,7 +5095,7 @@ const (
//
// Type: string
// RequirementLevel: Recommended
- // Stability: Development
+ // Stability: Release_Candidate
//
// Examples: "5157782b-2203-4c80-a857-dbbd5e7761db"
FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id")
@@ -4951,7 +5105,7 @@ const (
//
// Type: string
// RequirementLevel: Recommended
- // Stability: Development
+ // Stability: Release_Candidate
//
// Examples: "logo-color"
FeatureFlagKeyKey = attribute.Key("feature_flag.key")
@@ -4962,7 +5116,7 @@ const (
//
// Type: string
// RequirementLevel: Recommended
- // Stability: Development
+ // Stability: Release_Candidate
//
// Examples: "Flag Manager"
FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider.name")
@@ -4973,7 +5127,7 @@ const (
//
// Type: Enum
// RequirementLevel: Recommended
- // Stability: Development
+ // Stability: Release_Candidate
//
// Examples: "static", "targeting_match", "error", "default"
FeatureFlagResultReasonKey = attribute.Key("feature_flag.result.reason")
@@ -4984,7 +5138,7 @@ const (
//
// Type: any
// RequirementLevel: Recommended
- // Stability: Development
+ // Stability: Release_Candidate
//
// Examples: "#ff0000", true, 3
// Note: With some feature flag providers, feature flag results can be quite
@@ -5004,7 +5158,7 @@ const (
//
// Type: string
// RequirementLevel: Recommended
- // Stability: Development
+ // Stability: Release_Candidate
//
// Examples: "red", "true", "on"
// Note: A semantic identifier, commonly referred to as a variant, provides a
@@ -5020,7 +5174,7 @@ const (
//
// Type: string
// RequirementLevel: Recommended
- // Stability: Development
+ // Stability: Release_Candidate
//
// Examples: "proj-1", "ab98sgs", "service1/dev"
//
@@ -5034,7 +5188,7 @@ const (
//
// Type: string
// RequirementLevel: Recommended
- // Stability: Development
+ // Stability: Release_Candidate
//
// Examples: "1", "01ABCDEF"
FeatureFlagVersionKey = attribute.Key("feature_flag.version")
@@ -5088,34 +5242,34 @@ func FeatureFlagVersion(val string) attribute.KeyValue {
// Enum values for feature_flag.result.reason
var (
// The resolved value is static (no dynamic evaluation).
- // Stability: development
+ // Stability: release_candidate
FeatureFlagResultReasonStatic = FeatureFlagResultReasonKey.String("static")
// The resolved value fell back to a pre-configured value (no dynamic evaluation
// occurred or dynamic evaluation yielded no result).
- // Stability: development
+ // Stability: release_candidate
FeatureFlagResultReasonDefault = FeatureFlagResultReasonKey.String("default")
// The resolved value was the result of a dynamic evaluation, such as a rule or
// specific user-targeting.
- // Stability: development
+ // Stability: release_candidate
FeatureFlagResultReasonTargetingMatch = FeatureFlagResultReasonKey.String("targeting_match")
// The resolved value was the result of pseudorandom assignment.
- // Stability: development
+ // Stability: release_candidate
FeatureFlagResultReasonSplit = FeatureFlagResultReasonKey.String("split")
// The resolved value was retrieved from cache.
- // Stability: development
+ // Stability: release_candidate
FeatureFlagResultReasonCached = FeatureFlagResultReasonKey.String("cached")
// The resolved value was the result of the flag being disabled in the
// management system.
- // Stability: development
+ // Stability: release_candidate
FeatureFlagResultReasonDisabled = FeatureFlagResultReasonKey.String("disabled")
// The reason for the resolved value could not be determined.
- // Stability: development
+ // Stability: release_candidate
FeatureFlagResultReasonUnknown = FeatureFlagResultReasonKey.String("unknown")
// The resolved value is non-authoritative or possibly out of date
- // Stability: development
+ // Stability: release_candidate
FeatureFlagResultReasonStale = FeatureFlagResultReasonKey.String("stale")
// The resolved value was the result of an error.
- // Stability: development
+ // Stability: release_candidate
FeatureFlagResultReasonError = FeatureFlagResultReasonKey.String("error")
)
@@ -5208,7 +5362,7 @@ const (
// RequirementLevel: Recommended
// Stability: Development
//
- // Examples: "Zone.Identifer"
+ // Examples: "Zone.Identifier"
// Note: On Linux, a resource fork is used to store additional data with a
// filesystem object. A file always has at least one fork for the data portion,
// and additional forks may exist.
@@ -5863,39 +6017,41 @@ const (
// `db.*`, to further identify and describe the data source.
GenAIDataSourceIDKey = attribute.Key("gen_ai.data_source.id")
- // GenAIOpenAIRequestServiceTierKey is the attribute Key conforming to the
- // "gen_ai.openai.request.service_tier" semantic conventions. It represents the
- // service tier requested. May be a specific tier, default, or auto.
+ // GenAIInputMessagesKey is the attribute Key conforming to the
+ // "gen_ai.input.messages" semantic conventions. It represents the chat history
+ // provided to the model as an input.
//
- // Type: Enum
+ // Type: any
// RequirementLevel: Recommended
// Stability: Development
//
- // Examples: "auto", "default"
- GenAIOpenAIRequestServiceTierKey = attribute.Key("gen_ai.openai.request.service_tier")
-
- // GenAIOpenAIResponseServiceTierKey is the attribute Key conforming to the
- // "gen_ai.openai.response.service_tier" semantic conventions. It represents the
- // service tier used for the response.
+ // Examples: "[\n {\n "role": "user",\n "parts": [\n {\n "type": "text",\n
+ // "content": "Weather in Paris?"\n }\n ]\n },\n {\n "role": "assistant",\n
+ // "parts": [\n {\n "type": "tool_call",\n "id":
+ // "call_VSPygqKTWdrhaFErNvMV18Yl",\n "name": "get_weather",\n "arguments": {\n
+ // "location": "Paris"\n }\n }\n ]\n },\n {\n "role": "tool",\n "parts": [\n {\n
+ // "type": "tool_call_response",\n "id": " call_VSPygqKTWdrhaFErNvMV18Yl",\n
+ // "result": "rainy, 57°F"\n }\n ]\n }\n]\n"
+ // Note: Instrumentations MUST follow [Input messages JSON schema].
+ // When the attribute is recorded on events, it MUST be recorded in structured
+ // form. When recorded on spans, it MAY be recorded as a JSON string if
+ // structured
+ // format is not supported and SHOULD be recorded in structured form otherwise.
//
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
+ // Messages MUST be provided in the order they were sent to the model.
+ // Instrumentations MAY provide a way for users to filter or truncate
+ // input messages.
//
- // Examples: "scale", "default"
- GenAIOpenAIResponseServiceTierKey = attribute.Key("gen_ai.openai.response.service_tier")
-
- // GenAIOpenAIResponseSystemFingerprintKey is the attribute Key conforming to
- // the "gen_ai.openai.response.system_fingerprint" semantic conventions. It
- // represents a fingerprint to track any eventual change in the Generative AI
- // environment.
+ // > [!Warning]
+ // > This attribute is likely to contain sensitive information including
+ // > user/PII data.
//
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
+ // See [Recording content on attributes]
+ // section for more details.
//
- // Examples: "fp_44709d6fcb"
- GenAIOpenAIResponseSystemFingerprintKey = attribute.Key("gen_ai.openai.response.system_fingerprint")
+ // [Input messages JSON schema]: /docs/gen-ai/gen-ai-input-messages.json
+ // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes
+ GenAIInputMessagesKey = attribute.Key("gen_ai.input.messages")
// GenAIOperationNameKey is the attribute Key conforming to the
// "gen_ai.operation.name" semantic conventions. It represents the name of the
@@ -5913,6 +6069,44 @@ const (
// libraries SHOULD use applicable predefined value.
GenAIOperationNameKey = attribute.Key("gen_ai.operation.name")
+ // GenAIOutputMessagesKey is the attribute Key conforming to the
+ // "gen_ai.output.messages" semantic conventions. It represents the messages
+ // returned by the model where each message represents a specific model response
+ // (choice, candidate).
+ //
+ // Type: any
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "[\n {\n "role": "assistant",\n "parts": [\n {\n "type": "text",\n
+ // "content": "The weather in Paris is currently rainy with a temperature of
+ // 57°F."\n }\n ],\n "finish_reason": "stop"\n }\n]\n"
+ // Note: Instrumentations MUST follow [Output messages JSON schema]
+ //
+ // Each message represents a single output choice/candidate generated by
+ // the model. Each message corresponds to exactly one generation
+ // (choice/candidate) and vice versa - one choice cannot be split across
+ // multiple messages or one message cannot contain parts from multiple choices.
+ //
+ // When the attribute is recorded on events, it MUST be recorded in structured
+ // form. When recorded on spans, it MAY be recorded as a JSON string if
+ // structured
+ // format is not supported and SHOULD be recorded in structured form otherwise.
+ //
+ // Instrumentations MAY provide a way for users to filter or truncate
+ // output messages.
+ //
+ // > [!Warning]
+ // > This attribute is likely to contain sensitive information including
+ // > user/PII data.
+ //
+ // See [Recording content on attributes]
+ // section for more details.
+ //
+ // [Output messages JSON schema]: /docs/gen-ai/gen-ai-output-messages.json
+ // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes
+ GenAIOutputMessagesKey = attribute.Key("gen_ai.output.messages")
+
// GenAIOutputTypeKey is the attribute Key conforming to the
// "gen_ai.output.type" semantic conventions. It represents the represents the
// content type requested by the client.
@@ -5931,6 +6125,35 @@ const (
// `gen_ai.output.{type}.*` attributes.
GenAIOutputTypeKey = attribute.Key("gen_ai.output.type")
+ // GenAIProviderNameKey is the attribute Key conforming to the
+ // "gen_ai.provider.name" semantic conventions. It represents the Generative AI
+ // provider as identified by the client or server instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The attribute SHOULD be set based on the instrumentation's best
+ // knowledge and may differ from the actual model provider.
+ //
+ // Multiple providers, including Azure OpenAI, Gemini, and AI hosting platforms
+ // are accessible using the OpenAI REST API and corresponding client libraries,
+ // but may proxy or host models from different providers.
+ //
+ // The `gen_ai.request.model`, `gen_ai.response.model`, and `server.address`
+ // attributes may help identify the actual system in use.
+ //
+ // The `gen_ai.provider.name` attribute acts as a discriminator that
+ // identifies the GenAI telemetry format flavor specific to that provider
+ // within GenAI semantic conventions.
+ // It SHOULD be set consistently with provider-specific attributes and signals.
+ // For example, GenAI spans, metrics, and events related to AWS Bedrock
+ // should have the `gen_ai.provider.name` set to `aws.bedrock` and include
+ // applicable `aws.bedrock.*` attributes and are not expected to include
+ // `openai.*` attributes.
+ GenAIProviderNameKey = attribute.Key("gen_ai.provider.name")
+
// GenAIRequestChoiceCountKey is the attribute Key conforming to the
// "gen_ai.request.choice.count" semantic conventions. It represents the target
// number of candidate completions to return.
@@ -6088,31 +6311,44 @@ const (
// Examples: "gpt-4-0613"
GenAIResponseModelKey = attribute.Key("gen_ai.response.model")
- // GenAISystemKey is the attribute Key conforming to the "gen_ai.system"
- // semantic conventions. It represents the Generative AI product as identified
- // by the client or server instrumentation.
+ // GenAISystemInstructionsKey is the attribute Key conforming to the
+ // "gen_ai.system_instructions" semantic conventions. It represents the system
+ // message or instructions provided to the GenAI model separately from the chat
+ // history.
//
- // Type: Enum
+ // Type: any
// RequirementLevel: Recommended
// Stability: Development
//
- // Examples: openai
- // Note: The `gen_ai.system` describes a family of GenAI models with specific
- // model identified
- // by `gen_ai.request.model` and `gen_ai.response.model` attributes.
+ // Examples: "[\n {\n "type": "text",\n "content": "You are an Agent that greet
+ // users, always use greetings tool to respond"\n }\n]\n", "[\n {\n "type":
+ // "text",\n "content": "You are a language translator."\n },\n {\n "type":
+ // "text",\n "content": "Your mission is to translate text in English to
+ // French."\n }\n]\n"
+ // Note: This attribute SHOULD be used when the corresponding provider or API
+ // allows to provide system instructions or messages separately from the
+ // chat history.
//
- // The actual GenAI product may differ from the one identified by the client.
- // Multiple systems, including Azure OpenAI and Gemini, are accessible by OpenAI
- // client
- // libraries. In such cases, the `gen_ai.system` is set to `openai` based on the
- // instrumentation's best knowledge, instead of the actual system. The
- // `server.address`
- // attribute may help identify the actual system in use for `openai`.
+ // Instructions that are part of the chat history SHOULD be recorded in
+ // `gen_ai.input.messages` attribute instead.
//
- // For custom model, a custom friendly name SHOULD be used.
- // If none of these options apply, the `gen_ai.system` SHOULD be set to `_OTHER`
- // .
- GenAISystemKey = attribute.Key("gen_ai.system")
+ // Instrumentations MUST follow [System instructions JSON schema].
+ //
+ // When recorded on spans, it MAY be recorded as a JSON string if structured
+ // format is not supported and SHOULD be recorded in structured form otherwise.
+ //
+ // Instrumentations MAY provide a way for users to filter or truncate
+ // system instructions.
+ //
+ // > [!Warning]
+ // > This attribute may contain sensitive information.
+ //
+ // See [Recording content on attributes]
+ // section for more details.
+ //
+ // [System instructions JSON schema]: /docs/gen-ai/gen-ai-system-instructions.json
+ // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes
+ GenAISystemInstructionsKey = attribute.Key("gen_ai.system_instructions")
// GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type"
// semantic conventions. It represents the type of token being counted.
@@ -6237,21 +6473,6 @@ func GenAIDataSourceID(val string) attribute.KeyValue {
return GenAIDataSourceIDKey.String(val)
}
-// GenAIOpenAIResponseServiceTier returns an attribute KeyValue conforming to the
-// "gen_ai.openai.response.service_tier" semantic conventions. It represents the
-// service tier used for the response.
-func GenAIOpenAIResponseServiceTier(val string) attribute.KeyValue {
- return GenAIOpenAIResponseServiceTierKey.String(val)
-}
-
-// GenAIOpenAIResponseSystemFingerprint returns an attribute KeyValue conforming
-// to the "gen_ai.openai.response.system_fingerprint" semantic conventions. It
-// represents a fingerprint to track any eventual change in the Generative AI
-// environment.
-func GenAIOpenAIResponseSystemFingerprint(val string) attribute.KeyValue {
- return GenAIOpenAIResponseSystemFingerprintKey.String(val)
-}
-
// GenAIRequestChoiceCount returns an attribute KeyValue conforming to the
// "gen_ai.request.choice.count" semantic conventions. It represents the target
// number of candidate completions to return.
@@ -6393,16 +6614,6 @@ func GenAIUsageOutputTokens(val int) attribute.KeyValue {
return GenAIUsageOutputTokensKey.Int(val)
}
-// Enum values for gen_ai.openai.request.service_tier
-var (
- // The system will utilize scale tier credits until they are exhausted.
- // Stability: development
- GenAIOpenAIRequestServiceTierAuto = GenAIOpenAIRequestServiceTierKey.String("auto")
- // The system will utilize the default scale tier.
- // Stability: development
- GenAIOpenAIRequestServiceTierDefault = GenAIOpenAIRequestServiceTierKey.String("default")
-)
-
// Enum values for gen_ai.operation.name
var (
// Chat completion operation such as [OpenAI Chat API]
@@ -6452,57 +6663,79 @@ var (
GenAIOutputTypeSpeech = GenAIOutputTypeKey.String("speech")
)
-// Enum values for gen_ai.system
+// Enum values for gen_ai.provider.name
var (
- // OpenAI
+ // [OpenAI]
// Stability: development
- GenAISystemOpenAI = GenAISystemKey.String("openai")
+ //
+ // [OpenAI]: https://openai.com/
+ GenAIProviderNameOpenAI = GenAIProviderNameKey.String("openai")
// Any Google generative AI endpoint
// Stability: development
- GenAISystemGCPGenAI = GenAISystemKey.String("gcp.gen_ai")
- // Vertex AI
+ GenAIProviderNameGCPGenAI = GenAIProviderNameKey.String("gcp.gen_ai")
+ // [Vertex AI]
// Stability: development
- GenAISystemGCPVertexAI = GenAISystemKey.String("gcp.vertex_ai")
- // Gemini
+ //
+ // [Vertex AI]: https://cloud.google.com/vertex-ai
+ GenAIProviderNameGCPVertexAI = GenAIProviderNameKey.String("gcp.vertex_ai")
+ // [Gemini]
// Stability: development
- GenAISystemGCPGemini = GenAISystemKey.String("gcp.gemini")
- // Deprecated: Use 'gcp.vertex_ai' instead.
- GenAISystemVertexAI = GenAISystemKey.String("vertex_ai")
- // Deprecated: Use 'gcp.gemini' instead.
- GenAISystemGemini = GenAISystemKey.String("gemini")
- // Anthropic
+ //
+ // [Gemini]: https://cloud.google.com/products/gemini
+ GenAIProviderNameGCPGemini = GenAIProviderNameKey.String("gcp.gemini")
+ // [Anthropic]
// Stability: development
- GenAISystemAnthropic = GenAISystemKey.String("anthropic")
- // Cohere
+ //
+ // [Anthropic]: https://www.anthropic.com/
+ GenAIProviderNameAnthropic = GenAIProviderNameKey.String("anthropic")
+ // [Cohere]
// Stability: development
- GenAISystemCohere = GenAISystemKey.String("cohere")
+ //
+ // [Cohere]: https://cohere.com/
+ GenAIProviderNameCohere = GenAIProviderNameKey.String("cohere")
// Azure AI Inference
// Stability: development
- GenAISystemAzAIInference = GenAISystemKey.String("az.ai.inference")
- // Azure OpenAI
+ GenAIProviderNameAzureAIInference = GenAIProviderNameKey.String("azure.ai.inference")
+ // [Azure OpenAI]
// Stability: development
- GenAISystemAzAIOpenAI = GenAISystemKey.String("az.ai.openai")
- // IBM Watsonx AI
+ //
+ // [Azure OpenAI]: https://azure.microsoft.com/products/ai-services/openai-service/
+ GenAIProviderNameAzureAIOpenAI = GenAIProviderNameKey.String("azure.ai.openai")
+ // [IBM Watsonx AI]
// Stability: development
- GenAISystemIBMWatsonxAI = GenAISystemKey.String("ibm.watsonx.ai")
- // AWS Bedrock
+ //
+ // [IBM Watsonx AI]: https://www.ibm.com/products/watsonx-ai
+ GenAIProviderNameIBMWatsonxAI = GenAIProviderNameKey.String("ibm.watsonx.ai")
+ // [AWS Bedrock]
// Stability: development
- GenAISystemAWSBedrock = GenAISystemKey.String("aws.bedrock")
- // Perplexity
+ //
+ // [AWS Bedrock]: https://aws.amazon.com/bedrock
+ GenAIProviderNameAWSBedrock = GenAIProviderNameKey.String("aws.bedrock")
+ // [Perplexity]
// Stability: development
- GenAISystemPerplexity = GenAISystemKey.String("perplexity")
- // xAI
+ //
+ // [Perplexity]: https://www.perplexity.ai/
+ GenAIProviderNamePerplexity = GenAIProviderNameKey.String("perplexity")
+ // [xAI]
// Stability: development
- GenAISystemXai = GenAISystemKey.String("xai")
- // DeepSeek
+ //
+ // [xAI]: https://x.ai/
+ GenAIProviderNameXAI = GenAIProviderNameKey.String("x_ai")
+ // [DeepSeek]
// Stability: development
- GenAISystemDeepseek = GenAISystemKey.String("deepseek")
- // Groq
+ //
+ // [DeepSeek]: https://www.deepseek.com/
+ GenAIProviderNameDeepseek = GenAIProviderNameKey.String("deepseek")
+ // [Groq]
// Stability: development
- GenAISystemGroq = GenAISystemKey.String("groq")
- // Mistral AI
+ //
+ // [Groq]: https://groq.com/
+ GenAIProviderNameGroq = GenAIProviderNameKey.String("groq")
+ // [Mistral AI]
// Stability: development
- GenAISystemMistralAI = GenAISystemKey.String("mistral_ai")
+ //
+ // [Mistral AI]: https://mistral.ai/
+ GenAIProviderNameMistralAI = GenAIProviderNameKey.String("mistral_ai")
)
// Enum values for gen_ai.token.type
@@ -6510,8 +6743,6 @@ var (
// Input tokens (prompt, input, etc.)
// Stability: development
GenAITokenTypeInput = GenAITokenTypeKey.String("input")
- // Deprecated: Replaced by `output`.
- GenAITokenTypeCompletion = GenAITokenTypeKey.String("output")
// Output tokens (completion, response, etc.)
// Stability: development
GenAITokenTypeOutput = GenAITokenTypeKey.String("output")
@@ -7312,6 +7543,14 @@ func HTTPRequestBodySize(val int) attribute.KeyValue {
return HTTPRequestBodySizeKey.Int(val)
}
+// HTTPRequestHeader returns an attribute KeyValue conforming to the
+// "http.request.header" semantic conventions. It represents the HTTP request
+// headers, `` being the normalized HTTP Header name (lowercase), the value
+// being the header values.
+func HTTPRequestHeader(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("http.request.header."+key, val)
+}
+
// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
// "http.request.method_original" semantic conventions. It represents the
// original HTTP method sent by the client in the request line.
@@ -7347,6 +7586,14 @@ func HTTPResponseBodySize(val int) attribute.KeyValue {
return HTTPResponseBodySizeKey.Int(val)
}
+// HTTPResponseHeader returns an attribute KeyValue conforming to the
+// "http.response.header" semantic conventions. It represents the HTTP response
+// headers, `` being the normalized HTTP Header name (lowercase), the value
+// being the header values.
+func HTTPResponseHeader(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("http.response.header."+key, val)
+}
+
// HTTPResponseSize returns an attribute KeyValue conforming to the
// "http.response.size" semantic conventions. It represents the total size of the
// response in bytes. This should be the total number of bytes sent over the
@@ -7418,64 +7665,352 @@ var (
// Namespace: hw
const (
- // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions.
- // It represents an identifier for the hardware component, unique within the
- // monitored host.
+ // HwBatteryCapacityKey is the attribute Key conforming to the
+ // "hw.battery.capacity" semantic conventions. It represents the design capacity
+ // in Watts-hours or Amper-hours.
//
// Type: string
// RequirementLevel: Recommended
// Stability: Development
//
- // Examples: "win32battery_battery_testsysa33_1"
- HwIDKey = attribute.Key("hw.id")
+ // Examples: "9.3Ah", "50Wh"
+ HwBatteryCapacityKey = attribute.Key("hw.battery.capacity")
- // HwNameKey is the attribute Key conforming to the "hw.name" semantic
- // conventions. It represents an easily-recognizable name for the hardware
+ // HwBatteryChemistryKey is the attribute Key conforming to the
+ // "hw.battery.chemistry" semantic conventions. It represents the battery
+ // [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Li-ion", "NiMH"
+ //
+ // [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html
+ HwBatteryChemistryKey = attribute.Key("hw.battery.chemistry")
+
+ // HwBatteryStateKey is the attribute Key conforming to the "hw.battery.state"
+ // semantic conventions. It represents the current state of the battery.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwBatteryStateKey = attribute.Key("hw.battery.state")
+
+ // HwBiosVersionKey is the attribute Key conforming to the "hw.bios_version"
+ // semantic conventions. It represents the BIOS version of the hardware
// component.
//
// Type: string
// RequirementLevel: Recommended
// Stability: Development
//
- // Examples: "eth0"
- HwNameKey = attribute.Key("hw.name")
+ // Examples: "1.2.3"
+ HwBiosVersionKey = attribute.Key("hw.bios_version")
- // HwParentKey is the attribute Key conforming to the "hw.parent" semantic
- // conventions. It represents the unique identifier of the parent component
- // (typically the `hw.id` attribute of the enclosure, or disk controller).
+ // HwDriverVersionKey is the attribute Key conforming to the "hw.driver_version"
+ // semantic conventions. It represents the driver version for the hardware
+ // component.
//
// Type: string
// RequirementLevel: Recommended
// Stability: Development
//
- // Examples: "dellStorage_perc_0"
- HwParentKey = attribute.Key("hw.parent")
+ // Examples: "10.2.1-3"
+ HwDriverVersionKey = attribute.Key("hw.driver_version")
- // HwStateKey is the attribute Key conforming to the "hw.state" semantic
- // conventions. It represents the current state of the component.
+ // HwEnclosureTypeKey is the attribute Key conforming to the "hw.enclosure.type"
+ // semantic conventions. It represents the type of the enclosure (useful for
+ // modular systems).
//
- // Type: Enum
+ // Type: string
// RequirementLevel: Recommended
// Stability: Development
//
- // Examples:
- HwStateKey = attribute.Key("hw.state")
+ // Examples: "Computer", "Storage", "Switch"
+ HwEnclosureTypeKey = attribute.Key("hw.enclosure.type")
- // HwTypeKey is the attribute Key conforming to the "hw.type" semantic
- // conventions. It represents the type of the component.
+ // HwFirmwareVersionKey is the attribute Key conforming to the
+ // "hw.firmware_version" semantic conventions. It represents the firmware
+ // version of the hardware component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2.0.1"
+ HwFirmwareVersionKey = attribute.Key("hw.firmware_version")
+
+ // HwGpuTaskKey is the attribute Key conforming to the "hw.gpu.task" semantic
+ // conventions. It represents the type of task the GPU is performing.
//
// Type: Enum
// RequirementLevel: Recommended
// Stability: Development
//
// Examples:
- // Note: Describes the category of the hardware component for which `hw.state`
+ HwGpuTaskKey = attribute.Key("hw.gpu.task")
+
+ // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions.
+ // It represents an identifier for the hardware component, unique within the
+ // monitored host.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "win32battery_battery_testsysa33_1"
+ HwIDKey = attribute.Key("hw.id")
+
+ // HwLimitTypeKey is the attribute Key conforming to the "hw.limit_type"
+ // semantic conventions. It represents the type of limit for hardware
+ // components.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwLimitTypeKey = attribute.Key("hw.limit_type")
+
+ // HwLogicalDiskRaidLevelKey is the attribute Key conforming to the
+ // "hw.logical_disk.raid_level" semantic conventions. It represents the RAID
+ // Level of the logical disk.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "RAID0+1", "RAID5", "RAID10"
+ HwLogicalDiskRaidLevelKey = attribute.Key("hw.logical_disk.raid_level")
+
+ // HwLogicalDiskStateKey is the attribute Key conforming to the
+ // "hw.logical_disk.state" semantic conventions. It represents the state of the
+ // logical disk space usage.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwLogicalDiskStateKey = attribute.Key("hw.logical_disk.state")
+
+ // HwMemoryTypeKey is the attribute Key conforming to the "hw.memory.type"
+ // semantic conventions. It represents the type of the memory module.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "DDR4", "DDR5", "LPDDR5"
+ HwMemoryTypeKey = attribute.Key("hw.memory.type")
+
+ // HwModelKey is the attribute Key conforming to the "hw.model" semantic
+ // conventions. It represents the descriptive model name of the hardware
+ // component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "PERC H740P", "Intel(R) Core(TM) i7-10700K", "Dell XPS 15 Battery"
+ HwModelKey = attribute.Key("hw.model")
+
+ // HwNameKey is the attribute Key conforming to the "hw.name" semantic
+ // conventions. It represents an easily-recognizable name for the hardware
+ // component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "eth0"
+ HwNameKey = attribute.Key("hw.name")
+
+ // HwNetworkLogicalAddressesKey is the attribute Key conforming to the
+ // "hw.network.logical_addresses" semantic conventions. It represents the
+ // logical addresses of the adapter (e.g. IP address, or WWPN).
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "172.16.8.21", "57.11.193.42"
+ HwNetworkLogicalAddressesKey = attribute.Key("hw.network.logical_addresses")
+
+ // HwNetworkPhysicalAddressKey is the attribute Key conforming to the
+ // "hw.network.physical_address" semantic conventions. It represents the
+ // physical address of the adapter (e.g. MAC address, or WWNN).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "00-90-F5-E9-7B-36"
+ HwNetworkPhysicalAddressKey = attribute.Key("hw.network.physical_address")
+
+ // HwParentKey is the attribute Key conforming to the "hw.parent" semantic
+ // conventions. It represents the unique identifier of the parent component
+ // (typically the `hw.id` attribute of the enclosure, or disk controller).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "dellStorage_perc_0"
+ HwParentKey = attribute.Key("hw.parent")
+
+ // HwPhysicalDiskSmartAttributeKey is the attribute Key conforming to the
+ // "hw.physical_disk.smart_attribute" semantic conventions. It represents the
+ // [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute
+ // of the physical disk.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Spin Retry Count", "Seek Error Rate", "Raw Read Error Rate"
+ //
+ // [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T.
+ HwPhysicalDiskSmartAttributeKey = attribute.Key("hw.physical_disk.smart_attribute")
+
+ // HwPhysicalDiskStateKey is the attribute Key conforming to the
+ // "hw.physical_disk.state" semantic conventions. It represents the state of the
+ // physical disk endurance utilization.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwPhysicalDiskStateKey = attribute.Key("hw.physical_disk.state")
+
+ // HwPhysicalDiskTypeKey is the attribute Key conforming to the
+ // "hw.physical_disk.type" semantic conventions. It represents the type of the
+ // physical disk.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "HDD", "SSD", "10K"
+ HwPhysicalDiskTypeKey = attribute.Key("hw.physical_disk.type")
+
+ // HwSensorLocationKey is the attribute Key conforming to the
+ // "hw.sensor_location" semantic conventions. It represents the location of the
+ // sensor.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cpu0", "ps1", "INLET", "CPU0_DIE", "AMBIENT", "MOTHERBOARD", "PS0
+ // V3_3", "MAIN_12V", "CPU_VCORE"
+ HwSensorLocationKey = attribute.Key("hw.sensor_location")
+
+ // HwSerialNumberKey is the attribute Key conforming to the "hw.serial_number"
+ // semantic conventions. It represents the serial number of the hardware
+ // component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CNFCP0123456789"
+ HwSerialNumberKey = attribute.Key("hw.serial_number")
+
+ // HwStateKey is the attribute Key conforming to the "hw.state" semantic
+ // conventions. It represents the current state of the component.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwStateKey = attribute.Key("hw.state")
+
+ // HwTapeDriveOperationTypeKey is the attribute Key conforming to the
+ // "hw.tape_drive.operation_type" semantic conventions. It represents the type
+ // of tape drive operation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwTapeDriveOperationTypeKey = attribute.Key("hw.tape_drive.operation_type")
+
+ // HwTypeKey is the attribute Key conforming to the "hw.type" semantic
+ // conventions. It represents the type of the component.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: Describes the category of the hardware component for which `hw.state`
// is being reported. For example, `hw.type=temperature` along with
// `hw.state=degraded` would indicate that the temperature of the hardware
// component has been reported as `degraded`.
HwTypeKey = attribute.Key("hw.type")
+
+ // HwVendorKey is the attribute Key conforming to the "hw.vendor" semantic
+ // conventions. It represents the vendor name of the hardware component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Dell", "HP", "Intel", "AMD", "LSI", "Lenovo"
+ HwVendorKey = attribute.Key("hw.vendor")
)
+// HwBatteryCapacity returns an attribute KeyValue conforming to the
+// "hw.battery.capacity" semantic conventions. It represents the design capacity
+// in Watts-hours or Amper-hours.
+func HwBatteryCapacity(val string) attribute.KeyValue {
+ return HwBatteryCapacityKey.String(val)
+}
+
+// HwBatteryChemistry returns an attribute KeyValue conforming to the
+// "hw.battery.chemistry" semantic conventions. It represents the battery
+// [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc.
+//
+// [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html
+func HwBatteryChemistry(val string) attribute.KeyValue {
+ return HwBatteryChemistryKey.String(val)
+}
+
+// HwBiosVersion returns an attribute KeyValue conforming to the
+// "hw.bios_version" semantic conventions. It represents the BIOS version of the
+// hardware component.
+func HwBiosVersion(val string) attribute.KeyValue {
+ return HwBiosVersionKey.String(val)
+}
+
+// HwDriverVersion returns an attribute KeyValue conforming to the
+// "hw.driver_version" semantic conventions. It represents the driver version for
+// the hardware component.
+func HwDriverVersion(val string) attribute.KeyValue {
+ return HwDriverVersionKey.String(val)
+}
+
+// HwEnclosureType returns an attribute KeyValue conforming to the
+// "hw.enclosure.type" semantic conventions. It represents the type of the
+// enclosure (useful for modular systems).
+func HwEnclosureType(val string) attribute.KeyValue {
+ return HwEnclosureTypeKey.String(val)
+}
+
+// HwFirmwareVersion returns an attribute KeyValue conforming to the
+// "hw.firmware_version" semantic conventions. It represents the firmware version
+// of the hardware component.
+func HwFirmwareVersion(val string) attribute.KeyValue {
+ return HwFirmwareVersionKey.String(val)
+}
+
// HwID returns an attribute KeyValue conforming to the "hw.id" semantic
// conventions. It represents an identifier for the hardware component, unique
// within the monitored host.
@@ -7483,6 +8018,26 @@ func HwID(val string) attribute.KeyValue {
return HwIDKey.String(val)
}
+// HwLogicalDiskRaidLevel returns an attribute KeyValue conforming to the
+// "hw.logical_disk.raid_level" semantic conventions. It represents the RAID
+// Level of the logical disk.
+func HwLogicalDiskRaidLevel(val string) attribute.KeyValue {
+ return HwLogicalDiskRaidLevelKey.String(val)
+}
+
+// HwMemoryType returns an attribute KeyValue conforming to the "hw.memory.type"
+// semantic conventions. It represents the type of the memory module.
+func HwMemoryType(val string) attribute.KeyValue {
+ return HwMemoryTypeKey.String(val)
+}
+
+// HwModel returns an attribute KeyValue conforming to the "hw.model" semantic
+// conventions. It represents the descriptive model name of the hardware
+// component.
+func HwModel(val string) attribute.KeyValue {
+ return HwModelKey.String(val)
+}
+
// HwName returns an attribute KeyValue conforming to the "hw.name" semantic
// conventions. It represents an easily-recognizable name for the hardware
// component.
@@ -7490,6 +8045,20 @@ func HwName(val string) attribute.KeyValue {
return HwNameKey.String(val)
}
+// HwNetworkLogicalAddresses returns an attribute KeyValue conforming to the
+// "hw.network.logical_addresses" semantic conventions. It represents the logical
+// addresses of the adapter (e.g. IP address, or WWPN).
+func HwNetworkLogicalAddresses(val ...string) attribute.KeyValue {
+ return HwNetworkLogicalAddressesKey.StringSlice(val)
+}
+
+// HwNetworkPhysicalAddress returns an attribute KeyValue conforming to the
+// "hw.network.physical_address" semantic conventions. It represents the physical
+// address of the adapter (e.g. MAC address, or WWNN).
+func HwNetworkPhysicalAddress(val string) attribute.KeyValue {
+ return HwNetworkPhysicalAddressKey.String(val)
+}
+
// HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic
// conventions. It represents the unique identifier of the parent component
// (typically the `hw.id` attribute of the enclosure, or disk controller).
@@ -7497,17 +8066,144 @@ func HwParent(val string) attribute.KeyValue {
return HwParentKey.String(val)
}
-// Enum values for hw.state
+// HwPhysicalDiskSmartAttribute returns an attribute KeyValue conforming to the
+// "hw.physical_disk.smart_attribute" semantic conventions. It represents the
+// [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute
+// of the physical disk.
+//
+// [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T.
+func HwPhysicalDiskSmartAttribute(val string) attribute.KeyValue {
+ return HwPhysicalDiskSmartAttributeKey.String(val)
+}
+
+// HwPhysicalDiskType returns an attribute KeyValue conforming to the
+// "hw.physical_disk.type" semantic conventions. It represents the type of the
+// physical disk.
+func HwPhysicalDiskType(val string) attribute.KeyValue {
+ return HwPhysicalDiskTypeKey.String(val)
+}
+
+// HwSensorLocation returns an attribute KeyValue conforming to the
+// "hw.sensor_location" semantic conventions. It represents the location of the
+// sensor.
+func HwSensorLocation(val string) attribute.KeyValue {
+ return HwSensorLocationKey.String(val)
+}
+
+// HwSerialNumber returns an attribute KeyValue conforming to the
+// "hw.serial_number" semantic conventions. It represents the serial number of
+// the hardware component.
+func HwSerialNumber(val string) attribute.KeyValue {
+ return HwSerialNumberKey.String(val)
+}
+
+// HwVendor returns an attribute KeyValue conforming to the "hw.vendor" semantic
+// conventions. It represents the vendor name of the hardware component.
+func HwVendor(val string) attribute.KeyValue {
+ return HwVendorKey.String(val)
+}
+
+// Enum values for hw.battery.state
var (
- // Ok
+ // Charging
// Stability: development
- HwStateOk = HwStateKey.String("ok")
+ HwBatteryStateCharging = HwBatteryStateKey.String("charging")
+ // Discharging
+ // Stability: development
+ HwBatteryStateDischarging = HwBatteryStateKey.String("discharging")
+)
+
+// Enum values for hw.gpu.task
+var (
+ // Decoder
+ // Stability: development
+ HwGpuTaskDecoder = HwGpuTaskKey.String("decoder")
+ // Encoder
+ // Stability: development
+ HwGpuTaskEncoder = HwGpuTaskKey.String("encoder")
+ // General
+ // Stability: development
+ HwGpuTaskGeneral = HwGpuTaskKey.String("general")
+)
+
+// Enum values for hw.limit_type
+var (
+ // Critical
+ // Stability: development
+ HwLimitTypeCritical = HwLimitTypeKey.String("critical")
+ // Degraded
+ // Stability: development
+ HwLimitTypeDegraded = HwLimitTypeKey.String("degraded")
+ // High Critical
+ // Stability: development
+ HwLimitTypeHighCritical = HwLimitTypeKey.String("high.critical")
+ // High Degraded
+ // Stability: development
+ HwLimitTypeHighDegraded = HwLimitTypeKey.String("high.degraded")
+ // Low Critical
+ // Stability: development
+ HwLimitTypeLowCritical = HwLimitTypeKey.String("low.critical")
+ // Low Degraded
+ // Stability: development
+ HwLimitTypeLowDegraded = HwLimitTypeKey.String("low.degraded")
+ // Maximum
+ // Stability: development
+ HwLimitTypeMax = HwLimitTypeKey.String("max")
+ // Throttled
+ // Stability: development
+ HwLimitTypeThrottled = HwLimitTypeKey.String("throttled")
+ // Turbo
+ // Stability: development
+ HwLimitTypeTurbo = HwLimitTypeKey.String("turbo")
+)
+
+// Enum values for hw.logical_disk.state
+var (
+ // Used
+ // Stability: development
+ HwLogicalDiskStateUsed = HwLogicalDiskStateKey.String("used")
+ // Free
+ // Stability: development
+ HwLogicalDiskStateFree = HwLogicalDiskStateKey.String("free")
+)
+
+// Enum values for hw.physical_disk.state
+var (
+ // Remaining
+ // Stability: development
+ HwPhysicalDiskStateRemaining = HwPhysicalDiskStateKey.String("remaining")
+)
+
+// Enum values for hw.state
+var (
// Degraded
// Stability: development
HwStateDegraded = HwStateKey.String("degraded")
// Failed
// Stability: development
HwStateFailed = HwStateKey.String("failed")
+ // Needs Cleaning
+ // Stability: development
+ HwStateNeedsCleaning = HwStateKey.String("needs_cleaning")
+ // OK
+ // Stability: development
+ HwStateOk = HwStateKey.String("ok")
+ // Predicted Failure
+ // Stability: development
+ HwStatePredictedFailure = HwStateKey.String("predicted_failure")
+)
+
+// Enum values for hw.tape_drive.operation_type
+var (
+ // Mount
+ // Stability: development
+ HwTapeDriveOperationTypeMount = HwTapeDriveOperationTypeKey.String("mount")
+ // Unmount
+ // Stability: development
+ HwTapeDriveOperationTypeUnmount = HwTapeDriveOperationTypeKey.String("unmount")
+ // Clean
+ // Stability: development
+ HwTapeDriveOperationTypeClean = HwTapeDriveOperationTypeKey.String("clean")
)
// Enum values for hw.type
@@ -7686,6 +8382,36 @@ const (
// Examples: "Evicted", "Error"
K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
+ // K8SContainerStatusReasonKey is the attribute Key conforming to the
+ // "k8s.container.status.reason" semantic conventions. It represents the reason
+ // for the container state. Corresponds to the `reason` field of the:
+ // [K8s ContainerStateWaiting] or [K8s ContainerStateTerminated].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ContainerCreating", "CrashLoopBackOff",
+ // "CreateContainerConfigError", "ErrImagePull", "ImagePullBackOff",
+ // "OOMKilled", "Completed", "Error", "ContainerCannotRun"
+ //
+ // [K8s ContainerStateWaiting]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatewaiting-v1-core
+ // [K8s ContainerStateTerminated]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstateterminated-v1-core
+ K8SContainerStatusReasonKey = attribute.Key("k8s.container.status.reason")
+
+ // K8SContainerStatusStateKey is the attribute Key conforming to the
+ // "k8s.container.status.state" semantic conventions. It represents the state of
+ // the container. [K8s ContainerState].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "terminated", "running", "waiting"
+ //
+ // [K8s ContainerState]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstate-v1-core
+ K8SContainerStatusStateKey = attribute.Key("k8s.container.status.state")
+
// K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name"
// semantic conventions. It represents the name of the CronJob.
//
@@ -7749,6 +8475,18 @@ const (
// Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+ // K8SHPAMetricTypeKey is the attribute Key conforming to the
+ // "k8s.hpa.metric.type" semantic conventions. It represents the type of metric
+ // source for the horizontal pod autoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Resource", "ContainerResource"
+ // Note: This attribute reflects the `type` field of spec.metrics[] in the HPA.
+ K8SHPAMetricTypeKey = attribute.Key("k8s.hpa.metric.type")
+
// K8SHPANameKey is the attribute Key conforming to the "k8s.hpa.name" semantic
// conventions. It represents the name of the horizontal pod autoscaler.
//
@@ -7759,6 +8497,43 @@ const (
// Examples: "opentelemetry"
K8SHPANameKey = attribute.Key("k8s.hpa.name")
+ // K8SHPAScaletargetrefAPIVersionKey is the attribute Key conforming to the
+ // "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the
+ // API version of the target resource to scale for the HorizontalPodAutoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "apps/v1", "autoscaling/v2"
+ // Note: This maps to the `apiVersion` field in the `scaleTargetRef` of the HPA
+ // spec.
+ K8SHPAScaletargetrefAPIVersionKey = attribute.Key("k8s.hpa.scaletargetref.api_version")
+
+ // K8SHPAScaletargetrefKindKey is the attribute Key conforming to the
+ // "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of
+ // the target resource to scale for the HorizontalPodAutoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Deployment", "StatefulSet"
+ // Note: This maps to the `kind` field in the `scaleTargetRef` of the HPA spec.
+ K8SHPAScaletargetrefKindKey = attribute.Key("k8s.hpa.scaletargetref.kind")
+
+ // K8SHPAScaletargetrefNameKey is the attribute Key conforming to the
+ // "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of
+ // the target resource to scale for the HorizontalPodAutoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-deployment", "my-statefulset"
+ // Note: This maps to the `name` field in the `scaleTargetRef` of the HPA spec.
+ K8SHPAScaletargetrefNameKey = attribute.Key("k8s.hpa.scaletargetref.name")
+
// K8SHPAUIDKey is the attribute Key conforming to the "k8s.hpa.uid" semantic
// conventions. It represents the UID of the horizontal pod autoscaler.
//
@@ -7769,6 +8544,17 @@ const (
// Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
K8SHPAUIDKey = attribute.Key("k8s.hpa.uid")
+ // K8SHugepageSizeKey is the attribute Key conforming to the "k8s.hugepage.size"
+ // semantic conventions. It represents the size (identifier) of the K8s huge
+ // page.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2Mi"
+ K8SHugepageSizeKey = attribute.Key("k8s.hugepage.size")
+
// K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic
// conventions. It represents the name of the Job.
//
@@ -7815,6 +8601,46 @@ const (
// [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core
K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase")
+ // K8SNodeConditionStatusKey is the attribute Key conforming to the
+ // "k8s.node.condition.status" semantic conventions. It represents the status of
+ // the condition, one of True, False, Unknown.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "true", "false", "unknown"
+ // Note: This attribute aligns with the `status` field of the
+ // [NodeCondition]
+ //
+ // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core
+ K8SNodeConditionStatusKey = attribute.Key("k8s.node.condition.status")
+
+ // K8SNodeConditionTypeKey is the attribute Key conforming to the
+ // "k8s.node.condition.type" semantic conventions. It represents the condition
+ // type of a K8s Node.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Ready", "DiskPressure"
+ // Note: K8s Node conditions as described
+ // by [K8s documentation].
+ //
+ // This attribute aligns with the `type` field of the
+ // [NodeCondition]
+ //
+ // The set of possible values is not limited to those listed here. Managed
+ // Kubernetes environments,
+ // or custom controllers MAY introduce additional node condition types.
+ // When this occurs, the exact value as reported by the Kubernetes API SHOULD be
+ // used.
+ //
+ // [K8s documentation]: https://v1-32.docs.kubernetes.io/docs/reference/node/node-status/#condition
+ // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core
+ K8SNodeConditionTypeKey = attribute.Key("k8s.node.condition.type")
+
// K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
// semantic conventions. It represents the name of the Node.
//
@@ -7910,6 +8736,25 @@ const (
// Examples: "opentelemetry"
K8SResourceQuotaNameKey = attribute.Key("k8s.resourcequota.name")
+ // K8SResourceQuotaResourceNameKey is the attribute Key conforming to the
+ // "k8s.resourcequota.resource_name" semantic conventions. It represents the
+ // name of the K8s resource a resource quota defines.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "count/replicationcontrollers"
+ // Note: The value for this attribute can be either the full
+ // `count/[.]` string (e.g., count/deployments.apps,
+ // count/pods), or, for certain core Kubernetes resources, just the resource
+ // name (e.g., pods, services, configmaps). Both forms are supported by
+ // Kubernetes for object count quotas. See
+ // [Kubernetes Resource Quotas documentation] for more details.
+ //
+ // [Kubernetes Resource Quotas documentation]: https://kubernetes.io/docs/concepts/policy/resource-quotas/#object-count-quota
+ K8SResourceQuotaResourceNameKey = attribute.Key("k8s.resourcequota.resource_name")
+
// K8SResourceQuotaUIDKey is the attribute Key conforming to the
// "k8s.resourcequota.uid" semantic conventions. It represents the UID of the
// resource quota.
@@ -7943,6 +8788,19 @@ const (
// Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+ // K8SStorageclassNameKey is the attribute Key conforming to the
+ // "k8s.storageclass.name" semantic conventions. It represents the name of K8s
+ // [StorageClass] object.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gold.storageclass.storage.k8s.io"
+ //
+ // [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io
+ K8SStorageclassNameKey = attribute.Key("k8s.storageclass.name")
+
// K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name"
// semantic conventions. It represents the name of the K8s volume.
//
@@ -8001,6 +8859,22 @@ func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
return K8SContainerStatusLastTerminatedReasonKey.String(val)
}
+// K8SCronJobAnnotation returns an attribute KeyValue conforming to the
+// "k8s.cronjob.annotation" semantic conventions. It represents the cronjob
+// annotation placed on the CronJob, the `` being the annotation name, the
+// value being the annotation value.
+func K8SCronJobAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.cronjob.annotation."+key, val)
+}
+
+// K8SCronJobLabel returns an attribute KeyValue conforming to the
+// "k8s.cronjob.label" semantic conventions. It represents the label placed on
+// the CronJob, the `` being the label name, the value being the label
+// value.
+func K8SCronJobLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.cronjob.label."+key, val)
+}
+
// K8SCronJobName returns an attribute KeyValue conforming to the
// "k8s.cronjob.name" semantic conventions. It represents the name of the
// CronJob.
@@ -8014,6 +8888,22 @@ func K8SCronJobUID(val string) attribute.KeyValue {
return K8SCronJobUIDKey.String(val)
}
+// K8SDaemonSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.daemonset.annotation" semantic conventions. It represents the annotation
+// placed on the DaemonSet, the `` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SDaemonSetAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.daemonset.annotation."+key, val)
+}
+
+// K8SDaemonSetLabel returns an attribute KeyValue conforming to the
+// "k8s.daemonset.label" semantic conventions. It represents the label placed on
+// the DaemonSet, the `` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SDaemonSetLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.daemonset.label."+key, val)
+}
+
// K8SDaemonSetName returns an attribute KeyValue conforming to the
// "k8s.daemonset.name" semantic conventions. It represents the name of the
// DaemonSet.
@@ -8028,6 +8918,22 @@ func K8SDaemonSetUID(val string) attribute.KeyValue {
return K8SDaemonSetUIDKey.String(val)
}
+// K8SDeploymentAnnotation returns an attribute KeyValue conforming to the
+// "k8s.deployment.annotation" semantic conventions. It represents the annotation
+// placed on the Deployment, the `` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SDeploymentAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.deployment.annotation."+key, val)
+}
+
+// K8SDeploymentLabel returns an attribute KeyValue conforming to the
+// "k8s.deployment.label" semantic conventions. It represents the label placed on
+// the Deployment, the `` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SDeploymentLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.deployment.label."+key, val)
+}
+
// K8SDeploymentName returns an attribute KeyValue conforming to the
// "k8s.deployment.name" semantic conventions. It represents the name of the
// Deployment.
@@ -8042,18 +8948,69 @@ func K8SDeploymentUID(val string) attribute.KeyValue {
return K8SDeploymentUIDKey.String(val)
}
+// K8SHPAMetricType returns an attribute KeyValue conforming to the
+// "k8s.hpa.metric.type" semantic conventions. It represents the type of metric
+// source for the horizontal pod autoscaler.
+func K8SHPAMetricType(val string) attribute.KeyValue {
+ return K8SHPAMetricTypeKey.String(val)
+}
+
// K8SHPAName returns an attribute KeyValue conforming to the "k8s.hpa.name"
// semantic conventions. It represents the name of the horizontal pod autoscaler.
func K8SHPAName(val string) attribute.KeyValue {
return K8SHPANameKey.String(val)
}
+// K8SHPAScaletargetrefAPIVersion returns an attribute KeyValue conforming to the
+// "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the
+// API version of the target resource to scale for the HorizontalPodAutoscaler.
+func K8SHPAScaletargetrefAPIVersion(val string) attribute.KeyValue {
+ return K8SHPAScaletargetrefAPIVersionKey.String(val)
+}
+
+// K8SHPAScaletargetrefKind returns an attribute KeyValue conforming to the
+// "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of
+// the target resource to scale for the HorizontalPodAutoscaler.
+func K8SHPAScaletargetrefKind(val string) attribute.KeyValue {
+ return K8SHPAScaletargetrefKindKey.String(val)
+}
+
+// K8SHPAScaletargetrefName returns an attribute KeyValue conforming to the
+// "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of
+// the target resource to scale for the HorizontalPodAutoscaler.
+func K8SHPAScaletargetrefName(val string) attribute.KeyValue {
+ return K8SHPAScaletargetrefNameKey.String(val)
+}
+
// K8SHPAUID returns an attribute KeyValue conforming to the "k8s.hpa.uid"
// semantic conventions. It represents the UID of the horizontal pod autoscaler.
func K8SHPAUID(val string) attribute.KeyValue {
return K8SHPAUIDKey.String(val)
}
+// K8SHugepageSize returns an attribute KeyValue conforming to the
+// "k8s.hugepage.size" semantic conventions. It represents the size (identifier)
+// of the K8s huge page.
+func K8SHugepageSize(val string) attribute.KeyValue {
+ return K8SHugepageSizeKey.String(val)
+}
+
+// K8SJobAnnotation returns an attribute KeyValue conforming to the
+// "k8s.job.annotation" semantic conventions. It represents the annotation placed
+// on the Job, the `` being the annotation name, the value being the
+// annotation value, even if the value is empty.
+func K8SJobAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.job.annotation."+key, val)
+}
+
+// K8SJobLabel returns an attribute KeyValue conforming to the "k8s.job.label"
+// semantic conventions. It represents the label placed on the Job, the ``
+// being the label name, the value being the label value, even if the value is
+// empty.
+func K8SJobLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.job.label."+key, val)
+}
+
// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
// semantic conventions. It represents the name of the Job.
func K8SJobName(val string) attribute.KeyValue {
@@ -8066,6 +9023,22 @@ func K8SJobUID(val string) attribute.KeyValue {
return K8SJobUIDKey.String(val)
}
+// K8SNamespaceAnnotation returns an attribute KeyValue conforming to the
+// "k8s.namespace.annotation" semantic conventions. It represents the annotation
+// placed on the Namespace, the `` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SNamespaceAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.namespace.annotation."+key, val)
+}
+
+// K8SNamespaceLabel returns an attribute KeyValue conforming to the
+// "k8s.namespace.label" semantic conventions. It represents the label placed on
+// the Namespace, the `` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SNamespaceLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.namespace.label."+key, val)
+}
+
// K8SNamespaceName returns an attribute KeyValue conforming to the
// "k8s.namespace.name" semantic conventions. It represents the name of the
// namespace that the pod is running in.
@@ -8073,6 +9046,22 @@ func K8SNamespaceName(val string) attribute.KeyValue {
return K8SNamespaceNameKey.String(val)
}
+// K8SNodeAnnotation returns an attribute KeyValue conforming to the
+// "k8s.node.annotation" semantic conventions. It represents the annotation
+// placed on the Node, the `` being the annotation name, the value being the
+// annotation value, even if the value is empty.
+func K8SNodeAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.node.annotation."+key, val)
+}
+
+// K8SNodeLabel returns an attribute KeyValue conforming to the "k8s.node.label"
+// semantic conventions. It represents the label placed on the Node, the ``
+// being the label name, the value being the label value, even if the value is
+// empty.
+func K8SNodeLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.node.label."+key, val)
+}
+
// K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name"
// semantic conventions. It represents the name of the Node.
func K8SNodeName(val string) attribute.KeyValue {
@@ -8085,6 +9074,21 @@ func K8SNodeUID(val string) attribute.KeyValue {
return K8SNodeUIDKey.String(val)
}
+// K8SPodAnnotation returns an attribute KeyValue conforming to the
+// "k8s.pod.annotation" semantic conventions. It represents the annotation placed
+// on the Pod, the `` being the annotation name, the value being the
+// annotation value.
+func K8SPodAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.pod.annotation."+key, val)
+}
+
+// K8SPodLabel returns an attribute KeyValue conforming to the "k8s.pod.label"
+// semantic conventions. It represents the label placed on the Pod, the ``
+// being the label name, the value being the label value.
+func K8SPodLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.pod.label."+key, val)
+}
+
// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
// semantic conventions. It represents the name of the Pod.
func K8SPodName(val string) attribute.KeyValue {
@@ -8097,6 +9101,22 @@ func K8SPodUID(val string) attribute.KeyValue {
return K8SPodUIDKey.String(val)
}
+// K8SReplicaSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.replicaset.annotation" semantic conventions. It represents the annotation
+// placed on the ReplicaSet, the `` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SReplicaSetAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.replicaset.annotation."+key, val)
+}
+
+// K8SReplicaSetLabel returns an attribute KeyValue conforming to the
+// "k8s.replicaset.label" semantic conventions. It represents the label placed on
+// the ReplicaSet, the `` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SReplicaSetLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.replicaset.label."+key, val)
+}
+
// K8SReplicaSetName returns an attribute KeyValue conforming to the
// "k8s.replicaset.name" semantic conventions. It represents the name of the
// ReplicaSet.
@@ -8132,6 +9152,13 @@ func K8SResourceQuotaName(val string) attribute.KeyValue {
return K8SResourceQuotaNameKey.String(val)
}
+// K8SResourceQuotaResourceName returns an attribute KeyValue conforming to the
+// "k8s.resourcequota.resource_name" semantic conventions. It represents the name
+// of the K8s resource a resource quota defines.
+func K8SResourceQuotaResourceName(val string) attribute.KeyValue {
+ return K8SResourceQuotaResourceNameKey.String(val)
+}
+
// K8SResourceQuotaUID returns an attribute KeyValue conforming to the
// "k8s.resourcequota.uid" semantic conventions. It represents the UID of the
// resource quota.
@@ -8139,6 +9166,22 @@ func K8SResourceQuotaUID(val string) attribute.KeyValue {
return K8SResourceQuotaUIDKey.String(val)
}
+// K8SStatefulSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.statefulset.annotation" semantic conventions. It represents the
+// annotation placed on the StatefulSet, the `` being the annotation name,
+// the value being the annotation value, even if the value is empty.
+func K8SStatefulSetAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.statefulset.annotation."+key, val)
+}
+
+// K8SStatefulSetLabel returns an attribute KeyValue conforming to the
+// "k8s.statefulset.label" semantic conventions. It represents the label placed
+// on the StatefulSet, the `` being the label name, the value being the
+// label value, even if the value is empty.
+func K8SStatefulSetLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.statefulset.label."+key, val)
+}
+
// K8SStatefulSetName returns an attribute KeyValue conforming to the
// "k8s.statefulset.name" semantic conventions. It represents the name of the
// StatefulSet.
@@ -8153,6 +9196,15 @@ func K8SStatefulSetUID(val string) attribute.KeyValue {
return K8SStatefulSetUIDKey.String(val)
}
+// K8SStorageclassName returns an attribute KeyValue conforming to the
+// "k8s.storageclass.name" semantic conventions. It represents the name of K8s
+// [StorageClass] object.
+//
+// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io
+func K8SStorageclassName(val string) attribute.KeyValue {
+ return K8SStorageclassNameKey.String(val)
+}
+
// K8SVolumeName returns an attribute KeyValue conforming to the
// "k8s.volume.name" semantic conventions. It represents the name of the K8s
// volume.
@@ -8160,6 +9212,50 @@ func K8SVolumeName(val string) attribute.KeyValue {
return K8SVolumeNameKey.String(val)
}
+// Enum values for k8s.container.status.reason
+var (
+ // The container is being created.
+ // Stability: development
+ K8SContainerStatusReasonContainerCreating = K8SContainerStatusReasonKey.String("ContainerCreating")
+ // The container is in a crash loop back off state.
+ // Stability: development
+ K8SContainerStatusReasonCrashLoopBackOff = K8SContainerStatusReasonKey.String("CrashLoopBackOff")
+ // There was an error creating the container configuration.
+ // Stability: development
+ K8SContainerStatusReasonCreateContainerConfigError = K8SContainerStatusReasonKey.String("CreateContainerConfigError")
+ // There was an error pulling the container image.
+ // Stability: development
+ K8SContainerStatusReasonErrImagePull = K8SContainerStatusReasonKey.String("ErrImagePull")
+ // The container image pull is in back off state.
+ // Stability: development
+ K8SContainerStatusReasonImagePullBackOff = K8SContainerStatusReasonKey.String("ImagePullBackOff")
+ // The container was killed due to out of memory.
+ // Stability: development
+ K8SContainerStatusReasonOomKilled = K8SContainerStatusReasonKey.String("OOMKilled")
+ // The container has completed execution.
+ // Stability: development
+ K8SContainerStatusReasonCompleted = K8SContainerStatusReasonKey.String("Completed")
+ // There was an error with the container.
+ // Stability: development
+ K8SContainerStatusReasonError = K8SContainerStatusReasonKey.String("Error")
+ // The container cannot run.
+ // Stability: development
+ K8SContainerStatusReasonContainerCannotRun = K8SContainerStatusReasonKey.String("ContainerCannotRun")
+)
+
+// Enum values for k8s.container.status.state
+var (
+ // The container has terminated.
+ // Stability: development
+ K8SContainerStatusStateTerminated = K8SContainerStatusStateKey.String("terminated")
+ // The container is running.
+ // Stability: development
+ K8SContainerStatusStateRunning = K8SContainerStatusStateKey.String("running")
+ // The container is waiting.
+ // Stability: development
+ K8SContainerStatusStateWaiting = K8SContainerStatusStateKey.String("waiting")
+)
+
// Enum values for k8s.namespace.phase
var (
// Active namespace phase as described by [K8s API]
@@ -8174,6 +9270,39 @@ var (
K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating")
)
+// Enum values for k8s.node.condition.status
+var (
+ // condition_true
+ // Stability: development
+ K8SNodeConditionStatusConditionTrue = K8SNodeConditionStatusKey.String("true")
+ // condition_false
+ // Stability: development
+ K8SNodeConditionStatusConditionFalse = K8SNodeConditionStatusKey.String("false")
+ // condition_unknown
+ // Stability: development
+ K8SNodeConditionStatusConditionUnknown = K8SNodeConditionStatusKey.String("unknown")
+)
+
+// Enum values for k8s.node.condition.type
+var (
+ // The node is healthy and ready to accept pods
+ // Stability: development
+ K8SNodeConditionTypeReady = K8SNodeConditionTypeKey.String("Ready")
+ // Pressure exists on the disk size—that is, if the disk capacity is low
+ // Stability: development
+ K8SNodeConditionTypeDiskPressure = K8SNodeConditionTypeKey.String("DiskPressure")
+ // Pressure exists on the node memory—that is, if the node memory is low
+ // Stability: development
+ K8SNodeConditionTypeMemoryPressure = K8SNodeConditionTypeKey.String("MemoryPressure")
+ // Pressure exists on the processes—that is, if there are too many processes
+ // on the node
+ // Stability: development
+ K8SNodeConditionTypePIDPressure = K8SNodeConditionTypeKey.String("PIDPressure")
+ // The network for the node is not correctly configured
+ // Stability: development
+ K8SNodeConditionTypeNetworkUnavailable = K8SNodeConditionTypeKey.String("NetworkUnavailable")
+)
+
// Enum values for k8s.volume.type
var (
// A [persistentVolumeClaim] volume
@@ -8371,6 +9500,27 @@ var (
LogIostreamStderr = LogIostreamKey.String("stderr")
)
+// Namespace: mainframe
+const (
+ // MainframeLparNameKey is the attribute Key conforming to the
+ // "mainframe.lpar.name" semantic conventions. It represents the name of the
+ // logical partition that hosts a systems with a mainframe operating system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "LPAR01"
+ MainframeLparNameKey = attribute.Key("mainframe.lpar.name")
+)
+
+// MainframeLparName returns an attribute KeyValue conforming to the
+// "mainframe.lpar.name" semantic conventions. It represents the name of the
+// logical partition that hosts a systems with a mainframe operating system.
+func MainframeLparName(val string) attribute.KeyValue {
+ return MainframeLparNameKey.String(val)
+}
+
// Namespace: messaging
const (
// MessagingBatchMessageCountKey is the attribute Key conforming to the
@@ -9084,10 +10234,6 @@ var (
//
// Stability: development
MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle")
- // Deprecated: Replaced by `process`.
- MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("deliver")
- // Deprecated: Replaced by `send`.
- MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish")
)
// Enum values for messaging.rocketmq.consumption_model
@@ -9137,6 +10283,9 @@ var (
// Apache ActiveMQ
// Stability: development
MessagingSystemActiveMQ = MessagingSystemKey.String("activemq")
+ // Amazon Simple Notification Service (SNS)
+ // Stability: development
+ MessagingSystemAWSSNS = MessagingSystemKey.String("aws.sns")
// Amazon Simple Queue Service (SQS)
// Stability: development
MessagingSystemAWSSQS = MessagingSystemKey.String("aws_sqs")
@@ -9654,6 +10803,66 @@ func OCIManifestDigest(val string) attribute.KeyValue {
return OCIManifestDigestKey.String(val)
}
+// Namespace: openai
+const (
+ // OpenAIRequestServiceTierKey is the attribute Key conforming to the
+ // "openai.request.service_tier" semantic conventions. It represents the service
+ // tier requested. May be a specific tier, default, or auto.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "auto", "default"
+ OpenAIRequestServiceTierKey = attribute.Key("openai.request.service_tier")
+
+ // OpenAIResponseServiceTierKey is the attribute Key conforming to the
+ // "openai.response.service_tier" semantic conventions. It represents the
+ // service tier used for the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "scale", "default"
+ OpenAIResponseServiceTierKey = attribute.Key("openai.response.service_tier")
+
+ // OpenAIResponseSystemFingerprintKey is the attribute Key conforming to the
+ // "openai.response.system_fingerprint" semantic conventions. It represents a
+ // fingerprint to track any eventual change in the Generative AI environment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "fp_44709d6fcb"
+ OpenAIResponseSystemFingerprintKey = attribute.Key("openai.response.system_fingerprint")
+)
+
+// OpenAIResponseServiceTier returns an attribute KeyValue conforming to the
+// "openai.response.service_tier" semantic conventions. It represents the service
+// tier used for the response.
+func OpenAIResponseServiceTier(val string) attribute.KeyValue {
+ return OpenAIResponseServiceTierKey.String(val)
+}
+
+// OpenAIResponseSystemFingerprint returns an attribute KeyValue conforming to
+// the "openai.response.system_fingerprint" semantic conventions. It represents a
+// fingerprint to track any eventual change in the Generative AI environment.
+func OpenAIResponseSystemFingerprint(val string) attribute.KeyValue {
+ return OpenAIResponseSystemFingerprintKey.String(val)
+}
+
+// Enum values for openai.request.service_tier
+var (
+ // The system will utilize scale tier credits until they are exhausted.
+ // Stability: development
+ OpenAIRequestServiceTierAuto = OpenAIRequestServiceTierKey.String("auto")
+ // The system will utilize the default scale tier.
+ // Stability: development
+ OpenAIRequestServiceTierDefault = OpenAIRequestServiceTierKey.String("default")
+)
+
// Namespace: opentracing
const (
// OpenTracingRefTypeKey is the attribute Key conforming to the
@@ -9802,7 +11011,7 @@ var (
OSTypeSolaris = OSTypeKey.String("solaris")
// IBM z/OS
// Stability: development
- OSTypeZOS = OSTypeKey.String("z_os")
+ OSTypeZOS = OSTypeKey.String("zos")
)
// Namespace: otel
@@ -9866,6 +11075,17 @@ const (
// Examples: "io.opentelemetry.contrib.mongodb"
OTelScopeNameKey = attribute.Key("otel.scope.name")
+ // OTelScopeSchemaURLKey is the attribute Key conforming to the
+ // "otel.scope.schema_url" semantic conventions. It represents the schema URL of
+ // the instrumentation scope.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://opentelemetry.io/schemas/1.31.0"
+ OTelScopeSchemaURLKey = attribute.Key("otel.scope.schema_url")
+
// OTelScopeVersionKey is the attribute Key conforming to the
// "otel.scope.version" semantic conventions. It represents the version of the
// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
@@ -9877,6 +11097,20 @@ const (
// Examples: "1.0.0"
OTelScopeVersionKey = attribute.Key("otel.scope.version")
+ // OTelSpanParentOriginKey is the attribute Key conforming to the
+ // "otel.span.parent.origin" semantic conventions. It represents the determines
+ // whether the span has a parent span, and if so,
+ // [whether it is a remote parent].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote
+ OTelSpanParentOriginKey = attribute.Key("otel.span.parent.origin")
+
// OTelSpanSamplingResultKey is the attribute Key conforming to the
// "otel.span.sampling_result" semantic conventions. It represents the result
// value of the sampler for this span.
@@ -9926,6 +11160,13 @@ func OTelScopeName(val string) attribute.KeyValue {
return OTelScopeNameKey.String(val)
}
+// OTelScopeSchemaURL returns an attribute KeyValue conforming to the
+// "otel.scope.schema_url" semantic conventions. It represents the schema URL of
+// the instrumentation scope.
+func OTelScopeSchemaURL(val string) attribute.KeyValue {
+ return OTelScopeSchemaURLKey.String(val)
+}
+
// OTelScopeVersion returns an attribute KeyValue conforming to the
// "otel.scope.version" semantic conventions. It represents the version of the
// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
@@ -9970,6 +11211,10 @@ var (
//
// Stability: development
OTelComponentTypeOtlpHTTPJSONSpanExporter = OTelComponentTypeKey.String("otlp_http_json_span_exporter")
+ // Zipkin span exporter over HTTP
+ //
+ // Stability: development
+ OTelComponentTypeZipkinHTTPSpanExporter = OTelComponentTypeKey.String("zipkin_http_span_exporter")
// OTLP log record exporter over gRPC with protobuf serialization
//
// Stability: development
@@ -9998,6 +11243,27 @@ var (
//
// Stability: development
OTelComponentTypeOtlpHTTPJSONMetricExporter = OTelComponentTypeKey.String("otlp_http_json_metric_exporter")
+ // Prometheus metric exporter over HTTP with the default text-based format
+ //
+ // Stability: development
+ OTelComponentTypePrometheusHTTPTextMetricExporter = OTelComponentTypeKey.String("prometheus_http_text_metric_exporter")
+)
+
+// Enum values for otel.span.parent.origin
+var (
+ // The span does not have a parent, it is a root span
+ // Stability: development
+ OTelSpanParentOriginNone = OTelSpanParentOriginKey.String("none")
+ // The span has a parent and the parent's span context [isRemote()] is false
+ // Stability: development
+ //
+ // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote
+ OTelSpanParentOriginLocal = OTelSpanParentOriginKey.String("local")
+ // The span has a parent and the parent's span context [isRemote()] is true
+ // Stability: development
+ //
+ // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote
+ OTelSpanParentOriginRemote = OTelSpanParentOriginKey.String("remote")
)
// Enum values for otel.span.sampling_result
@@ -10497,6 +11763,14 @@ func ProcessCreationTime(val string) attribute.KeyValue {
return ProcessCreationTimeKey.String(val)
}
+// ProcessEnvironmentVariable returns an attribute KeyValue conforming to the
+// "process.environment_variable" semantic conventions. It represents the process
+// environment variables, `` being the environment variable name, the value
+// being the environment variable value.
+func ProcessEnvironmentVariable(key string, val string) attribute.KeyValue {
+ return attribute.String("process.environment_variable."+key, val)
+}
+
// ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the
// "process.executable.build_id.gnu" semantic conventions. It represents the GNU
// build ID as found in the `.note.gnu.build-id` ELF section (hex string).
@@ -10965,6 +12239,38 @@ const (
RPCSystemKey = attribute.Key("rpc.system")
)
+// RPCConnectRPCRequestMetadata returns an attribute KeyValue conforming to the
+// "rpc.connect_rpc.request.metadata" semantic conventions. It represents the
+// connect request metadata, `` being the normalized Connect Metadata key
+// (lowercase), the value being the metadata values.
+func RPCConnectRPCRequestMetadata(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("rpc.connect_rpc.request.metadata."+key, val)
+}
+
+// RPCConnectRPCResponseMetadata returns an attribute KeyValue conforming to the
+// "rpc.connect_rpc.response.metadata" semantic conventions. It represents the
+// connect response metadata, `` being the normalized Connect Metadata key
+// (lowercase), the value being the metadata values.
+func RPCConnectRPCResponseMetadata(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("rpc.connect_rpc.response.metadata."+key, val)
+}
+
+// RPCGRPCRequestMetadata returns an attribute KeyValue conforming to the
+// "rpc.grpc.request.metadata" semantic conventions. It represents the gRPC
+// request metadata, `` being the normalized gRPC Metadata key (lowercase),
+// the value being the metadata values.
+func RPCGRPCRequestMetadata(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("rpc.grpc.request.metadata."+key, val)
+}
+
+// RPCGRPCResponseMetadata returns an attribute KeyValue conforming to the
+// "rpc.grpc.response.metadata" semantic conventions. It represents the gRPC
+// response metadata, `` being the normalized gRPC Metadata key (lowercase),
+// the value being the metadata values.
+func RPCGRPCResponseMetadata(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("rpc.grpc.response.metadata."+key, val)
+}
+
// RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the
// "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code`
// property of response if it is an error response.
@@ -11820,15 +13126,12 @@ var (
// Enum values for system.memory.state
var (
- // used
+ // Actual used virtual memory in bytes.
// Stability: development
SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
// free
// Stability: development
SystemMemoryStateFree = SystemMemoryStateKey.String("free")
- // Deprecated: Removed, report shared memory usage with
- // `metric.system.memory.shared` metric.
- SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
// buffers
// Stability: development
SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
@@ -13727,8 +15030,6 @@ var (
//
// [GitLab]: https://gitlab.com
VCSProviderNameGitlab = VCSProviderNameKey.String("gitlab")
- // Deprecated: Replaced by `gitea`.
- VCSProviderNameGittea = VCSProviderNameKey.String("gittea")
// [Gitea]
// Stability: development
//
@@ -13848,4 +15149,45 @@ func WebEngineName(val string) attribute.KeyValue {
// engine.
func WebEngineVersion(val string) attribute.KeyValue {
return WebEngineVersionKey.String(val)
+}
+
+// Namespace: zos
+const (
+ // ZOSSmfIDKey is the attribute Key conforming to the "zos.smf.id" semantic
+ // conventions. It represents the System Management Facility (SMF) Identifier
+ // uniquely identified a z/OS system within a SYSPLEX or mainframe environment
+ // and is used for system and performance analysis.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "SYS1"
+ ZOSSmfIDKey = attribute.Key("zos.smf.id")
+
+ // ZOSSysplexNameKey is the attribute Key conforming to the "zos.sysplex.name"
+ // semantic conventions. It represents the name of the SYSPLEX to which the z/OS
+ // system belongs too.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "SYSPLEX1"
+ ZOSSysplexNameKey = attribute.Key("zos.sysplex.name")
+)
+
+// ZOSSmfID returns an attribute KeyValue conforming to the "zos.smf.id" semantic
+// conventions. It represents the System Management Facility (SMF) Identifier
+// uniquely identified a z/OS system within a SYSPLEX or mainframe environment
+// and is used for system and performance analysis.
+func ZOSSmfID(val string) attribute.KeyValue {
+ return ZOSSmfIDKey.String(val)
+}
+
+// ZOSSysplexName returns an attribute KeyValue conforming to the
+// "zos.sysplex.name" semantic conventions. It represents the name of the SYSPLEX
+// to which the z/OS system belongs too.
+func ZOSSysplexName(val string) attribute.KeyValue {
+ return ZOSSysplexNameKey.String(val)
}
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go
similarity index 96%
rename from vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go
rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go
index 2c5c7ebd04..1110103210 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go
@@ -4,6 +4,6 @@
// Package semconv implements OpenTelemetry semantic conventions.
//
// OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. This package represents the v1.34.0
+// patterns for OpenTelemetry things. This package represents the v1.37.0
// version of the OpenTelemetry semantic conventions.
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0"
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go
new file mode 100644
index 0000000000..666bded4ba
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
+
+import (
+ "fmt"
+ "reflect"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// ErrorType returns an [attribute.KeyValue] identifying the error type of err.
+func ErrorType(err error) attribute.KeyValue {
+ if err == nil {
+ return ErrorTypeOther
+ }
+ t := reflect.TypeOf(err)
+ var value string
+ if t.PkgPath() == "" && t.Name() == "" {
+ // Likely a builtin type.
+ value = t.String()
+ } else {
+ value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+ }
+
+ if value == "" {
+ return ErrorTypeOther
+ }
+ return ErrorTypeKey.String(value)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go
similarity index 98%
rename from vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go
rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go
index 88a998f1e5..e67469a4f6 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0"
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
const (
// ExceptionEventName is the name of the Span event representing an exception.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go
new file mode 100644
index 0000000000..a78eafd1fa
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go
@@ -0,0 +1,2126 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package httpconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "otel" namespace.
+package otelconv
+
+import (
+ "context"
+ "sync"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/noop"
+)
+
+var (
+ addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+ recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// ErrorTypeAttr is an attribute conforming to the error.type semantic
+// conventions. It represents the describes a class of error the operation ended
+// with.
+type ErrorTypeAttr string
+
+var (
+ // ErrorTypeOther is a fallback error value to be used when the instrumentation
+ // doesn't define a custom value.
+ ErrorTypeOther ErrorTypeAttr = "_OTHER"
+)
+
+// ComponentTypeAttr is an attribute conforming to the otel.component.type
+// semantic conventions. It represents a name identifying the type of the
+// OpenTelemetry component.
+type ComponentTypeAttr string
+
+var (
+ // ComponentTypeBatchingSpanProcessor is the builtin SDK batching span
+ // processor.
+ ComponentTypeBatchingSpanProcessor ComponentTypeAttr = "batching_span_processor"
+ // ComponentTypeSimpleSpanProcessor is the builtin SDK simple span processor.
+ ComponentTypeSimpleSpanProcessor ComponentTypeAttr = "simple_span_processor"
+ // ComponentTypeBatchingLogProcessor is the builtin SDK batching log record
+ // processor.
+ ComponentTypeBatchingLogProcessor ComponentTypeAttr = "batching_log_processor"
+ // ComponentTypeSimpleLogProcessor is the builtin SDK simple log record
+ // processor.
+ ComponentTypeSimpleLogProcessor ComponentTypeAttr = "simple_log_processor"
+ // ComponentTypeOtlpGRPCSpanExporter is the OTLP span exporter over gRPC with
+ // protobuf serialization.
+ ComponentTypeOtlpGRPCSpanExporter ComponentTypeAttr = "otlp_grpc_span_exporter"
+ // ComponentTypeOtlpHTTPSpanExporter is the OTLP span exporter over HTTP with
+ // protobuf serialization.
+ ComponentTypeOtlpHTTPSpanExporter ComponentTypeAttr = "otlp_http_span_exporter"
+ // ComponentTypeOtlpHTTPJSONSpanExporter is the OTLP span exporter over HTTP
+ // with JSON serialization.
+ ComponentTypeOtlpHTTPJSONSpanExporter ComponentTypeAttr = "otlp_http_json_span_exporter"
+ // ComponentTypeZipkinHTTPSpanExporter is the zipkin span exporter over HTTP.
+ ComponentTypeZipkinHTTPSpanExporter ComponentTypeAttr = "zipkin_http_span_exporter"
+ // ComponentTypeOtlpGRPCLogExporter is the OTLP log record exporter over gRPC
+ // with protobuf serialization.
+ ComponentTypeOtlpGRPCLogExporter ComponentTypeAttr = "otlp_grpc_log_exporter"
+ // ComponentTypeOtlpHTTPLogExporter is the OTLP log record exporter over HTTP
+ // with protobuf serialization.
+ ComponentTypeOtlpHTTPLogExporter ComponentTypeAttr = "otlp_http_log_exporter"
+ // ComponentTypeOtlpHTTPJSONLogExporter is the OTLP log record exporter over
+ // HTTP with JSON serialization.
+ ComponentTypeOtlpHTTPJSONLogExporter ComponentTypeAttr = "otlp_http_json_log_exporter"
+ // ComponentTypePeriodicMetricReader is the builtin SDK periodically exporting
+ // metric reader.
+ ComponentTypePeriodicMetricReader ComponentTypeAttr = "periodic_metric_reader"
+ // ComponentTypeOtlpGRPCMetricExporter is the OTLP metric exporter over gRPC
+ // with protobuf serialization.
+ ComponentTypeOtlpGRPCMetricExporter ComponentTypeAttr = "otlp_grpc_metric_exporter"
+ // ComponentTypeOtlpHTTPMetricExporter is the OTLP metric exporter over HTTP
+ // with protobuf serialization.
+ ComponentTypeOtlpHTTPMetricExporter ComponentTypeAttr = "otlp_http_metric_exporter"
+ // ComponentTypeOtlpHTTPJSONMetricExporter is the OTLP metric exporter over HTTP
+ // with JSON serialization.
+ ComponentTypeOtlpHTTPJSONMetricExporter ComponentTypeAttr = "otlp_http_json_metric_exporter"
+ // ComponentTypePrometheusHTTPTextMetricExporter is the prometheus metric
+ // exporter over HTTP with the default text-based format.
+ ComponentTypePrometheusHTTPTextMetricExporter ComponentTypeAttr = "prometheus_http_text_metric_exporter"
+)
+
+// SpanParentOriginAttr is an attribute conforming to the otel.span.parent.origin
+// semantic conventions. It represents the determines whether the span has a
+// parent span, and if so, [whether it is a remote parent].
+//
+// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote
+type SpanParentOriginAttr string
+
+var (
+ // SpanParentOriginNone is the span does not have a parent, it is a root span.
+ SpanParentOriginNone SpanParentOriginAttr = "none"
+ // SpanParentOriginLocal is the span has a parent and the parent's span context
+ // [isRemote()] is false.
+ //
+ // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote
+ SpanParentOriginLocal SpanParentOriginAttr = "local"
+ // SpanParentOriginRemote is the span has a parent and the parent's span context
+ // [isRemote()] is true.
+ //
+ // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote
+ SpanParentOriginRemote SpanParentOriginAttr = "remote"
+)
+
+// SpanSamplingResultAttr is an attribute conforming to the
+// otel.span.sampling_result semantic conventions. It represents the result value
+// of the sampler for this span.
+type SpanSamplingResultAttr string
+
+var (
+ // SpanSamplingResultDrop is the span is not sampled and not recording.
+ SpanSamplingResultDrop SpanSamplingResultAttr = "DROP"
+ // SpanSamplingResultRecordOnly is the span is not sampled, but recording.
+ SpanSamplingResultRecordOnly SpanSamplingResultAttr = "RECORD_ONLY"
+ // SpanSamplingResultRecordAndSample is the span is sampled and recording.
+ SpanSamplingResultRecordAndSample SpanSamplingResultAttr = "RECORD_AND_SAMPLE"
+)
+
+// RPCGRPCStatusCodeAttr is an attribute conforming to the rpc.grpc.status_code
+// semantic conventions. It represents the gRPC status code of the last gRPC
+// requests performed in scope of this export call.
+type RPCGRPCStatusCodeAttr int64
+
+var (
+ // RPCGRPCStatusCodeOk is the OK.
+ RPCGRPCStatusCodeOk RPCGRPCStatusCodeAttr = 0
+ // RPCGRPCStatusCodeCancelled is the CANCELLED.
+ RPCGRPCStatusCodeCancelled RPCGRPCStatusCodeAttr = 1
+ // RPCGRPCStatusCodeUnknown is the UNKNOWN.
+ RPCGRPCStatusCodeUnknown RPCGRPCStatusCodeAttr = 2
+ // RPCGRPCStatusCodeInvalidArgument is the INVALID_ARGUMENT.
+ RPCGRPCStatusCodeInvalidArgument RPCGRPCStatusCodeAttr = 3
+ // RPCGRPCStatusCodeDeadlineExceeded is the DEADLINE_EXCEEDED.
+ RPCGRPCStatusCodeDeadlineExceeded RPCGRPCStatusCodeAttr = 4
+ // RPCGRPCStatusCodeNotFound is the NOT_FOUND.
+ RPCGRPCStatusCodeNotFound RPCGRPCStatusCodeAttr = 5
+ // RPCGRPCStatusCodeAlreadyExists is the ALREADY_EXISTS.
+ RPCGRPCStatusCodeAlreadyExists RPCGRPCStatusCodeAttr = 6
+ // RPCGRPCStatusCodePermissionDenied is the PERMISSION_DENIED.
+ RPCGRPCStatusCodePermissionDenied RPCGRPCStatusCodeAttr = 7
+ // RPCGRPCStatusCodeResourceExhausted is the RESOURCE_EXHAUSTED.
+ RPCGRPCStatusCodeResourceExhausted RPCGRPCStatusCodeAttr = 8
+ // RPCGRPCStatusCodeFailedPrecondition is the FAILED_PRECONDITION.
+ RPCGRPCStatusCodeFailedPrecondition RPCGRPCStatusCodeAttr = 9
+ // RPCGRPCStatusCodeAborted is the ABORTED.
+ RPCGRPCStatusCodeAborted RPCGRPCStatusCodeAttr = 10
+ // RPCGRPCStatusCodeOutOfRange is the OUT_OF_RANGE.
+ RPCGRPCStatusCodeOutOfRange RPCGRPCStatusCodeAttr = 11
+ // RPCGRPCStatusCodeUnimplemented is the UNIMPLEMENTED.
+ RPCGRPCStatusCodeUnimplemented RPCGRPCStatusCodeAttr = 12
+ // RPCGRPCStatusCodeInternal is the INTERNAL.
+ RPCGRPCStatusCodeInternal RPCGRPCStatusCodeAttr = 13
+ // RPCGRPCStatusCodeUnavailable is the UNAVAILABLE.
+ RPCGRPCStatusCodeUnavailable RPCGRPCStatusCodeAttr = 14
+ // RPCGRPCStatusCodeDataLoss is the DATA_LOSS.
+ RPCGRPCStatusCodeDataLoss RPCGRPCStatusCodeAttr = 15
+ // RPCGRPCStatusCodeUnauthenticated is the UNAUTHENTICATED.
+ RPCGRPCStatusCodeUnauthenticated RPCGRPCStatusCodeAttr = 16
+)
+
+// SDKExporterLogExported is an instrument used to record metric values
+// conforming to the "otel.sdk.exporter.log.exported" semantic conventions. It
+// represents the number of log records for which the export has finished, either
+// successful or failed.
+type SDKExporterLogExported struct {
+ metric.Int64Counter
+}
+
+// NewSDKExporterLogExported returns a new SDKExporterLogExported instrument.
+func NewSDKExporterLogExported(
+ m metric.Meter,
+ opt ...metric.Int64CounterOption,
+) (SDKExporterLogExported, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKExporterLogExported{noop.Int64Counter{}}, nil
+ }
+
+ i, err := m.Int64Counter(
+ "otel.sdk.exporter.log.exported",
+ append([]metric.Int64CounterOption{
+ metric.WithDescription("The number of log records for which the export has finished, either successful or failed."),
+ metric.WithUnit("{log_record}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKExporterLogExported{noop.Int64Counter{}}, err
+ }
+ return SDKExporterLogExported{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKExporterLogExported) Inst() metric.Int64Counter {
+ return m.Int64Counter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKExporterLogExported) Name() string {
+ return "otel.sdk.exporter.log.exported"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKExporterLogExported) Unit() string {
+ return "{log_record}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKExporterLogExported) Description() string {
+ return "The number of log records for which the export has finished, either successful or failed."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+// For exporters with partial success semantics (e.g. OTLP with
+// `rejected_log_records`), rejected log records MUST count as failed and only
+// non-rejected log records count as success.
+// If no rejection reason is available, `rejected` SHOULD be used as value for
+// `error.type`.
+func (m SDKExporterLogExported) Add(
+ ctx context.Context,
+ incr int64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+// For exporters with partial success semantics (e.g. OTLP with
+// `rejected_log_records`), rejected log records MUST count as failed and only
+// non-rejected log records count as success.
+// If no rejection reason is available, `rejected` SHOULD be used as value for
+// `error.type`.
+func (m SDKExporterLogExported) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the describes a class of error the operation ended
+// with.
+func (SDKExporterLogExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKExporterLogExported) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKExporterLogExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the server domain name if available without
+// reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+func (SDKExporterLogExported) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the server port number.
+func (SDKExporterLogExported) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
+// SDKExporterLogInflight is an instrument used to record metric values
+// conforming to the "otel.sdk.exporter.log.inflight" semantic conventions. It
+// represents the number of log records which were passed to the exporter, but
+// that have not been exported yet (neither successful, nor failed).
+type SDKExporterLogInflight struct {
+ metric.Int64UpDownCounter
+}
+
+// NewSDKExporterLogInflight returns a new SDKExporterLogInflight instrument.
+func NewSDKExporterLogInflight(
+ m metric.Meter,
+ opt ...metric.Int64UpDownCounterOption,
+) (SDKExporterLogInflight, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64UpDownCounter(
+ "otel.sdk.exporter.log.inflight",
+ append([]metric.Int64UpDownCounterOption{
+ metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
+ metric.WithUnit("{log_record}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err
+ }
+ return SDKExporterLogInflight{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKExporterLogInflight) Inst() metric.Int64UpDownCounter {
+ return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKExporterLogInflight) Name() string {
+ return "otel.sdk.exporter.log.inflight"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKExporterLogInflight) Unit() string {
+ return "{log_record}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKExporterLogInflight) Description() string {
+ return "The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+func (m SDKExporterLogInflight) Add(
+ ctx context.Context,
+ incr int64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+func (m SDKExporterLogInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKExporterLogInflight) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKExporterLogInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the server domain name if available without
+// reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+func (SDKExporterLogInflight) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the server port number.
+func (SDKExporterLogInflight) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
+// SDKExporterMetricDataPointExported is an instrument used to record metric
+// values conforming to the "otel.sdk.exporter.metric_data_point.exported"
+// semantic conventions. It represents the number of metric data points for which
+// the export has finished, either successful or failed.
+type SDKExporterMetricDataPointExported struct {
+ metric.Int64Counter
+}
+
+// NewSDKExporterMetricDataPointExported returns a new
+// SDKExporterMetricDataPointExported instrument.
+func NewSDKExporterMetricDataPointExported(
+ m metric.Meter,
+ opt ...metric.Int64CounterOption,
+) (SDKExporterMetricDataPointExported, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, nil
+ }
+
+ i, err := m.Int64Counter(
+ "otel.sdk.exporter.metric_data_point.exported",
+ append([]metric.Int64CounterOption{
+ metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."),
+ metric.WithUnit("{data_point}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err
+ }
+ return SDKExporterMetricDataPointExported{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKExporterMetricDataPointExported) Inst() metric.Int64Counter {
+ return m.Int64Counter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKExporterMetricDataPointExported) Name() string {
+ return "otel.sdk.exporter.metric_data_point.exported"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKExporterMetricDataPointExported) Unit() string {
+ return "{data_point}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKExporterMetricDataPointExported) Description() string {
+ return "The number of metric data points for which the export has finished, either successful or failed."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+// For exporters with partial success semantics (e.g. OTLP with
+// `rejected_data_points`), rejected data points MUST count as failed and only
+// non-rejected data points count as success.
+// If no rejection reason is available, `rejected` SHOULD be used as value for
+// `error.type`.
+func (m SDKExporterMetricDataPointExported) Add(
+ ctx context.Context,
+ incr int64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+// For exporters with partial success semantics (e.g. OTLP with
+// `rejected_data_points`), rejected data points MUST count as failed and only
+// non-rejected data points count as success.
+// If no rejection reason is available, `rejected` SHOULD be used as value for
+// `error.type`.
+func (m SDKExporterMetricDataPointExported) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the describes a class of error the operation ended
+// with.
+func (SDKExporterMetricDataPointExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKExporterMetricDataPointExported) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKExporterMetricDataPointExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the server domain name if available without
+// reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+func (SDKExporterMetricDataPointExported) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the server port number.
+func (SDKExporterMetricDataPointExported) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
+// SDKExporterMetricDataPointInflight is an instrument used to record metric
+// values conforming to the "otel.sdk.exporter.metric_data_point.inflight"
+// semantic conventions. It represents the number of metric data points which
+// were passed to the exporter, but that have not been exported yet (neither
+// successful, nor failed).
+type SDKExporterMetricDataPointInflight struct {
+ metric.Int64UpDownCounter
+}
+
+// NewSDKExporterMetricDataPointInflight returns a new
+// SDKExporterMetricDataPointInflight instrument.
+func NewSDKExporterMetricDataPointInflight(
+ m metric.Meter,
+ opt ...metric.Int64UpDownCounterOption,
+) (SDKExporterMetricDataPointInflight, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64UpDownCounter(
+ "otel.sdk.exporter.metric_data_point.inflight",
+ append([]metric.Int64UpDownCounterOption{
+ metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
+ metric.WithUnit("{data_point}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err
+ }
+ return SDKExporterMetricDataPointInflight{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKExporterMetricDataPointInflight) Inst() metric.Int64UpDownCounter {
+ return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKExporterMetricDataPointInflight) Name() string {
+ return "otel.sdk.exporter.metric_data_point.inflight"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKExporterMetricDataPointInflight) Unit() string {
+ return "{data_point}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKExporterMetricDataPointInflight) Description() string {
+ return "The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+func (m SDKExporterMetricDataPointInflight) Add(
+ ctx context.Context,
+ incr int64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+func (m SDKExporterMetricDataPointInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKExporterMetricDataPointInflight) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKExporterMetricDataPointInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the server domain name if available without
+// reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+func (SDKExporterMetricDataPointInflight) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the server port number.
+func (SDKExporterMetricDataPointInflight) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
+// SDKExporterOperationDuration is an instrument used to record metric values
+// conforming to the "otel.sdk.exporter.operation.duration" semantic conventions.
+// It represents the duration of exporting a batch of telemetry records.
+type SDKExporterOperationDuration struct {
+ metric.Float64Histogram
+}
+
+// NewSDKExporterOperationDuration returns a new SDKExporterOperationDuration
+// instrument.
+func NewSDKExporterOperationDuration(
+ m metric.Meter,
+ opt ...metric.Float64HistogramOption,
+) (SDKExporterOperationDuration, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKExporterOperationDuration{noop.Float64Histogram{}}, nil
+ }
+
+ i, err := m.Float64Histogram(
+ "otel.sdk.exporter.operation.duration",
+ append([]metric.Float64HistogramOption{
+ metric.WithDescription("The duration of exporting a batch of telemetry records."),
+ metric.WithUnit("s"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKExporterOperationDuration{noop.Float64Histogram{}}, err
+ }
+ return SDKExporterOperationDuration{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKExporterOperationDuration) Inst() metric.Float64Histogram {
+ return m.Float64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKExporterOperationDuration) Name() string {
+ return "otel.sdk.exporter.operation.duration"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKExporterOperationDuration) Unit() string {
+ return "s"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKExporterOperationDuration) Description() string {
+ return "The duration of exporting a batch of telemetry records."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// This metric defines successful operations using the full success definitions
+// for [http]
+// and [grpc]. Anything else is defined as an unsuccessful operation. For
+// successful
+// operations, `error.type` MUST NOT be set. For unsuccessful export operations,
+// `error.type` MUST contain a relevant failure cause.
+//
+// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1
+// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success
+func (m SDKExporterOperationDuration) Record(
+ ctx context.Context,
+ val float64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Float64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// This metric defines successful operations using the full success definitions
+// for [http]
+// and [grpc]. Anything else is defined as an unsuccessful operation. For
+// successful
+// operations, `error.type` MUST NOT be set. For unsuccessful export operations,
+// `error.type` MUST contain a relevant failure cause.
+//
+// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1
+// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success
+func (m SDKExporterOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Float64Histogram.Record(ctx, val)
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the describes a class of error the operation ended
+// with.
+func (SDKExporterOperationDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrHTTPResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the HTTP status
+// code of the last HTTP request performed in scope of this export call.
+func (SDKExporterOperationDuration) AttrHTTPResponseStatusCode(val int) attribute.KeyValue {
+ return attribute.Int("http.response.status_code", val)
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKExporterOperationDuration) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKExporterOperationDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// AttrRPCGRPCStatusCode returns an optional attribute for the
+// "rpc.grpc.status_code" semantic convention. It represents the gRPC status code
+// of the last gRPC requests performed in scope of this export call.
+func (SDKExporterOperationDuration) AttrRPCGRPCStatusCode(val RPCGRPCStatusCodeAttr) attribute.KeyValue {
+ return attribute.Int64("rpc.grpc.status_code", int64(val))
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the server domain name if available without
+// reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+func (SDKExporterOperationDuration) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the server port number.
+func (SDKExporterOperationDuration) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
+// SDKExporterSpanExported is an instrument used to record metric values
+// conforming to the "otel.sdk.exporter.span.exported" semantic conventions. It
+// represents the number of spans for which the export has finished, either
+// successful or failed.
+type SDKExporterSpanExported struct {
+ metric.Int64Counter
+}
+
+// NewSDKExporterSpanExported returns a new SDKExporterSpanExported instrument.
+func NewSDKExporterSpanExported(
+ m metric.Meter,
+ opt ...metric.Int64CounterOption,
+) (SDKExporterSpanExported, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKExporterSpanExported{noop.Int64Counter{}}, nil
+ }
+
+ i, err := m.Int64Counter(
+ "otel.sdk.exporter.span.exported",
+ append([]metric.Int64CounterOption{
+ metric.WithDescription("The number of spans for which the export has finished, either successful or failed."),
+ metric.WithUnit("{span}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKExporterSpanExported{noop.Int64Counter{}}, err
+ }
+ return SDKExporterSpanExported{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKExporterSpanExported) Inst() metric.Int64Counter {
+ return m.Int64Counter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKExporterSpanExported) Name() string {
+ return "otel.sdk.exporter.span.exported"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKExporterSpanExported) Unit() string {
+ return "{span}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKExporterSpanExported) Description() string {
+ return "The number of spans for which the export has finished, either successful or failed."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+// For exporters with partial success semantics (e.g. OTLP with `rejected_spans`
+// ), rejected spans MUST count as failed and only non-rejected spans count as
+// success.
+// If no rejection reason is available, `rejected` SHOULD be used as value for
+// `error.type`.
+func (m SDKExporterSpanExported) Add(
+ ctx context.Context,
+ incr int64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+// For exporters with partial success semantics (e.g. OTLP with `rejected_spans`
+// ), rejected spans MUST count as failed and only non-rejected spans count as
+// success.
+// If no rejection reason is available, `rejected` SHOULD be used as value for
+// `error.type`.
+func (m SDKExporterSpanExported) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the describes a class of error the operation ended
+// with.
+func (SDKExporterSpanExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKExporterSpanExported) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKExporterSpanExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the server domain name if available without
+// reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+func (SDKExporterSpanExported) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the server port number.
+func (SDKExporterSpanExported) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
+// SDKExporterSpanInflight is an instrument used to record metric values
+// conforming to the "otel.sdk.exporter.span.inflight" semantic conventions. It
+// represents the number of spans which were passed to the exporter, but that
+// have not been exported yet (neither successful, nor failed).
+type SDKExporterSpanInflight struct {
+ metric.Int64UpDownCounter
+}
+
+// NewSDKExporterSpanInflight returns a new SDKExporterSpanInflight instrument.
+func NewSDKExporterSpanInflight(
+ m metric.Meter,
+ opt ...metric.Int64UpDownCounterOption,
+) (SDKExporterSpanInflight, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64UpDownCounter(
+ "otel.sdk.exporter.span.inflight",
+ append([]metric.Int64UpDownCounterOption{
+ metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
+ metric.WithUnit("{span}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err
+ }
+ return SDKExporterSpanInflight{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKExporterSpanInflight) Inst() metric.Int64UpDownCounter {
+ return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKExporterSpanInflight) Name() string {
+ return "otel.sdk.exporter.span.inflight"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKExporterSpanInflight) Unit() string {
+ return "{span}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKExporterSpanInflight) Description() string {
+ return "The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+func (m SDKExporterSpanInflight) Add(
+ ctx context.Context,
+ incr int64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+func (m SDKExporterSpanInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKExporterSpanInflight) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKExporterSpanInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the server domain name if available without
+// reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+func (SDKExporterSpanInflight) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the server port number.
+func (SDKExporterSpanInflight) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
+// SDKLogCreated is an instrument used to record metric values conforming to the
+// "otel.sdk.log.created" semantic conventions. It represents the number of logs
+// submitted to enabled SDK Loggers.
+type SDKLogCreated struct {
+ metric.Int64Counter
+}
+
+// NewSDKLogCreated returns a new SDKLogCreated instrument.
+func NewSDKLogCreated(
+ m metric.Meter,
+ opt ...metric.Int64CounterOption,
+) (SDKLogCreated, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKLogCreated{noop.Int64Counter{}}, nil
+ }
+
+ i, err := m.Int64Counter(
+ "otel.sdk.log.created",
+ append([]metric.Int64CounterOption{
+ metric.WithDescription("The number of logs submitted to enabled SDK Loggers."),
+ metric.WithUnit("{log_record}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKLogCreated{noop.Int64Counter{}}, err
+ }
+ return SDKLogCreated{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKLogCreated) Inst() metric.Int64Counter {
+ return m.Int64Counter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKLogCreated) Name() string {
+ return "otel.sdk.log.created"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKLogCreated) Unit() string {
+ return "{log_record}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKLogCreated) Description() string {
+ return "The number of logs submitted to enabled SDK Loggers."
+}
+
+// Add adds incr to the existing count for attrs.
+func (m SDKLogCreated) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) {
+ if len(attrs) == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributes(attrs...))
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+func (m SDKLogCreated) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// SDKMetricReaderCollectionDuration is an instrument used to record metric
+// values conforming to the "otel.sdk.metric_reader.collection.duration" semantic
+// conventions. It represents the duration of the collect operation of the metric
+// reader.
+type SDKMetricReaderCollectionDuration struct {
+ metric.Float64Histogram
+}
+
+// NewSDKMetricReaderCollectionDuration returns a new
+// SDKMetricReaderCollectionDuration instrument.
+func NewSDKMetricReaderCollectionDuration(
+ m metric.Meter,
+ opt ...metric.Float64HistogramOption,
+) (SDKMetricReaderCollectionDuration, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, nil
+ }
+
+ i, err := m.Float64Histogram(
+ "otel.sdk.metric_reader.collection.duration",
+ append([]metric.Float64HistogramOption{
+ metric.WithDescription("The duration of the collect operation of the metric reader."),
+ metric.WithUnit("s"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err
+ }
+ return SDKMetricReaderCollectionDuration{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKMetricReaderCollectionDuration) Inst() metric.Float64Histogram {
+ return m.Float64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKMetricReaderCollectionDuration) Name() string {
+ return "otel.sdk.metric_reader.collection.duration"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKMetricReaderCollectionDuration) Unit() string {
+ return "s"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKMetricReaderCollectionDuration) Description() string {
+ return "The duration of the collect operation of the metric reader."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// For successful collections, `error.type` MUST NOT be set. For failed
+// collections, `error.type` SHOULD contain the failure cause.
+// It can happen that metrics collection is successful for some MetricProducers,
+// while others fail. In that case `error.type` SHOULD be set to any of the
+// failure causes.
+func (m SDKMetricReaderCollectionDuration) Record(
+ ctx context.Context,
+ val float64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Float64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// For successful collections, `error.type` MUST NOT be set. For failed
+// collections, `error.type` SHOULD contain the failure cause.
+// It can happen that metrics collection is successful for some MetricProducers,
+// while others fail. In that case `error.type` SHOULD be set to any of the
+// failure causes.
+func (m SDKMetricReaderCollectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Float64Histogram.Record(ctx, val)
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the describes a class of error the operation ended
+// with.
+func (SDKMetricReaderCollectionDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKMetricReaderCollectionDuration) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKMetricReaderCollectionDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// SDKProcessorLogProcessed is an instrument used to record metric values
+// conforming to the "otel.sdk.processor.log.processed" semantic conventions. It
+// represents the number of log records for which the processing has finished,
+// either successful or failed.
+type SDKProcessorLogProcessed struct {
+ metric.Int64Counter
+}
+
+// NewSDKProcessorLogProcessed returns a new SDKProcessorLogProcessed instrument.
+func NewSDKProcessorLogProcessed(
+ m metric.Meter,
+ opt ...metric.Int64CounterOption,
+) (SDKProcessorLogProcessed, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKProcessorLogProcessed{noop.Int64Counter{}}, nil
+ }
+
+ i, err := m.Int64Counter(
+ "otel.sdk.processor.log.processed",
+ append([]metric.Int64CounterOption{
+ metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."),
+ metric.WithUnit("{log_record}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKProcessorLogProcessed{noop.Int64Counter{}}, err
+ }
+ return SDKProcessorLogProcessed{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKProcessorLogProcessed) Inst() metric.Int64Counter {
+ return m.Int64Counter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKProcessorLogProcessed) Name() string {
+ return "otel.sdk.processor.log.processed"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKProcessorLogProcessed) Unit() string {
+ return "{log_record}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKProcessorLogProcessed) Description() string {
+ return "The number of log records for which the processing has finished, either successful or failed."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// For successful processing, `error.type` MUST NOT be set. For failed
+// processing, `error.type` MUST contain the failure cause.
+// For the SDK Simple and Batching Log Record Processor a log record is
+// considered to be processed already when it has been submitted to the exporter,
+// not when the corresponding export call has finished.
+func (m SDKProcessorLogProcessed) Add(
+ ctx context.Context,
+ incr int64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// For successful processing, `error.type` MUST NOT be set. For failed
+// processing, `error.type` MUST contain the failure cause.
+// For the SDK Simple and Batching Log Record Processor a log record is
+// considered to be processed already when it has been submitted to the exporter,
+// not when the corresponding export call has finished.
+func (m SDKProcessorLogProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents a low-cardinality description of the failure reason.
+// SDK Batching Log Record Processors MUST use `queue_full` for log records
+// dropped due to a full queue.
+func (SDKProcessorLogProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKProcessorLogProcessed) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKProcessorLogProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// SDKProcessorLogQueueCapacity is an instrument used to record metric values
+// conforming to the "otel.sdk.processor.log.queue.capacity" semantic
+// conventions. It represents the maximum number of log records the queue of a
+// given instance of an SDK Log Record processor can hold.
+type SDKProcessorLogQueueCapacity struct {
+ metric.Int64ObservableUpDownCounter
+}
+
+// NewSDKProcessorLogQueueCapacity returns a new SDKProcessorLogQueueCapacity
+// instrument.
+func NewSDKProcessorLogQueueCapacity(
+ m metric.Meter,
+ opt ...metric.Int64ObservableUpDownCounterOption,
+) (SDKProcessorLogQueueCapacity, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64ObservableUpDownCounter(
+ "otel.sdk.processor.log.queue.capacity",
+ append([]metric.Int64ObservableUpDownCounterOption{
+ metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."),
+ metric.WithUnit("{log_record}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err
+ }
+ return SDKProcessorLogQueueCapacity{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKProcessorLogQueueCapacity) Inst() metric.Int64ObservableUpDownCounter {
+ return m.Int64ObservableUpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKProcessorLogQueueCapacity) Name() string {
+ return "otel.sdk.processor.log.queue.capacity"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKProcessorLogQueueCapacity) Unit() string {
+ return "{log_record}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKProcessorLogQueueCapacity) Description() string {
+ return "The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKProcessorLogQueueCapacity) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKProcessorLogQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// SDKProcessorLogQueueSize is an instrument used to record metric values
+// conforming to the "otel.sdk.processor.log.queue.size" semantic conventions. It
+// represents the number of log records in the queue of a given instance of an
+// SDK log processor.
+type SDKProcessorLogQueueSize struct {
+ metric.Int64ObservableUpDownCounter
+}
+
+// NewSDKProcessorLogQueueSize returns a new SDKProcessorLogQueueSize instrument.
+func NewSDKProcessorLogQueueSize(
+ m metric.Meter,
+ opt ...metric.Int64ObservableUpDownCounterOption,
+) (SDKProcessorLogQueueSize, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64ObservableUpDownCounter(
+ "otel.sdk.processor.log.queue.size",
+ append([]metric.Int64ObservableUpDownCounterOption{
+ metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."),
+ metric.WithUnit("{log_record}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err
+ }
+ return SDKProcessorLogQueueSize{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKProcessorLogQueueSize) Inst() metric.Int64ObservableUpDownCounter {
+ return m.Int64ObservableUpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKProcessorLogQueueSize) Name() string {
+ return "otel.sdk.processor.log.queue.size"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKProcessorLogQueueSize) Unit() string {
+ return "{log_record}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKProcessorLogQueueSize) Description() string {
+ return "The number of log records in the queue of a given instance of an SDK log processor."
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKProcessorLogQueueSize) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKProcessorLogQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// SDKProcessorSpanProcessed is an instrument used to record metric values
+// conforming to the "otel.sdk.processor.span.processed" semantic conventions. It
+// represents the number of spans for which the processing has finished, either
+// successful or failed.
+type SDKProcessorSpanProcessed struct {
+ metric.Int64Counter
+}
+
+// NewSDKProcessorSpanProcessed returns a new SDKProcessorSpanProcessed
+// instrument.
+func NewSDKProcessorSpanProcessed(
+ m metric.Meter,
+ opt ...metric.Int64CounterOption,
+) (SDKProcessorSpanProcessed, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKProcessorSpanProcessed{noop.Int64Counter{}}, nil
+ }
+
+ i, err := m.Int64Counter(
+ "otel.sdk.processor.span.processed",
+ append([]metric.Int64CounterOption{
+ metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."),
+ metric.WithUnit("{span}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err
+ }
+ return SDKProcessorSpanProcessed{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKProcessorSpanProcessed) Inst() metric.Int64Counter {
+ return m.Int64Counter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKProcessorSpanProcessed) Name() string {
+ return "otel.sdk.processor.span.processed"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKProcessorSpanProcessed) Unit() string {
+ return "{span}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKProcessorSpanProcessed) Description() string {
+ return "The number of spans for which the processing has finished, either successful or failed."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// For successful processing, `error.type` MUST NOT be set. For failed
+// processing, `error.type` MUST contain the failure cause.
+// For the SDK Simple and Batching Span Processor a span is considered to be
+// processed already when it has been submitted to the exporter, not when the
+// corresponding export call has finished.
+func (m SDKProcessorSpanProcessed) Add(
+ ctx context.Context,
+ incr int64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// For successful processing, `error.type` MUST NOT be set. For failed
+// processing, `error.type` MUST contain the failure cause.
+// For the SDK Simple and Batching Span Processor a span is considered to be
+// processed already when it has been submitted to the exporter, not when the
+// corresponding export call has finished.
+func (m SDKProcessorSpanProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents a low-cardinality description of the failure reason.
+// SDK Batching Span Processors MUST use `queue_full` for spans dropped due to a
+// full queue.
+func (SDKProcessorSpanProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKProcessorSpanProcessed) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKProcessorSpanProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// SDKProcessorSpanQueueCapacity is an instrument used to record metric values
+// conforming to the "otel.sdk.processor.span.queue.capacity" semantic
+// conventions. It represents the maximum number of spans the queue of a given
+// instance of an SDK span processor can hold.
+type SDKProcessorSpanQueueCapacity struct {
+ metric.Int64ObservableUpDownCounter
+}
+
+// NewSDKProcessorSpanQueueCapacity returns a new SDKProcessorSpanQueueCapacity
+// instrument.
+func NewSDKProcessorSpanQueueCapacity(
+ m metric.Meter,
+ opt ...metric.Int64ObservableUpDownCounterOption,
+) (SDKProcessorSpanQueueCapacity, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64ObservableUpDownCounter(
+ "otel.sdk.processor.span.queue.capacity",
+ append([]metric.Int64ObservableUpDownCounterOption{
+ metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."),
+ metric.WithUnit("{span}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err
+ }
+ return SDKProcessorSpanQueueCapacity{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKProcessorSpanQueueCapacity) Inst() metric.Int64ObservableUpDownCounter {
+ return m.Int64ObservableUpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKProcessorSpanQueueCapacity) Name() string {
+ return "otel.sdk.processor.span.queue.capacity"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKProcessorSpanQueueCapacity) Unit() string {
+ return "{span}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKProcessorSpanQueueCapacity) Description() string {
+ return "The maximum number of spans the queue of a given instance of an SDK span processor can hold."
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKProcessorSpanQueueCapacity) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKProcessorSpanQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// SDKProcessorSpanQueueSize is an instrument used to record metric values
+// conforming to the "otel.sdk.processor.span.queue.size" semantic conventions.
+// It represents the number of spans in the queue of a given instance of an SDK
+// span processor.
+type SDKProcessorSpanQueueSize struct {
+ metric.Int64ObservableUpDownCounter
+}
+
+// NewSDKProcessorSpanQueueSize returns a new SDKProcessorSpanQueueSize
+// instrument.
+func NewSDKProcessorSpanQueueSize(
+ m metric.Meter,
+ opt ...metric.Int64ObservableUpDownCounterOption,
+) (SDKProcessorSpanQueueSize, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64ObservableUpDownCounter(
+ "otel.sdk.processor.span.queue.size",
+ append([]metric.Int64ObservableUpDownCounterOption{
+ metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."),
+ metric.WithUnit("{span}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err
+ }
+ return SDKProcessorSpanQueueSize{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKProcessorSpanQueueSize) Inst() metric.Int64ObservableUpDownCounter {
+ return m.Int64ObservableUpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKProcessorSpanQueueSize) Name() string {
+ return "otel.sdk.processor.span.queue.size"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKProcessorSpanQueueSize) Unit() string {
+ return "{span}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKProcessorSpanQueueSize) Description() string {
+ return "The number of spans in the queue of a given instance of an SDK span processor."
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKProcessorSpanQueueSize) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKProcessorSpanQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// SDKSpanLive is an instrument used to record metric values conforming to the
+// "otel.sdk.span.live" semantic conventions. It represents the number of created
+// spans with `recording=true` for which the end operation has not been called
+// yet.
+type SDKSpanLive struct {
+ metric.Int64UpDownCounter
+}
+
+// NewSDKSpanLive returns a new SDKSpanLive instrument.
+func NewSDKSpanLive(
+ m metric.Meter,
+ opt ...metric.Int64UpDownCounterOption,
+) (SDKSpanLive, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKSpanLive{noop.Int64UpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64UpDownCounter(
+ "otel.sdk.span.live",
+ append([]metric.Int64UpDownCounterOption{
+ metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."),
+ metric.WithUnit("{span}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKSpanLive{noop.Int64UpDownCounter{}}, err
+ }
+ return SDKSpanLive{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKSpanLive) Inst() metric.Int64UpDownCounter {
+ return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKSpanLive) Name() string {
+ return "otel.sdk.span.live"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKSpanLive) Unit() string {
+ return "{span}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKSpanLive) Description() string {
+ return "The number of created spans with `recording=true` for which the end operation has not been called yet."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+func (m SDKSpanLive) Add(
+ ctx context.Context,
+ incr int64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+func (m SDKSpanLive) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AttrSpanSamplingResult returns an optional attribute for the
+// "otel.span.sampling_result" semantic convention. It represents the result
+// value of the sampler for this span.
+func (SDKSpanLive) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue {
+ return attribute.String("otel.span.sampling_result", string(val))
+}
+
+// SDKSpanStarted is an instrument used to record metric values conforming to the
+// "otel.sdk.span.started" semantic conventions. It represents the number of
+// created spans.
+type SDKSpanStarted struct {
+ metric.Int64Counter
+}
+
+// NewSDKSpanStarted returns a new SDKSpanStarted instrument.
+func NewSDKSpanStarted(
+ m metric.Meter,
+ opt ...metric.Int64CounterOption,
+) (SDKSpanStarted, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKSpanStarted{noop.Int64Counter{}}, nil
+ }
+
+ i, err := m.Int64Counter(
+ "otel.sdk.span.started",
+ append([]metric.Int64CounterOption{
+ metric.WithDescription("The number of created spans."),
+ metric.WithUnit("{span}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKSpanStarted{noop.Int64Counter{}}, err
+ }
+ return SDKSpanStarted{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKSpanStarted) Inst() metric.Int64Counter {
+ return m.Int64Counter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKSpanStarted) Name() string {
+ return "otel.sdk.span.started"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKSpanStarted) Unit() string {
+ return "{span}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKSpanStarted) Description() string {
+ return "The number of created spans."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// Implementations MUST record this metric for all spans, even for non-recording
+// ones.
+func (m SDKSpanStarted) Add(
+ ctx context.Context,
+ incr int64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// Implementations MUST record this metric for all spans, even for non-recording
+// ones.
+func (m SDKSpanStarted) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AttrSpanParentOrigin returns an optional attribute for the
+// "otel.span.parent.origin" semantic convention. It represents the determines
+// whether the span has a parent span, and if so, [whether it is a remote parent]
+// .
+//
+// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote
+func (SDKSpanStarted) AttrSpanParentOrigin(val SpanParentOriginAttr) attribute.KeyValue {
+ return attribute.String("otel.span.parent.origin", string(val))
+}
+
+// AttrSpanSamplingResult returns an optional attribute for the
+// "otel.span.sampling_result" semantic convention. It represents the result
+// value of the sampler for this span.
+func (SDKSpanStarted) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue {
+ return attribute.String("otel.span.sampling_result", string(val))
+}
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go
similarity index 85%
rename from vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go
rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go
index 3c23d45925..f8a0b70441 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go
@@ -1,9 +1,9 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0"
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
// SchemaURL is the schema URL that matches the version of the semantic conventions
// that this package defines. Semconv packages starting from v1.4.0 must declare
// non-empty schema URL in the form https://opentelemetry.io/schemas/
-const SchemaURL = "https://opentelemetry.io/schemas/1.34.0"
+const SchemaURL = "https://opentelemetry.io/schemas/1.37.0"
diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE
index 261eeb9e9f..f1aee0f110 100644
--- a/vendor/go.opentelemetry.io/otel/trace/LICENSE
+++ b/vendor/go.opentelemetry.io/otel/trace/LICENSE
@@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+--------------------------------------------------------------------------------
+
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go
index f3aa398138..8763936a84 100644
--- a/vendor/go.opentelemetry.io/otel/trace/auto.go
+++ b/vendor/go.opentelemetry.io/otel/trace/auto.go
@@ -20,7 +20,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
- semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
"go.opentelemetry.io/otel/trace/embedded"
"go.opentelemetry.io/otel/trace/internal/telemetry"
)
@@ -39,7 +39,7 @@ type autoTracerProvider struct{ embedded.TracerProvider }
var _ TracerProvider = autoTracerProvider{}
-func (p autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer {
+func (autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer {
cfg := NewTracerConfig(opts...)
return autoTracer{
name: name,
@@ -81,7 +81,7 @@ func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOpt
// Expected to be implemented in eBPF.
//
//go:noinline
-func (t *autoTracer) start(
+func (*autoTracer) start(
ctx context.Context,
spanPtr *autoSpan,
psc *SpanContext,
diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go
index 9c0b720a4d..aea11a2b52 100644
--- a/vendor/go.opentelemetry.io/otel/trace/config.go
+++ b/vendor/go.opentelemetry.io/otel/trace/config.go
@@ -73,7 +73,7 @@ func (cfg *SpanConfig) Timestamp() time.Time {
return cfg.timestamp
}
-// StackTrace checks whether stack trace capturing is enabled.
+// StackTrace reports whether stack trace capturing is enabled.
func (cfg *SpanConfig) StackTrace() bool {
return cfg.stackTrace
}
@@ -154,7 +154,7 @@ func (cfg *EventConfig) Timestamp() time.Time {
return cfg.timestamp
}
-// StackTrace checks whether stack trace capturing is enabled.
+// StackTrace reports whether stack trace capturing is enabled.
func (cfg *EventConfig) StackTrace() bool {
return cfg.stackTrace
}
diff --git a/vendor/go.opentelemetry.io/otel/trace/hex.go b/vendor/go.opentelemetry.io/otel/trace/hex.go
new file mode 100644
index 0000000000..1cbef1d4b9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/hex.go
@@ -0,0 +1,38 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+const (
+ // hexLU is a hex lookup table of the 16 lowercase hex digits.
+ // The character values of the string are indexed at the equivalent
+ // hexadecimal value they represent. This table efficiently encodes byte data
+ // into a string representation of hexadecimal.
+ hexLU = "0123456789abcdef"
+
+ // hexRev is a reverse hex lookup table for lowercase hex digits.
+ // The table is efficiently decodes a hexadecimal string into bytes.
+ // Valid hexadecimal characters are indexed at their respective values. All
+ // other invalid ASCII characters are represented with '\xff'.
+ //
+ // The '\xff' character is used as invalid because no valid character has
+ // the upper 4 bits set. Meaning, an efficient validation can be performed
+ // over multiple character parsing by checking these bits remain zero.
+ hexRev = "" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+)
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
index f663547b4e..ff0f6eac62 100644
--- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
@@ -52,7 +52,7 @@ func Map(key string, value ...Attr) Attr {
return Attr{key, MapValue(value...)}
}
-// Equal returns if a is equal to b.
+// Equal reports whether a is equal to b.
func (a Attr) Equal(b Attr) bool {
return a.Key == b.Key && a.Value.Equal(b.Value)
}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
index 7b1ae3c4ea..bea56f2e7d 100644
--- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
@@ -22,7 +22,7 @@ func (tid TraceID) String() string {
return hex.EncodeToString(tid[:])
}
-// IsEmpty returns false if id contains at least one non-zero byte.
+// IsEmpty reports whether the TraceID contains only zero bytes.
func (tid TraceID) IsEmpty() bool {
return tid == [traceIDSize]byte{}
}
@@ -50,7 +50,7 @@ func (sid SpanID) String() string {
return hex.EncodeToString(sid[:])
}
-// IsEmpty returns true if the span ID contains at least one non-zero byte.
+// IsEmpty reports whether the SpanID contains only zero bytes.
func (sid SpanID) IsEmpty() bool {
return sid == [spanIDSize]byte{}
}
@@ -82,7 +82,7 @@ func marshalJSON(id []byte) ([]byte, error) {
}
// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes.
-func unmarshalJSON(dst []byte, src []byte) error {
+func unmarshalJSON(dst, src []byte) error {
if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
src = src[1 : l-1]
}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
index ae9ce102a9..cb7927b816 100644
--- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
@@ -257,10 +257,10 @@ func (v Value) Kind() ValueKind {
}
}
-// Empty returns if v does not hold any value.
+// Empty reports whether v does not hold any value.
func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty }
-// Equal returns if v is equal to w.
+// Equal reports whether v is equal to w.
func (v Value) Equal(w Value) bool {
k1 := v.Kind()
k2 := w.Kind()
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go
index 0f56e4dbb3..400fab1238 100644
--- a/vendor/go.opentelemetry.io/otel/trace/noop.go
+++ b/vendor/go.opentelemetry.io/otel/trace/noop.go
@@ -26,7 +26,7 @@ type noopTracerProvider struct{ embedded.TracerProvider }
var _ TracerProvider = noopTracerProvider{}
// Tracer returns noop implementation of Tracer.
-func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer {
+func (noopTracerProvider) Tracer(string, ...TracerOption) Tracer {
return noopTracer{}
}
@@ -37,7 +37,7 @@ var _ Tracer = noopTracer{}
// Start carries forward a non-recording Span, if one is present in the context, otherwise it
// creates a no-op Span.
-func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) {
+func (noopTracer) Start(ctx context.Context, _ string, _ ...SpanStartOption) (context.Context, Span) {
span := SpanFromContext(ctx)
if _, ok := span.(nonRecordingSpan); !ok {
// span is likely already a noopSpan, but let's be sure
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
index 64a4f1b362..689d220df7 100644
--- a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
+++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
@@ -51,7 +51,7 @@ type Tracer struct{ embedded.Tracer }
// If ctx contains a span context, the returned span will also contain that
// span context. If the span context in ctx is for a non-recording span, that
// span instance will be returned directly.
-func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) {
+func (Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) {
span := trace.SpanFromContext(ctx)
// If the parent context contains a non-zero span context, that span
diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go
index d49adf671b..ee6f4bcb2a 100644
--- a/vendor/go.opentelemetry.io/otel/trace/trace.go
+++ b/vendor/go.opentelemetry.io/otel/trace/trace.go
@@ -4,8 +4,6 @@
package trace // import "go.opentelemetry.io/otel/trace"
import (
- "bytes"
- "encoding/hex"
"encoding/json"
)
@@ -38,21 +36,47 @@ var (
_ json.Marshaler = nilTraceID
)
-// IsValid checks whether the trace TraceID is valid. A valid trace ID does
+// IsValid reports whether the trace TraceID is valid. A valid trace ID does
// not consist of zeros only.
func (t TraceID) IsValid() bool {
- return !bytes.Equal(t[:], nilTraceID[:])
+ return t != nilTraceID
}
// MarshalJSON implements a custom marshal function to encode TraceID
// as a hex string.
func (t TraceID) MarshalJSON() ([]byte, error) {
- return json.Marshal(t.String())
+ b := [32 + 2]byte{0: '"', 33: '"'}
+ h := t.hexBytes()
+ copy(b[1:], h[:])
+ return b[:], nil
}
// String returns the hex string representation form of a TraceID.
func (t TraceID) String() string {
- return hex.EncodeToString(t[:])
+ h := t.hexBytes()
+ return string(h[:])
+}
+
+// hexBytes returns the hex string representation form of a TraceID.
+func (t TraceID) hexBytes() [32]byte {
+ return [32]byte{
+ hexLU[t[0x0]>>4], hexLU[t[0x0]&0xf],
+ hexLU[t[0x1]>>4], hexLU[t[0x1]&0xf],
+ hexLU[t[0x2]>>4], hexLU[t[0x2]&0xf],
+ hexLU[t[0x3]>>4], hexLU[t[0x3]&0xf],
+ hexLU[t[0x4]>>4], hexLU[t[0x4]&0xf],
+ hexLU[t[0x5]>>4], hexLU[t[0x5]&0xf],
+ hexLU[t[0x6]>>4], hexLU[t[0x6]&0xf],
+ hexLU[t[0x7]>>4], hexLU[t[0x7]&0xf],
+ hexLU[t[0x8]>>4], hexLU[t[0x8]&0xf],
+ hexLU[t[0x9]>>4], hexLU[t[0x9]&0xf],
+ hexLU[t[0xa]>>4], hexLU[t[0xa]&0xf],
+ hexLU[t[0xb]>>4], hexLU[t[0xb]&0xf],
+ hexLU[t[0xc]>>4], hexLU[t[0xc]&0xf],
+ hexLU[t[0xd]>>4], hexLU[t[0xd]&0xf],
+ hexLU[t[0xe]>>4], hexLU[t[0xe]&0xf],
+ hexLU[t[0xf]>>4], hexLU[t[0xf]&0xf],
+ }
}
// SpanID is a unique identity of a span in a trace.
@@ -63,21 +87,38 @@ var (
_ json.Marshaler = nilSpanID
)
-// IsValid checks whether the SpanID is valid. A valid SpanID does not consist
+// IsValid reports whether the SpanID is valid. A valid SpanID does not consist
// of zeros only.
func (s SpanID) IsValid() bool {
- return !bytes.Equal(s[:], nilSpanID[:])
+ return s != nilSpanID
}
// MarshalJSON implements a custom marshal function to encode SpanID
// as a hex string.
func (s SpanID) MarshalJSON() ([]byte, error) {
- return json.Marshal(s.String())
+ b := [16 + 2]byte{0: '"', 17: '"'}
+ h := s.hexBytes()
+ copy(b[1:], h[:])
+ return b[:], nil
}
// String returns the hex string representation form of a SpanID.
func (s SpanID) String() string {
- return hex.EncodeToString(s[:])
+ b := s.hexBytes()
+ return string(b[:])
+}
+
+func (s SpanID) hexBytes() [16]byte {
+ return [16]byte{
+ hexLU[s[0]>>4], hexLU[s[0]&0xf],
+ hexLU[s[1]>>4], hexLU[s[1]&0xf],
+ hexLU[s[2]>>4], hexLU[s[2]&0xf],
+ hexLU[s[3]>>4], hexLU[s[3]&0xf],
+ hexLU[s[4]>>4], hexLU[s[4]&0xf],
+ hexLU[s[5]>>4], hexLU[s[5]&0xf],
+ hexLU[s[6]>>4], hexLU[s[6]&0xf],
+ hexLU[s[7]>>4], hexLU[s[7]&0xf],
+ }
}
// TraceIDFromHex returns a TraceID from a hex string if it is compliant with
@@ -85,65 +126,58 @@ func (s SpanID) String() string {
// https://www.w3.org/TR/trace-context/#trace-id
// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`.
func TraceIDFromHex(h string) (TraceID, error) {
- t := TraceID{}
if len(h) != 32 {
- return t, errInvalidTraceIDLength
+ return [16]byte{}, errInvalidTraceIDLength
}
-
- if err := decodeHex(h, t[:]); err != nil {
- return t, err
+ var b [16]byte
+ invalidMark := byte(0)
+ for i := 0; i < len(h); i += 4 {
+ b[i/2] = (hexRev[h[i]] << 4) | hexRev[h[i+1]]
+ b[i/2+1] = (hexRev[h[i+2]] << 4) | hexRev[h[i+3]]
+ invalidMark |= hexRev[h[i]] | hexRev[h[i+1]] | hexRev[h[i+2]] | hexRev[h[i+3]]
}
-
- if !t.IsValid() {
- return t, errNilTraceID
+ // If the upper 4 bits of any byte are not zero, there was an invalid hex
+ // character since invalid hex characters are 0xff in hexRev.
+ if invalidMark&0xf0 != 0 {
+ return [16]byte{}, errInvalidHexID
+ }
+ // If we didn't set any bits, then h was all zeros.
+ if invalidMark == 0 {
+ return [16]byte{}, errNilTraceID
}
- return t, nil
+ return b, nil
}
// SpanIDFromHex returns a SpanID from a hex string if it is compliant
// with the w3c trace-context specification.
// See more at https://www.w3.org/TR/trace-context/#parent-id
func SpanIDFromHex(h string) (SpanID, error) {
- s := SpanID{}
if len(h) != 16 {
- return s, errInvalidSpanIDLength
- }
-
- if err := decodeHex(h, s[:]); err != nil {
- return s, err
+ return [8]byte{}, errInvalidSpanIDLength
}
-
- if !s.IsValid() {
- return s, errNilSpanID
+ var b [8]byte
+ invalidMark := byte(0)
+ for i := 0; i < len(h); i += 4 {
+ b[i/2] = (hexRev[h[i]] << 4) | hexRev[h[i+1]]
+ b[i/2+1] = (hexRev[h[i+2]] << 4) | hexRev[h[i+3]]
+ invalidMark |= hexRev[h[i]] | hexRev[h[i+1]] | hexRev[h[i+2]] | hexRev[h[i+3]]
}
- return s, nil
-}
-
-func decodeHex(h string, b []byte) error {
- for _, r := range h {
- switch {
- case 'a' <= r && r <= 'f':
- continue
- case '0' <= r && r <= '9':
- continue
- default:
- return errInvalidHexID
- }
+ // If the upper 4 bits of any byte are not zero, there was an invalid hex
+ // character since invalid hex characters are 0xff in hexRev.
+ if invalidMark&0xf0 != 0 {
+ return [8]byte{}, errInvalidHexID
}
-
- decoded, err := hex.DecodeString(h)
- if err != nil {
- return err
+ // If we didn't set any bits, then h was all zeros.
+ if invalidMark == 0 {
+ return [8]byte{}, errNilSpanID
}
-
- copy(b, decoded)
- return nil
+ return b, nil
}
// TraceFlags contains flags that can be set on a SpanContext.
type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`.
-// IsSampled returns if the sampling bit is set in the TraceFlags.
+// IsSampled reports whether the sampling bit is set in the TraceFlags.
func (tf TraceFlags) IsSampled() bool {
return tf&FlagsSampled == FlagsSampled
}
@@ -160,12 +194,20 @@ func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive //
// MarshalJSON implements a custom marshal function to encode TraceFlags
// as a hex string.
func (tf TraceFlags) MarshalJSON() ([]byte, error) {
- return json.Marshal(tf.String())
+ b := [2 + 2]byte{0: '"', 3: '"'}
+ h := tf.hexBytes()
+ copy(b[1:], h[:])
+ return b[:], nil
}
// String returns the hex string representation form of TraceFlags.
func (tf TraceFlags) String() string {
- return hex.EncodeToString([]byte{byte(tf)}[:])
+ h := tf.hexBytes()
+ return string(h[:])
+}
+
+func (tf TraceFlags) hexBytes() [2]byte {
+ return [2]byte{hexLU[tf>>4], hexLU[tf&0xf]}
}
// SpanContextConfig contains mutable fields usable for constructing
@@ -201,13 +243,13 @@ type SpanContext struct {
var _ json.Marshaler = SpanContext{}
-// IsValid returns if the SpanContext is valid. A valid span context has a
+// IsValid reports whether the SpanContext is valid. A valid span context has a
// valid TraceID and SpanID.
func (sc SpanContext) IsValid() bool {
return sc.HasTraceID() && sc.HasSpanID()
}
-// IsRemote indicates whether the SpanContext represents a remotely-created Span.
+// IsRemote reports whether the SpanContext represents a remotely-created Span.
func (sc SpanContext) IsRemote() bool {
return sc.remote
}
@@ -228,7 +270,7 @@ func (sc SpanContext) TraceID() TraceID {
return sc.traceID
}
-// HasTraceID checks if the SpanContext has a valid TraceID.
+// HasTraceID reports whether the SpanContext has a valid TraceID.
func (sc SpanContext) HasTraceID() bool {
return sc.traceID.IsValid()
}
@@ -249,7 +291,7 @@ func (sc SpanContext) SpanID() SpanID {
return sc.spanID
}
-// HasSpanID checks if the SpanContext has a valid SpanID.
+// HasSpanID reports whether the SpanContext has a valid SpanID.
func (sc SpanContext) HasSpanID() bool {
return sc.spanID.IsValid()
}
@@ -270,7 +312,7 @@ func (sc SpanContext) TraceFlags() TraceFlags {
return sc.traceFlags
}
-// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags.
+// IsSampled reports whether the sampling bit is set in the SpanContext's TraceFlags.
func (sc SpanContext) IsSampled() bool {
return sc.traceFlags.IsSampled()
}
@@ -302,7 +344,7 @@ func (sc SpanContext) WithTraceState(state TraceState) SpanContext {
}
}
-// Equal is a predicate that determines whether two SpanContext values are equal.
+// Equal reports whether two SpanContext values are equal.
func (sc SpanContext) Equal(other SpanContext) bool {
return sc.traceID == other.traceID &&
sc.spanID == other.spanID &&
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
index dc5e34cad0..073adae2fa 100644
--- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go
+++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
@@ -80,7 +80,7 @@ func checkKeyRemain(key string) bool {
//
// param n is remain part length, should be 255 in simple-key or 13 in system-id.
func checkKeyPart(key string, n int) bool {
- if len(key) == 0 {
+ if key == "" {
return false
}
first := key[0] // key's first char
@@ -102,7 +102,7 @@ func isAlphaNum(c byte) bool {
//
// param n is remain part length, should be 240 exactly.
func checkKeyTenant(key string, n int) bool {
- if len(key) == 0 {
+ if key == "" {
return false
}
return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:])
@@ -191,7 +191,7 @@ func ParseTraceState(ts string) (TraceState, error) {
for ts != "" {
var memberStr string
memberStr, ts, _ = strings.Cut(ts, listDelimiters)
- if len(memberStr) == 0 {
+ if memberStr == "" {
continue
}
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index 7afe92b598..bcaa5aa537 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
- return "1.37.0"
+ return "1.38.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
index 9d4742a176..07145e254b 100644
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -3,7 +3,7 @@
module-sets:
stable-v1:
- version: v1.37.0
+ version: v1.38.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opencensus
@@ -22,11 +22,11 @@ module-sets:
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/trace
experimental-metrics:
- version: v0.59.0
+ version: v0.60.0
modules:
- go.opentelemetry.io/otel/exporters/prometheus
experimental-logs:
- version: v0.13.0
+ version: v0.14.0
modules:
- go.opentelemetry.io/otel/log
- go.opentelemetry.io/otel/log/logtest
@@ -36,7 +36,7 @@ module-sets:
- go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
- go.opentelemetry.io/otel/exporters/stdout/stdoutlog
experimental-schema:
- version: v0.0.12
+ version: v0.0.13
modules:
- go.opentelemetry.io/otel/schema
excluded-modules:
diff --git a/vendor/go.yaml.in/yaml/v2/.travis.yml b/vendor/go.yaml.in/yaml/v2/.travis.yml
new file mode 100644
index 0000000000..7348c50c0c
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+
+go:
+ - "1.4.x"
+ - "1.5.x"
+ - "1.6.x"
+ - "1.7.x"
+ - "1.8.x"
+ - "1.9.x"
+ - "1.10.x"
+ - "1.11.x"
+ - "1.12.x"
+ - "1.13.x"
+ - "1.14.x"
+ - "tip"
+
+go_import_path: gopkg.in/yaml.v2
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE b/vendor/go.yaml.in/yaml/v2/LICENSE
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE
rename to vendor/go.yaml.in/yaml/v2/LICENSE
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml b/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml
rename to vendor/go.yaml.in/yaml/v2/LICENSE.libyaml
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE b/vendor/go.yaml.in/yaml/v2/NOTICE
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE
rename to vendor/go.yaml.in/yaml/v2/NOTICE
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md b/vendor/go.yaml.in/yaml/v2/README.md
similarity index 76%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/README.md
rename to vendor/go.yaml.in/yaml/v2/README.md
index 53f4139dc3..c9388da425 100644
--- a/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md
+++ b/vendor/go.yaml.in/yaml/v2/README.md
@@ -1,13 +1,3 @@
-# go-yaml fork
-
-This package is a fork of the go-yaml library and is intended solely for consumption
-by kubernetes projects. In this fork, we plan to support only critical changes required for
-kubernetes, such as small bug fixes and regressions. Larger, general-purpose feature requests
-should be made in the upstream go-yaml library, and we will reject such changes in this fork
-unless we are pulling them from upstream.
-
-This fork is based on v2.4.0: https://github.com/go-yaml/yaml/releases/tag/v2.4.0
-
# YAML support for the Go language
Introduction
@@ -30,18 +20,16 @@ supported since they're a poor design and are gone in YAML 1.2.
Installation and usage
----------------------
-The import path for the package is *gopkg.in/yaml.v2*.
+The import path for the package is *go.yaml.in/yaml/v2*.
To install it, run:
- go get gopkg.in/yaml.v2
+ go get go.yaml.in/yaml/v2
API documentation
-----------------
-If opened in a browser, the import path itself leads to the API documentation:
-
- * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
+See:
API stability
-------------
@@ -65,7 +53,7 @@ import (
"fmt"
"log"
- "gopkg.in/yaml.v2"
+ "go.yaml.in/yaml/v2"
)
var data = `
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go b/vendor/go.yaml.in/yaml/v2/apic.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go
rename to vendor/go.yaml.in/yaml/v2/apic.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go b/vendor/go.yaml.in/yaml/v2/decode.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go
rename to vendor/go.yaml.in/yaml/v2/decode.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go b/vendor/go.yaml.in/yaml/v2/emitterc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go
rename to vendor/go.yaml.in/yaml/v2/emitterc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go b/vendor/go.yaml.in/yaml/v2/encode.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go
rename to vendor/go.yaml.in/yaml/v2/encode.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go b/vendor/go.yaml.in/yaml/v2/parserc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go
rename to vendor/go.yaml.in/yaml/v2/parserc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go b/vendor/go.yaml.in/yaml/v2/readerc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go
rename to vendor/go.yaml.in/yaml/v2/readerc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go b/vendor/go.yaml.in/yaml/v2/resolve.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go
rename to vendor/go.yaml.in/yaml/v2/resolve.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go b/vendor/go.yaml.in/yaml/v2/scannerc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go
rename to vendor/go.yaml.in/yaml/v2/scannerc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go b/vendor/go.yaml.in/yaml/v2/sorter.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go
rename to vendor/go.yaml.in/yaml/v2/sorter.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go b/vendor/go.yaml.in/yaml/v2/writerc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go
rename to vendor/go.yaml.in/yaml/v2/writerc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go b/vendor/go.yaml.in/yaml/v2/yaml.go
similarity index 99%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go
rename to vendor/go.yaml.in/yaml/v2/yaml.go
index 30813884c0..5248e1263c 100644
--- a/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go
+++ b/vendor/go.yaml.in/yaml/v2/yaml.go
@@ -2,7 +2,7 @@
//
// Source code and other details for the project are available at GitHub:
//
-// https://github.com/go-yaml/yaml
+// https://github.com/yaml/go-yaml
//
package yaml
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go b/vendor/go.yaml.in/yaml/v2/yamlh.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go
rename to vendor/go.yaml.in/yaml/v2/yamlh.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go b/vendor/go.yaml.in/yaml/v2/yamlprivateh.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go
rename to vendor/go.yaml.in/yaml/v2/yamlprivateh.go
diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go
index 21ca3b2ee4..8ff087df4c 100644
--- a/vendor/golang.org/x/crypto/curve25519/curve25519.go
+++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go
@@ -36,7 +36,7 @@ func ScalarBaseMult(dst, scalar *[32]byte) {
curve := ecdh.X25519()
priv, err := curve.NewPrivateKey(scalar[:])
if err != nil {
- panic("curve25519: internal error: scalarBaseMult was not 32 bytes")
+ panic("curve25519: " + err.Error())
}
copy(dst[:], priv.PublicKey().Bytes())
}
diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
new file mode 100644
index 0000000000..28cd99c7f3
--- /dev/null
+++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
@@ -0,0 +1,77 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC
+2898 / PKCS #5 v2.0.
+
+A key derivation function is useful when encrypting data based on a password
+or any other not-fully-random data. It uses a pseudorandom function to derive
+a secure encryption key based on the password.
+
+While v2.0 of the standard defines only one pseudorandom function to use,
+HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved
+Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To
+choose, you can pass the `New` functions from the different SHA packages to
+pbkdf2.Key.
+*/
+package pbkdf2
+
+import (
+ "crypto/hmac"
+ "hash"
+)
+
+// Key derives a key from the password, salt and iteration count, returning a
+// []byte of length keylen that can be used as cryptographic key. The key is
+// derived based on the method described as PBKDF2 with the HMAC variant using
+// the supplied hash function.
+//
+// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you
+// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by
+// doing:
+//
+// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New)
+//
+// Remember to get a good random salt. At least 8 bytes is recommended by the
+// RFC.
+//
+// Using a higher iteration count will increase the cost of an exhaustive
+// search but will also make derivation proportionally slower.
+func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte {
+ prf := hmac.New(h, password)
+ hashLen := prf.Size()
+ numBlocks := (keyLen + hashLen - 1) / hashLen
+
+ var buf [4]byte
+ dk := make([]byte, 0, numBlocks*hashLen)
+ U := make([]byte, hashLen)
+ for block := 1; block <= numBlocks; block++ {
+ // N.B.: || means concatenation, ^ means XOR
+ // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter
+ // U_1 = PRF(password, salt || uint(i))
+ prf.Reset()
+ prf.Write(salt)
+ buf[0] = byte(block >> 24)
+ buf[1] = byte(block >> 16)
+ buf[2] = byte(block >> 8)
+ buf[3] = byte(block)
+ prf.Write(buf[:4])
+ dk = prf.Sum(dk)
+ T := dk[len(dk)-hashLen:]
+ copy(U, T)
+
+ // U_n = PRF(password, U_(n-1))
+ for n := 2; n <= iter; n++ {
+ prf.Reset()
+ prf.Write(U)
+ U = U[:0]
+ U = prf.Sum(U)
+ for x := range U {
+ T[x] ^= U[x]
+ }
+ }
+ }
+ return dk[:keyLen]
+}
diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go
index ca645d9a1a..02fe0c2d48 100644
--- a/vendor/golang.org/x/net/http2/config.go
+++ b/vendor/golang.org/x/net/http2/config.go
@@ -55,7 +55,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config {
PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites,
CountError: h2.CountError,
}
- fillNetHTTPServerConfig(&conf, h1)
+ fillNetHTTPConfig(&conf, h1.HTTP2)
setConfigDefaults(&conf, true)
return conf
}
@@ -81,7 +81,7 @@ func configFromTransport(h2 *Transport) http2Config {
}
if h2.t1 != nil {
- fillNetHTTPTransportConfig(&conf, h2.t1)
+ fillNetHTTPConfig(&conf, h2.t1.HTTP2)
}
setConfigDefaults(&conf, false)
return conf
@@ -120,3 +120,45 @@ func adjustHTTP1MaxHeaderSize(n int64) int64 {
const typicalHeaders = 10 // conservative
return n + typicalHeaders*perFieldOverhead
}
+
+func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
+ if h2 == nil {
+ return
+ }
+ if h2.MaxConcurrentStreams != 0 {
+ conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+ }
+ if h2.MaxEncoderHeaderTableSize != 0 {
+ conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
+ }
+ if h2.MaxDecoderHeaderTableSize != 0 {
+ conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
+ }
+ if h2.MaxConcurrentStreams != 0 {
+ conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+ }
+ if h2.MaxReadFrameSize != 0 {
+ conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
+ }
+ if h2.MaxReceiveBufferPerConnection != 0 {
+ conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
+ }
+ if h2.MaxReceiveBufferPerStream != 0 {
+ conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
+ }
+ if h2.SendPingTimeout != 0 {
+ conf.SendPingTimeout = h2.SendPingTimeout
+ }
+ if h2.PingTimeout != 0 {
+ conf.PingTimeout = h2.PingTimeout
+ }
+ if h2.WriteByteTimeout != 0 {
+ conf.WriteByteTimeout = h2.WriteByteTimeout
+ }
+ if h2.PermitProhibitedCipherSuites {
+ conf.PermitProhibitedCipherSuites = true
+ }
+ if h2.CountError != nil {
+ conf.CountError = h2.CountError
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go
deleted file mode 100644
index 5b516c55ff..0000000000
--- a/vendor/golang.org/x/net/http2/config_go124.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.24
-
-package http2
-
-import "net/http"
-
-// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2.
-func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
- fillNetHTTPConfig(conf, srv.HTTP2)
-}
-
-// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2.
-func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
- fillNetHTTPConfig(conf, tr.HTTP2)
-}
-
-func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
- if h2 == nil {
- return
- }
- if h2.MaxConcurrentStreams != 0 {
- conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
- }
- if h2.MaxEncoderHeaderTableSize != 0 {
- conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
- }
- if h2.MaxDecoderHeaderTableSize != 0 {
- conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
- }
- if h2.MaxConcurrentStreams != 0 {
- conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
- }
- if h2.MaxReadFrameSize != 0 {
- conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
- }
- if h2.MaxReceiveBufferPerConnection != 0 {
- conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
- }
- if h2.MaxReceiveBufferPerStream != 0 {
- conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
- }
- if h2.SendPingTimeout != 0 {
- conf.SendPingTimeout = h2.SendPingTimeout
- }
- if h2.PingTimeout != 0 {
- conf.PingTimeout = h2.PingTimeout
- }
- if h2.WriteByteTimeout != 0 {
- conf.WriteByteTimeout = h2.WriteByteTimeout
- }
- if h2.PermitProhibitedCipherSuites {
- conf.PermitProhibitedCipherSuites = true
- }
- if h2.CountError != nil {
- conf.CountError = h2.CountError
- }
-}
diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go
deleted file mode 100644
index 060fd6c64c..0000000000
--- a/vendor/golang.org/x/net/http2/config_pre_go124.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.24
-
-package http2
-
-import "net/http"
-
-// Pre-Go 1.24 fallback.
-// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24.
-
-func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {}
-
-func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {}
diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go
index 9933c9f8c7..9921ca096d 100644
--- a/vendor/golang.org/x/net/http2/gotrack.go
+++ b/vendor/golang.org/x/net/http2/gotrack.go
@@ -15,21 +15,32 @@ import (
"runtime"
"strconv"
"sync"
+ "sync/atomic"
)
var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
+// Setting DebugGoroutines to false during a test to disable goroutine debugging
+// results in race detector complaints when a test leaves goroutines running before
+// returning. Tests shouldn't do this, of course, but when they do it generally shows
+// up as infrequent, hard-to-debug flakes. (See #66519.)
+//
+// Disable goroutine debugging during individual tests with an atomic bool.
+// (Note that it's safe to enable/disable debugging mid-test, so the actual race condition
+// here is harmless.)
+var disableDebugGoroutines atomic.Bool
+
type goroutineLock uint64
func newGoroutineLock() goroutineLock {
- if !DebugGoroutines {
+ if !DebugGoroutines || disableDebugGoroutines.Load() {
return 0
}
return goroutineLock(curGoroutineID())
}
func (g goroutineLock) check() {
- if !DebugGoroutines {
+ if !DebugGoroutines || disableDebugGoroutines.Load() {
return
}
if curGoroutineID() != uint64(g) {
@@ -38,7 +49,7 @@ func (g goroutineLock) check() {
}
func (g goroutineLock) checkNotOn() {
- if !DebugGoroutines {
+ if !DebugGoroutines || disableDebugGoroutines.Load() {
return
}
if curGoroutineID() == uint64(g) {
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index ea5ae629fd..6878f8ecc9 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -15,7 +15,6 @@ package http2 // import "golang.org/x/net/http2"
import (
"bufio"
- "context"
"crypto/tls"
"errors"
"fmt"
@@ -255,15 +254,13 @@ func (cw closeWaiter) Wait() {
// idle memory usage with many connections.
type bufferedWriter struct {
_ incomparable
- group synctestGroupInterface // immutable
- conn net.Conn // immutable
- bw *bufio.Writer // non-nil when data is buffered
- byteTimeout time.Duration // immutable, WriteByteTimeout
+ conn net.Conn // immutable
+ bw *bufio.Writer // non-nil when data is buffered
+ byteTimeout time.Duration // immutable, WriteByteTimeout
}
-func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter {
+func newBufferedWriter(conn net.Conn, timeout time.Duration) *bufferedWriter {
return &bufferedWriter{
- group: group,
conn: conn,
byteTimeout: timeout,
}
@@ -314,24 +311,18 @@ func (w *bufferedWriter) Flush() error {
type bufferedWriterTimeoutWriter bufferedWriter
func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) {
- return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p)
+ return writeWithByteTimeout(w.conn, w.byteTimeout, p)
}
// writeWithByteTimeout writes to conn.
// If more than timeout passes without any bytes being written to the connection,
// the write fails.
-func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
+func writeWithByteTimeout(conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
if timeout <= 0 {
return conn.Write(p)
}
for {
- var now time.Time
- if group == nil {
- now = time.Now()
- } else {
- now = group.Now()
- }
- conn.SetWriteDeadline(now.Add(timeout))
+ conn.SetWriteDeadline(time.Now().Add(timeout))
nn, err := conn.Write(p[n:])
n += nn
if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
@@ -417,14 +408,3 @@ func (s *sorter) SortStrings(ss []string) {
// makes that struct also non-comparable, and generally doesn't add
// any size (as long as it's first).
type incomparable [0]func()
-
-// synctestGroupInterface is the methods of synctestGroup used by Server and Transport.
-// It's defined as an interface here to let us keep synctestGroup entirely test-only
-// and not a part of non-test builds.
-type synctestGroupInterface interface {
- Join()
- Now() time.Time
- NewTimer(d time.Duration) timer
- AfterFunc(d time.Duration, f func()) timer
- ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc)
-}
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 51fca38f61..64085f6e16 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -176,39 +176,6 @@ type Server struct {
// so that we don't embed a Mutex in this struct, which will make the
// struct non-copyable, which might break some callers.
state *serverInternalState
-
- // Synchronization group used for testing.
- // Outside of tests, this is nil.
- group synctestGroupInterface
-}
-
-func (s *Server) markNewGoroutine() {
- if s.group != nil {
- s.group.Join()
- }
-}
-
-func (s *Server) now() time.Time {
- if s.group != nil {
- return s.group.Now()
- }
- return time.Now()
-}
-
-// newTimer creates a new time.Timer, or a synthetic timer in tests.
-func (s *Server) newTimer(d time.Duration) timer {
- if s.group != nil {
- return s.group.NewTimer(d)
- }
- return timeTimer{time.NewTimer(d)}
-}
-
-// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
-func (s *Server) afterFunc(d time.Duration, f func()) timer {
- if s.group != nil {
- return s.group.AfterFunc(d, f)
- }
- return timeTimer{time.AfterFunc(d, f)}
}
type serverInternalState struct {
@@ -423,6 +390,9 @@ func (o *ServeConnOpts) handler() http.Handler {
//
// The opts parameter is optional. If nil, default values are used.
func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
+ if opts == nil {
+ opts = &ServeConnOpts{}
+ }
s.serveConn(c, opts, nil)
}
@@ -438,7 +408,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
conn: c,
baseCtx: baseCtx,
remoteAddrStr: c.RemoteAddr().String(),
- bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout),
+ bw: newBufferedWriter(c, conf.WriteByteTimeout),
handler: opts.handler(),
streams: make(map[uint32]*stream),
readFrameCh: make(chan readFrameResult),
@@ -638,11 +608,11 @@ type serverConn struct {
pingSent bool
sentPingData [8]byte
goAwayCode ErrCode
- shutdownTimer timer // nil until used
- idleTimer timer // nil if unused
+ shutdownTimer *time.Timer // nil until used
+ idleTimer *time.Timer // nil if unused
readIdleTimeout time.Duration
pingTimeout time.Duration
- readIdleTimer timer // nil if unused
+ readIdleTimer *time.Timer // nil if unused
// Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer
@@ -687,12 +657,12 @@ type stream struct {
flow outflow // limits writing from Handler to client
inflow inflow // what the client is allowed to POST/etc to us
state streamState
- resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
- gotTrailerHeader bool // HEADER frame for trailers was seen
- wroteHeaders bool // whether we wrote headers (not status 100)
- readDeadline timer // nil if unused
- writeDeadline timer // nil if unused
- closeErr error // set before cw is closed
+ resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
+ gotTrailerHeader bool // HEADER frame for trailers was seen
+ wroteHeaders bool // whether we wrote headers (not status 100)
+ readDeadline *time.Timer // nil if unused
+ writeDeadline *time.Timer // nil if unused
+ closeErr error // set before cw is closed
trailer http.Header // accumulated trailers
reqTrailer http.Header // handler's Request.Trailer
@@ -848,7 +818,6 @@ type readFrameResult struct {
// consumer is done with the frame.
// It's run on its own goroutine.
func (sc *serverConn) readFrames() {
- sc.srv.markNewGoroutine()
gate := make(chan struct{})
gateDone := func() { gate <- struct{}{} }
for {
@@ -881,7 +850,6 @@ type frameWriteResult struct {
// At most one goroutine can be running writeFrameAsync at a time per
// serverConn.
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
- sc.srv.markNewGoroutine()
var err error
if wd == nil {
err = wr.write.writeFrame(sc)
@@ -965,22 +933,22 @@ func (sc *serverConn) serve(conf http2Config) {
sc.setConnState(http.StateIdle)
if sc.srv.IdleTimeout > 0 {
- sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
+ sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
defer sc.idleTimer.Stop()
}
if conf.SendPingTimeout > 0 {
sc.readIdleTimeout = conf.SendPingTimeout
- sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
+ sc.readIdleTimer = time.AfterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
defer sc.readIdleTimer.Stop()
}
go sc.readFrames() // closed by defer sc.conn.Close above
- settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
+ settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
defer settingsTimer.Stop()
- lastFrameTime := sc.srv.now()
+ lastFrameTime := time.Now()
loopNum := 0
for {
loopNum++
@@ -994,7 +962,7 @@ func (sc *serverConn) serve(conf http2Config) {
case res := <-sc.wroteFrameCh:
sc.wroteFrame(res)
case res := <-sc.readFrameCh:
- lastFrameTime = sc.srv.now()
+ lastFrameTime = time.Now()
// Process any written frames before reading new frames from the client since a
// written frame could have triggered a new stream to be started.
if sc.writingFrameAsync {
@@ -1077,7 +1045,7 @@ func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
}
pingAt := lastFrameReadTime.Add(sc.readIdleTimeout)
- now := sc.srv.now()
+ now := time.Now()
if pingAt.After(now) {
// We received frames since arming the ping timer.
// Reset it for the next possible timeout.
@@ -1141,10 +1109,10 @@ func (sc *serverConn) readPreface() error {
errc <- nil
}
}()
- timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server?
+ timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
defer timer.Stop()
select {
- case <-timer.C():
+ case <-timer.C:
return errPrefaceTimeout
case err := <-errc:
if err == nil {
@@ -1160,6 +1128,21 @@ var errChanPool = sync.Pool{
New: func() interface{} { return make(chan error, 1) },
}
+func getErrChan() chan error {
+ if inTests {
+ // Channels cannot be reused across synctest tests.
+ return make(chan error, 1)
+ } else {
+ return errChanPool.Get().(chan error)
+ }
+}
+
+func putErrChan(ch chan error) {
+ if !inTests {
+ errChanPool.Put(ch)
+ }
+}
+
var writeDataPool = sync.Pool{
New: func() interface{} { return new(writeData) },
}
@@ -1167,7 +1150,7 @@ var writeDataPool = sync.Pool{
// writeDataFromHandler writes DATA response frames from a handler on
// the given stream.
func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
- ch := errChanPool.Get().(chan error)
+ ch := getErrChan()
writeArg := writeDataPool.Get().(*writeData)
*writeArg = writeData{stream.id, data, endStream}
err := sc.writeFrameFromHandler(FrameWriteRequest{
@@ -1199,7 +1182,7 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea
return errStreamClosed
}
}
- errChanPool.Put(ch)
+ putErrChan(ch)
if frameWriteDone {
writeDataPool.Put(writeArg)
}
@@ -1513,7 +1496,7 @@ func (sc *serverConn) goAway(code ErrCode) {
func (sc *serverConn) shutDownIn(d time.Duration) {
sc.serveG.check()
- sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer)
+ sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
}
func (sc *serverConn) resetStream(se StreamError) {
@@ -2118,7 +2101,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// (in Go 1.8), though. That's a more sane option anyway.
if sc.hs.ReadTimeout > 0 {
sc.conn.SetReadDeadline(time.Time{})
- st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
+ st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
}
return sc.scheduleHandler(id, rw, req, handler)
@@ -2216,7 +2199,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
st.flow.add(sc.initialStreamSendWindowSize)
st.inflow.init(sc.initialStreamRecvWindowSize)
if sc.hs.WriteTimeout > 0 {
- st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
+ st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
sc.streams[id] = st
@@ -2405,7 +2388,6 @@ func (sc *serverConn) handlerDone() {
// Run on its own goroutine.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
- sc.srv.markNewGoroutine()
defer sc.sendServeMsg(handlerDoneMsg)
didPanic := true
defer func() {
@@ -2454,7 +2436,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro
// waiting for this frame to be written, so an http.Flush mid-handler
// writes out the correct value of keys, before a handler later potentially
// mutates it.
- errc = errChanPool.Get().(chan error)
+ errc = getErrChan()
}
if err := sc.writeFrameFromHandler(FrameWriteRequest{
write: headerData,
@@ -2466,7 +2448,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro
if errc != nil {
select {
case err := <-errc:
- errChanPool.Put(errc)
+ putErrChan(errc)
return err
case <-sc.doneServing:
return errClientDisconnected
@@ -2573,7 +2555,7 @@ func (b *requestBody) Read(p []byte) (n int, err error) {
if err == io.EOF {
b.sawEOF = true
}
- if b.conn == nil && inTests {
+ if b.conn == nil {
return
}
b.conn.noteBodyReadFromHandler(b.stream, n, err)
@@ -2702,7 +2684,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
var date string
if _, ok := rws.snapHeader["Date"]; !ok {
// TODO(bradfitz): be faster here, like net/http? measure.
- date = rws.conn.srv.now().UTC().Format(http.TimeFormat)
+ date = time.Now().UTC().Format(http.TimeFormat)
}
for _, v := range rws.snapHeader["Trailer"] {
@@ -2824,7 +2806,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() {
func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
st := w.rws.stream
- if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
+ if !deadline.IsZero() && deadline.Before(time.Now()) {
// If we're setting a deadline in the past, reset the stream immediately
// so writes after SetWriteDeadline returns will fail.
st.onReadTimeout()
@@ -2840,9 +2822,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
if deadline.IsZero() {
st.readDeadline = nil
} else if st.readDeadline == nil {
- st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout)
+ st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
} else {
- st.readDeadline.Reset(deadline.Sub(sc.srv.now()))
+ st.readDeadline.Reset(deadline.Sub(time.Now()))
}
})
return nil
@@ -2850,7 +2832,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
st := w.rws.stream
- if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
+ if !deadline.IsZero() && deadline.Before(time.Now()) {
// If we're setting a deadline in the past, reset the stream immediately
// so writes after SetWriteDeadline returns will fail.
st.onWriteTimeout()
@@ -2866,9 +2848,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
if deadline.IsZero() {
st.writeDeadline = nil
} else if st.writeDeadline == nil {
- st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout)
+ st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
} else {
- st.writeDeadline.Reset(deadline.Sub(sc.srv.now()))
+ st.writeDeadline.Reset(deadline.Sub(time.Now()))
}
})
return nil
@@ -3147,7 +3129,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
method: opts.Method,
url: u,
header: cloneHeader(opts.Header),
- done: errChanPool.Get().(chan error),
+ done: getErrChan(),
}
select {
@@ -3164,7 +3146,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
case <-st.cw:
return errStreamClosed
case err := <-msg.done:
- errChanPool.Put(msg.done)
+ putErrChan(msg.done)
return err
}
}
diff --git a/vendor/golang.org/x/net/http2/timer.go b/vendor/golang.org/x/net/http2/timer.go
deleted file mode 100644
index 0b1c17b812..0000000000
--- a/vendor/golang.org/x/net/http2/timer.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-package http2
-
-import "time"
-
-// A timer is a time.Timer, as an interface which can be replaced in tests.
-type timer = interface {
- C() <-chan time.Time
- Reset(d time.Duration) bool
- Stop() bool
-}
-
-// timeTimer adapts a time.Timer to the timer interface.
-type timeTimer struct {
- *time.Timer
-}
-
-func (t timeTimer) C() <-chan time.Time { return t.Timer.C }
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index f26356b9cd..35e3902519 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -193,50 +193,6 @@ type Transport struct {
type transportTestHooks struct {
newclientconn func(*ClientConn)
- group synctestGroupInterface
-}
-
-func (t *Transport) markNewGoroutine() {
- if t != nil && t.transportTestHooks != nil {
- t.transportTestHooks.group.Join()
- }
-}
-
-func (t *Transport) now() time.Time {
- if t != nil && t.transportTestHooks != nil {
- return t.transportTestHooks.group.Now()
- }
- return time.Now()
-}
-
-func (t *Transport) timeSince(when time.Time) time.Duration {
- if t != nil && t.transportTestHooks != nil {
- return t.now().Sub(when)
- }
- return time.Since(when)
-}
-
-// newTimer creates a new time.Timer, or a synthetic timer in tests.
-func (t *Transport) newTimer(d time.Duration) timer {
- if t.transportTestHooks != nil {
- return t.transportTestHooks.group.NewTimer(d)
- }
- return timeTimer{time.NewTimer(d)}
-}
-
-// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
-func (t *Transport) afterFunc(d time.Duration, f func()) timer {
- if t.transportTestHooks != nil {
- return t.transportTestHooks.group.AfterFunc(d, f)
- }
- return timeTimer{time.AfterFunc(d, f)}
-}
-
-func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
- if t.transportTestHooks != nil {
- return t.transportTestHooks.group.ContextWithTimeout(ctx, d)
- }
- return context.WithTimeout(ctx, d)
}
func (t *Transport) maxHeaderListSize() uint32 {
@@ -366,7 +322,7 @@ type ClientConn struct {
readerErr error // set before readerDone is closed
idleTimeout time.Duration // or 0 for never
- idleTimer timer
+ idleTimer *time.Timer
mu sync.Mutex // guards following
cond *sync.Cond // hold mu; broadcast on flow/closed changes
@@ -534,14 +490,12 @@ func (cs *clientStream) closeReqBodyLocked() {
cs.reqBodyClosed = make(chan struct{})
reqBodyClosed := cs.reqBodyClosed
go func() {
- cs.cc.t.markNewGoroutine()
cs.reqBody.Close()
close(reqBodyClosed)
}()
}
type stickyErrWriter struct {
- group synctestGroupInterface
conn net.Conn
timeout time.Duration
err *error
@@ -551,7 +505,7 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
if *sew.err != nil {
return 0, *sew.err
}
- n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p)
+ n, err = writeWithByteTimeout(sew.conn, sew.timeout, p)
*sew.err = err
return n, err
}
@@ -650,9 +604,9 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
backoff := float64(uint(1) << (uint(retry) - 1))
backoff += backoff * (0.1 * mathrand.Float64())
d := time.Second * time.Duration(backoff)
- tm := t.newTimer(d)
+ tm := time.NewTimer(d)
select {
- case <-tm.C():
+ case <-tm.C:
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
continue
case <-req.Context().Done():
@@ -699,6 +653,7 @@ var (
errClientConnUnusable = errors.New("http2: client conn not usable")
errClientConnNotEstablished = errors.New("http2: client conn could not be established")
errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
+ errClientConnForceClosed = errors.New("http2: client connection force closed via ClientConn.Close")
)
// shouldRetryRequest is called by RoundTrip when a request fails to get
@@ -838,14 +793,11 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
pingTimeout: conf.PingTimeout,
pings: make(map[[8]byte]chan struct{}),
reqHeaderMu: make(chan struct{}, 1),
- lastActive: t.now(),
+ lastActive: time.Now(),
}
- var group synctestGroupInterface
if t.transportTestHooks != nil {
- t.markNewGoroutine()
t.transportTestHooks.newclientconn(cc)
c = cc.tconn
- group = t.group
}
if VerboseLogs {
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
@@ -857,7 +809,6 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
// TODO: adjust this writer size to account for frame size +
// MTU + crypto/tls record padding.
cc.bw = bufio.NewWriter(stickyErrWriter{
- group: group,
conn: c,
timeout: conf.WriteByteTimeout,
err: &cc.werr,
@@ -906,7 +857,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
// Start the idle timer after the connection is fully initialized.
if d := t.idleConnTimeout(); d != 0 {
cc.idleTimeout = d
- cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout)
+ cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
}
go cc.readLoop()
@@ -917,7 +868,7 @@ func (cc *ClientConn) healthCheck() {
pingTimeout := cc.pingTimeout
// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
// trigger the healthCheck again if there is no frame received.
- ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout)
+ ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
defer cancel()
cc.vlogf("http2: Transport sending health check")
err := cc.Ping(ctx)
@@ -1120,7 +1071,7 @@ func (cc *ClientConn) tooIdleLocked() bool {
// times are compared based on their wall time. We don't want
// to reuse a connection that's been sitting idle during
// VM/laptop suspend if monotonic time was also frozen.
- return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout
+ return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
}
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
@@ -1186,7 +1137,6 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
done := make(chan struct{})
cancelled := false // guarded by cc.mu
go func() {
- cc.t.markNewGoroutine()
cc.mu.Lock()
defer cc.mu.Unlock()
for {
@@ -1257,8 +1207,7 @@ func (cc *ClientConn) closeForError(err error) {
//
// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
func (cc *ClientConn) Close() error {
- err := errors.New("http2: client connection force closed via ClientConn.Close")
- cc.closeForError(err)
+ cc.closeForError(errClientConnForceClosed)
return nil
}
@@ -1427,7 +1376,6 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
//
// It sends the request and performs post-request cleanup (closing Request.Body, etc.).
func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) {
- cs.cc.t.markNewGoroutine()
err := cs.writeRequest(req, streamf)
cs.cleanupWriteRequest(err)
}
@@ -1558,9 +1506,9 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre
var respHeaderTimer <-chan time.Time
var respHeaderRecv chan struct{}
if d := cc.responseHeaderTimeout(); d != 0 {
- timer := cc.t.newTimer(d)
+ timer := time.NewTimer(d)
defer timer.Stop()
- respHeaderTimer = timer.C()
+ respHeaderTimer = timer.C
respHeaderRecv = cs.respHeaderRecv
}
// Wait until the peer half-closes its end of the stream,
@@ -1753,7 +1701,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error {
// Return a fatal error which aborts the retry loop.
return errClientConnNotEstablished
}
- cc.lastActive = cc.t.now()
+ cc.lastActive = time.Now()
if cc.closed || !cc.canTakeNewRequestLocked() {
return errClientConnUnusable
}
@@ -2092,10 +2040,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) {
if len(cc.streams) != slen-1 {
panic("forgetting unknown stream id")
}
- cc.lastActive = cc.t.now()
+ cc.lastActive = time.Now()
if len(cc.streams) == 0 && cc.idleTimer != nil {
cc.idleTimer.Reset(cc.idleTimeout)
- cc.lastIdle = cc.t.now()
+ cc.lastIdle = time.Now()
}
// Wake up writeRequestBody via clientStream.awaitFlowControl and
// wake up RoundTrip if there is a pending request.
@@ -2121,7 +2069,6 @@ type clientConnReadLoop struct {
// readLoop runs in its own goroutine and reads and dispatches frames.
func (cc *ClientConn) readLoop() {
- cc.t.markNewGoroutine()
rl := &clientConnReadLoop{cc: cc}
defer rl.cleanup()
cc.readerErr = rl.run()
@@ -2188,9 +2135,9 @@ func (rl *clientConnReadLoop) cleanup() {
if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout {
unusedWaitTime = cc.idleTimeout
}
- idleTime := cc.t.now().Sub(cc.lastActive)
+ idleTime := time.Now().Sub(cc.lastActive)
if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle {
- cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() {
+ cc.idleTimer = time.AfterFunc(unusedWaitTime-idleTime, func() {
cc.t.connPool().MarkDead(cc)
})
} else {
@@ -2250,9 +2197,9 @@ func (rl *clientConnReadLoop) run() error {
cc := rl.cc
gotSettings := false
readIdleTimeout := cc.readIdleTimeout
- var t timer
+ var t *time.Timer
if readIdleTimeout != 0 {
- t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck)
+ t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
}
for {
f, err := cc.fr.ReadFrame()
@@ -2998,7 +2945,6 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
var pingError error
errc := make(chan struct{})
go func() {
- cc.t.markNewGoroutine()
cc.wmu.Lock()
defer cc.wmu.Unlock()
if pingError = cc.fr.WritePing(false, p); pingError != nil {
@@ -3228,7 +3174,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) {
cc.mu.Lock()
ci.WasIdle = len(cc.streams) == 0 && reused
if ci.WasIdle && !cc.lastActive.IsZero() {
- ci.IdleTime = cc.t.timeSince(cc.lastActive)
+ ci.IdleTime = time.Since(cc.lastActive)
}
cc.mu.Unlock()
diff --git a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s
new file mode 100644
index 0000000000..269e173ca4
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s
@@ -0,0 +1,17 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+
+#include "textflag.h"
+
+//
+// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go
+//
+
+TEXT ·syscall6(SB),NOSPLIT,$0-88
+ JMP syscall·syscall6(SB)
+
+TEXT ·rawSyscall6(SB),NOSPLIT,$0-88
+ JMP syscall·rawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s b/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s
new file mode 100644
index 0000000000..ec2acfe540
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s
@@ -0,0 +1,17 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin && amd64 && gc
+
+#include "textflag.h"
+
+TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_sysctl(SB)
+GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
+DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
+
+TEXT libc_sysctlbyname_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_sysctlbyname(SB)
+GLOBL ·libc_sysctlbyname_trampoline_addr(SB), RODATA, $8
+DATA ·libc_sysctlbyname_trampoline_addr(SB)/8, $libc_sysctlbyname_trampoline<>(SB)
diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go
new file mode 100644
index 0000000000..271055be0b
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/byteorder.go
@@ -0,0 +1,66 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import (
+ "runtime"
+)
+
+// byteOrder is a subset of encoding/binary.ByteOrder.
+type byteOrder interface {
+ Uint32([]byte) uint32
+ Uint64([]byte) uint64
+}
+
+type littleEndian struct{}
+type bigEndian struct{}
+
+func (littleEndian) Uint32(b []byte) uint32 {
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func (littleEndian) Uint64(b []byte) uint64 {
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+func (bigEndian) Uint32(b []byte) uint32 {
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+}
+
+func (bigEndian) Uint64(b []byte) uint64 {
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+ uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+}
+
+// hostByteOrder returns littleEndian on little-endian machines and
+// bigEndian on big-endian machines.
+func hostByteOrder() byteOrder {
+ switch runtime.GOARCH {
+ case "386", "amd64", "amd64p32",
+ "alpha",
+ "arm", "arm64",
+ "loong64",
+ "mipsle", "mips64le", "mips64p32le",
+ "nios2",
+ "ppc64le",
+ "riscv", "riscv64",
+ "sh":
+ return littleEndian{}
+ case "armbe", "arm64be",
+ "m68k",
+ "mips", "mips64", "mips64p32",
+ "ppc", "ppc64",
+ "s390", "s390x",
+ "shbe",
+ "sparc", "sparc64":
+ return bigEndian{}
+ }
+ panic("unknown architecture")
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go
new file mode 100644
index 0000000000..63541994ef
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu.go
@@ -0,0 +1,338 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cpu implements processor feature detection for
+// various CPU architectures.
+package cpu
+
+import (
+ "os"
+ "strings"
+)
+
+// Initialized reports whether the CPU features were initialized.
+//
+// For some GOOS/GOARCH combinations initialization of the CPU features depends
+// on reading an operating specific file, e.g. /proc/self/auxv on linux/arm
+// Initialized will report false if reading the file fails.
+var Initialized bool
+
+// CacheLinePad is used to pad structs to avoid false sharing.
+type CacheLinePad struct{ _ [cacheLineSize]byte }
+
+// X86 contains the supported CPU features of the
+// current X86/AMD64 platform. If the current platform
+// is not X86/AMD64 then all feature flags are false.
+//
+// X86 is padded to avoid false sharing. Further the HasAVX
+// and HasAVX2 are only set if the OS supports XMM and YMM
+// registers in addition to the CPUID feature bit being set.
+var X86 struct {
+ _ CacheLinePad
+ HasAES bool // AES hardware implementation (AES NI)
+ HasADX bool // Multi-precision add-carry instruction extensions
+ HasAVX bool // Advanced vector extension
+ HasAVX2 bool // Advanced vector extension 2
+ HasAVX512 bool // Advanced vector extension 512
+ HasAVX512F bool // Advanced vector extension 512 Foundation Instructions
+ HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions
+ HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions
+ HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions
+ HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions
+ HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions
+ HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions
+ HasAVX512IFMA bool // Advanced vector extension 512 Integer Fused Multiply Add
+ HasAVX512VBMI bool // Advanced vector extension 512 Vector Byte Manipulation Instructions
+ HasAVX5124VNNIW bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision
+ HasAVX5124FMAPS bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision
+ HasAVX512VPOPCNTDQ bool // Advanced vector extension 512 Double and quad word population count instructions
+ HasAVX512VPCLMULQDQ bool // Advanced vector extension 512 Vector carry-less multiply operations
+ HasAVX512VNNI bool // Advanced vector extension 512 Vector Neural Network Instructions
+ HasAVX512GFNI bool // Advanced vector extension 512 Galois field New Instructions
+ HasAVX512VAES bool // Advanced vector extension 512 Vector AES instructions
+ HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2
+ HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms
+ HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions
+ HasAMXTile bool // Advanced Matrix Extension Tile instructions
+ HasAMXInt8 bool // Advanced Matrix Extension Int8 instructions
+ HasAMXBF16 bool // Advanced Matrix Extension BFloat16 instructions
+ HasBMI1 bool // Bit manipulation instruction set 1
+ HasBMI2 bool // Bit manipulation instruction set 2
+ HasCX16 bool // Compare and exchange 16 Bytes
+ HasERMS bool // Enhanced REP for MOVSB and STOSB
+ HasFMA bool // Fused-multiply-add instructions
+ HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers.
+ HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM
+ HasPOPCNT bool // Hamming weight instruction POPCNT.
+ HasRDRAND bool // RDRAND instruction (on-chip random number generator)
+ HasRDSEED bool // RDSEED instruction (on-chip random number generator)
+ HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64)
+ HasSSE3 bool // Streaming SIMD extension 3
+ HasSSSE3 bool // Supplemental streaming SIMD extension 3
+ HasSSE41 bool // Streaming SIMD extension 4 and 4.1
+ HasSSE42 bool // Streaming SIMD extension 4 and 4.2
+ HasAVXIFMA bool // Advanced vector extension Integer Fused Multiply Add
+ HasAVXVNNI bool // Advanced vector extension Vector Neural Network Instructions
+ HasAVXVNNIInt8 bool // Advanced vector extension Vector Neural Network Int8 instructions
+ _ CacheLinePad
+}
+
+// ARM64 contains the supported CPU features of the
+// current ARMv8(aarch64) platform. If the current platform
+// is not arm64 then all feature flags are false.
+var ARM64 struct {
+ _ CacheLinePad
+ HasFP bool // Floating-point instruction set (always available)
+ HasASIMD bool // Advanced SIMD (always available)
+ HasEVTSTRM bool // Event stream support
+ HasAES bool // AES hardware implementation
+ HasPMULL bool // Polynomial multiplication instruction set
+ HasSHA1 bool // SHA1 hardware implementation
+ HasSHA2 bool // SHA2 hardware implementation
+ HasCRC32 bool // CRC32 hardware implementation
+ HasATOMICS bool // Atomic memory operation instruction set
+ HasFPHP bool // Half precision floating-point instruction set
+ HasASIMDHP bool // Advanced SIMD half precision instruction set
+ HasCPUID bool // CPUID identification scheme registers
+ HasASIMDRDM bool // Rounding double multiply add/subtract instruction set
+ HasJSCVT bool // Javascript conversion from floating-point to integer
+ HasFCMA bool // Floating-point multiplication and addition of complex numbers
+ HasLRCPC bool // Release Consistent processor consistent support
+ HasDCPOP bool // Persistent memory support
+ HasSHA3 bool // SHA3 hardware implementation
+ HasSM3 bool // SM3 hardware implementation
+ HasSM4 bool // SM4 hardware implementation
+ HasASIMDDP bool // Advanced SIMD double precision instruction set
+ HasSHA512 bool // SHA512 hardware implementation
+ HasSVE bool // Scalable Vector Extensions
+ HasSVE2 bool // Scalable Vector Extensions 2
+ HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32
+ HasDIT bool // Data Independent Timing support
+ HasI8MM bool // Advanced SIMD Int8 matrix multiplication instructions
+ _ CacheLinePad
+}
+
+// ARM contains the supported CPU features of the current ARM (32-bit) platform.
+// All feature flags are false if:
+// 1. the current platform is not arm, or
+// 2. the current operating system is not Linux.
+var ARM struct {
+ _ CacheLinePad
+ HasSWP bool // SWP instruction support
+ HasHALF bool // Half-word load and store support
+ HasTHUMB bool // ARM Thumb instruction set
+ Has26BIT bool // Address space limited to 26-bits
+ HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support
+ HasFPA bool // Floating point arithmetic support
+ HasVFP bool // Vector floating point support
+ HasEDSP bool // DSP Extensions support
+ HasJAVA bool // Java instruction set
+ HasIWMMXT bool // Intel Wireless MMX technology support
+ HasCRUNCH bool // MaverickCrunch context switching and handling
+ HasTHUMBEE bool // Thumb EE instruction set
+ HasNEON bool // NEON instruction set
+ HasVFPv3 bool // Vector floating point version 3 support
+ HasVFPv3D16 bool // Vector floating point version 3 D8-D15
+ HasTLS bool // Thread local storage support
+ HasVFPv4 bool // Vector floating point version 4 support
+ HasIDIVA bool // Integer divide instruction support in ARM mode
+ HasIDIVT bool // Integer divide instruction support in Thumb mode
+ HasVFPD32 bool // Vector floating point version 3 D15-D31
+ HasLPAE bool // Large Physical Address Extensions
+ HasEVTSTRM bool // Event stream support
+ HasAES bool // AES hardware implementation
+ HasPMULL bool // Polynomial multiplication instruction set
+ HasSHA1 bool // SHA1 hardware implementation
+ HasSHA2 bool // SHA2 hardware implementation
+ HasCRC32 bool // CRC32 hardware implementation
+ _ CacheLinePad
+}
+
+// The booleans in Loong64 contain the correspondingly named cpu feature bit.
+// The struct is padded to avoid false sharing.
+var Loong64 struct {
+ _ CacheLinePad
+ HasLSX bool // support 128-bit vector extension
+ HasLASX bool // support 256-bit vector extension
+ HasCRC32 bool // support CRC instruction
+ HasLAM_BH bool // support AM{SWAP/ADD}[_DB].{B/H} instruction
+ HasLAMCAS bool // support AMCAS[_DB].{B/H/W/D} instruction
+ _ CacheLinePad
+}
+
+// MIPS64X contains the supported CPU features of the current mips64/mips64le
+// platforms. If the current platform is not mips64/mips64le or the current
+// operating system is not Linux then all feature flags are false.
+var MIPS64X struct {
+ _ CacheLinePad
+ HasMSA bool // MIPS SIMD architecture
+ _ CacheLinePad
+}
+
+// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms.
+// If the current platform is not ppc64/ppc64le then all feature flags are false.
+//
+// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00,
+// since there are no optional categories. There are some exceptions that also
+// require kernel support to work (DARN, SCV), so there are feature bits for
+// those as well. The struct is padded to avoid false sharing.
+var PPC64 struct {
+ _ CacheLinePad
+ HasDARN bool // Hardware random number generator (requires kernel enablement)
+ HasSCV bool // Syscall vectored (requires kernel enablement)
+ IsPOWER8 bool // ISA v2.07 (POWER8)
+ IsPOWER9 bool // ISA v3.00 (POWER9), implies IsPOWER8
+ _ CacheLinePad
+}
+
+// S390X contains the supported CPU features of the current IBM Z
+// (s390x) platform. If the current platform is not IBM Z then all
+// feature flags are false.
+//
+// S390X is padded to avoid false sharing. Further HasVX is only set
+// if the OS supports vector registers in addition to the STFLE
+// feature bit being set.
+var S390X struct {
+ _ CacheLinePad
+ HasZARCH bool // z/Architecture mode is active [mandatory]
+ HasSTFLE bool // store facility list extended
+ HasLDISP bool // long (20-bit) displacements
+ HasEIMM bool // 32-bit immediates
+ HasDFP bool // decimal floating point
+ HasETF3EH bool // ETF-3 enhanced
+ HasMSA bool // message security assist (CPACF)
+ HasAES bool // KM-AES{128,192,256} functions
+ HasAESCBC bool // KMC-AES{128,192,256} functions
+ HasAESCTR bool // KMCTR-AES{128,192,256} functions
+ HasAESGCM bool // KMA-GCM-AES{128,192,256} functions
+ HasGHASH bool // KIMD-GHASH function
+ HasSHA1 bool // K{I,L}MD-SHA-1 functions
+ HasSHA256 bool // K{I,L}MD-SHA-256 functions
+ HasSHA512 bool // K{I,L}MD-SHA-512 functions
+ HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions
+ HasVX bool // vector facility
+ HasVXE bool // vector-enhancements facility 1
+ _ CacheLinePad
+}
+
+// RISCV64 contains the supported CPU features and performance characteristics for riscv64
+// platforms. The booleans in RISCV64, with the exception of HasFastMisaligned, indicate
+// the presence of RISC-V extensions.
+//
+// It is safe to assume that all the RV64G extensions are supported and so they are omitted from
+// this structure. As riscv64 Go programs require at least RV64G, the code that populates
+// this structure cannot run successfully if some of the RV64G extensions are missing.
+// The struct is padded to avoid false sharing.
+var RISCV64 struct {
+ _ CacheLinePad
+ HasFastMisaligned bool // Fast misaligned accesses
+ HasC bool // Compressed instruction-set extension
+ HasV bool // Vector extension compatible with RVV 1.0
+ HasZba bool // Address generation instructions extension
+ HasZbb bool // Basic bit-manipulation extension
+ HasZbs bool // Single-bit instructions extension
+ HasZvbb bool // Vector Basic Bit-manipulation
+ HasZvbc bool // Vector Carryless Multiplication
+ HasZvkb bool // Vector Cryptography Bit-manipulation
+ HasZvkt bool // Vector Data-Independent Execution Latency
+ HasZvkg bool // Vector GCM/GMAC
+ HasZvkn bool // NIST Algorithm Suite (AES/SHA256/SHA512)
+ HasZvknc bool // NIST Algorithm Suite with carryless multiply
+ HasZvkng bool // NIST Algorithm Suite with GCM
+ HasZvks bool // ShangMi Algorithm Suite
+ HasZvksc bool // ShangMi Algorithm Suite with carryless multiplication
+ HasZvksg bool // ShangMi Algorithm Suite with GCM
+ _ CacheLinePad
+}
+
+func init() {
+ archInit()
+ initOptions()
+ processOptions()
+}
+
+// options contains the cpu debug options that can be used in GODEBUG.
+// Options are arch dependent and are added by the arch specific initOptions functions.
+// Features that are mandatory for the specific GOARCH should have the Required field set
+// (e.g. SSE2 on amd64).
+var options []option
+
+// Option names should be lower case. e.g. avx instead of AVX.
+type option struct {
+ Name string
+ Feature *bool
+ Specified bool // whether feature value was specified in GODEBUG
+ Enable bool // whether feature should be enabled
+ Required bool // whether feature is mandatory and can not be disabled
+}
+
+func processOptions() {
+ env := os.Getenv("GODEBUG")
+field:
+ for env != "" {
+ field := ""
+ i := strings.IndexByte(env, ',')
+ if i < 0 {
+ field, env = env, ""
+ } else {
+ field, env = env[:i], env[i+1:]
+ }
+ if len(field) < 4 || field[:4] != "cpu." {
+ continue
+ }
+ i = strings.IndexByte(field, '=')
+ if i < 0 {
+ print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n")
+ continue
+ }
+ key, value := field[4:i], field[i+1:] // e.g. "SSE2", "on"
+
+ var enable bool
+ switch value {
+ case "on":
+ enable = true
+ case "off":
+ enable = false
+ default:
+ print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n")
+ continue field
+ }
+
+ if key == "all" {
+ for i := range options {
+ options[i].Specified = true
+ options[i].Enable = enable || options[i].Required
+ }
+ continue field
+ }
+
+ for i := range options {
+ if options[i].Name == key {
+ options[i].Specified = true
+ options[i].Enable = enable
+ continue field
+ }
+ }
+
+ print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n")
+ }
+
+ for _, o := range options {
+ if !o.Specified {
+ continue
+ }
+
+ if o.Enable && !*o.Feature {
+ print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n")
+ continue
+ }
+
+ if !o.Enable && o.Required {
+ print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n")
+ continue
+ }
+
+ *o.Feature = o.Enable
+ }
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix.go b/vendor/golang.org/x/sys/cpu/cpu_aix.go
new file mode 100644
index 0000000000..9bf0c32eb6
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_aix.go
@@ -0,0 +1,33 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix
+
+package cpu
+
+const (
+ // getsystemcfg constants
+ _SC_IMPL = 2
+ _IMPL_POWER8 = 0x10000
+ _IMPL_POWER9 = 0x20000
+)
+
+func archInit() {
+ impl := getsystemcfg(_SC_IMPL)
+ if impl&_IMPL_POWER8 != 0 {
+ PPC64.IsPOWER8 = true
+ }
+ if impl&_IMPL_POWER9 != 0 {
+ PPC64.IsPOWER8 = true
+ PPC64.IsPOWER9 = true
+ }
+
+ Initialized = true
+}
+
+func getsystemcfg(label int) (n uint64) {
+ r0, _ := callgetsystemcfg(label)
+ n = uint64(r0)
+ return
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm.go b/vendor/golang.org/x/sys/cpu/cpu_arm.go
new file mode 100644
index 0000000000..301b752e9c
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_arm.go
@@ -0,0 +1,73 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+const cacheLineSize = 32
+
+// HWCAP/HWCAP2 bits.
+// These are specific to Linux.
+const (
+ hwcap_SWP = 1 << 0
+ hwcap_HALF = 1 << 1
+ hwcap_THUMB = 1 << 2
+ hwcap_26BIT = 1 << 3
+ hwcap_FAST_MULT = 1 << 4
+ hwcap_FPA = 1 << 5
+ hwcap_VFP = 1 << 6
+ hwcap_EDSP = 1 << 7
+ hwcap_JAVA = 1 << 8
+ hwcap_IWMMXT = 1 << 9
+ hwcap_CRUNCH = 1 << 10
+ hwcap_THUMBEE = 1 << 11
+ hwcap_NEON = 1 << 12
+ hwcap_VFPv3 = 1 << 13
+ hwcap_VFPv3D16 = 1 << 14
+ hwcap_TLS = 1 << 15
+ hwcap_VFPv4 = 1 << 16
+ hwcap_IDIVA = 1 << 17
+ hwcap_IDIVT = 1 << 18
+ hwcap_VFPD32 = 1 << 19
+ hwcap_LPAE = 1 << 20
+ hwcap_EVTSTRM = 1 << 21
+
+ hwcap2_AES = 1 << 0
+ hwcap2_PMULL = 1 << 1
+ hwcap2_SHA1 = 1 << 2
+ hwcap2_SHA2 = 1 << 3
+ hwcap2_CRC32 = 1 << 4
+)
+
+func initOptions() {
+ options = []option{
+ {Name: "pmull", Feature: &ARM.HasPMULL},
+ {Name: "sha1", Feature: &ARM.HasSHA1},
+ {Name: "sha2", Feature: &ARM.HasSHA2},
+ {Name: "swp", Feature: &ARM.HasSWP},
+ {Name: "thumb", Feature: &ARM.HasTHUMB},
+ {Name: "thumbee", Feature: &ARM.HasTHUMBEE},
+ {Name: "tls", Feature: &ARM.HasTLS},
+ {Name: "vfp", Feature: &ARM.HasVFP},
+ {Name: "vfpd32", Feature: &ARM.HasVFPD32},
+ {Name: "vfpv3", Feature: &ARM.HasVFPv3},
+ {Name: "vfpv3d16", Feature: &ARM.HasVFPv3D16},
+ {Name: "vfpv4", Feature: &ARM.HasVFPv4},
+ {Name: "half", Feature: &ARM.HasHALF},
+ {Name: "26bit", Feature: &ARM.Has26BIT},
+ {Name: "fastmul", Feature: &ARM.HasFASTMUL},
+ {Name: "fpa", Feature: &ARM.HasFPA},
+ {Name: "edsp", Feature: &ARM.HasEDSP},
+ {Name: "java", Feature: &ARM.HasJAVA},
+ {Name: "iwmmxt", Feature: &ARM.HasIWMMXT},
+ {Name: "crunch", Feature: &ARM.HasCRUNCH},
+ {Name: "neon", Feature: &ARM.HasNEON},
+ {Name: "idivt", Feature: &ARM.HasIDIVT},
+ {Name: "idiva", Feature: &ARM.HasIDIVA},
+ {Name: "lpae", Feature: &ARM.HasLPAE},
+ {Name: "evtstrm", Feature: &ARM.HasEVTSTRM},
+ {Name: "aes", Feature: &ARM.HasAES},
+ {Name: "crc32", Feature: &ARM.HasCRC32},
+ }
+
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go
new file mode 100644
index 0000000000..af2aa99f9f
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go
@@ -0,0 +1,194 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import "runtime"
+
+// cacheLineSize is used to prevent false sharing of cache lines.
+// We choose 128 because Apple Silicon, a.k.a. M1, has 128-byte cache line size.
+// It doesn't cost much and is much more future-proof.
+const cacheLineSize = 128
+
+func initOptions() {
+ options = []option{
+ {Name: "fp", Feature: &ARM64.HasFP},
+ {Name: "asimd", Feature: &ARM64.HasASIMD},
+ {Name: "evstrm", Feature: &ARM64.HasEVTSTRM},
+ {Name: "aes", Feature: &ARM64.HasAES},
+ {Name: "fphp", Feature: &ARM64.HasFPHP},
+ {Name: "jscvt", Feature: &ARM64.HasJSCVT},
+ {Name: "lrcpc", Feature: &ARM64.HasLRCPC},
+ {Name: "pmull", Feature: &ARM64.HasPMULL},
+ {Name: "sha1", Feature: &ARM64.HasSHA1},
+ {Name: "sha2", Feature: &ARM64.HasSHA2},
+ {Name: "sha3", Feature: &ARM64.HasSHA3},
+ {Name: "sha512", Feature: &ARM64.HasSHA512},
+ {Name: "sm3", Feature: &ARM64.HasSM3},
+ {Name: "sm4", Feature: &ARM64.HasSM4},
+ {Name: "sve", Feature: &ARM64.HasSVE},
+ {Name: "sve2", Feature: &ARM64.HasSVE2},
+ {Name: "crc32", Feature: &ARM64.HasCRC32},
+ {Name: "atomics", Feature: &ARM64.HasATOMICS},
+ {Name: "asimdhp", Feature: &ARM64.HasASIMDHP},
+ {Name: "cpuid", Feature: &ARM64.HasCPUID},
+ {Name: "asimrdm", Feature: &ARM64.HasASIMDRDM},
+ {Name: "fcma", Feature: &ARM64.HasFCMA},
+ {Name: "dcpop", Feature: &ARM64.HasDCPOP},
+ {Name: "asimddp", Feature: &ARM64.HasASIMDDP},
+ {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM},
+ {Name: "dit", Feature: &ARM64.HasDIT},
+ {Name: "i8mm", Feature: &ARM64.HasI8MM},
+ }
+}
+
+func archInit() {
+ switch runtime.GOOS {
+ case "freebsd":
+ readARM64Registers()
+ case "linux", "netbsd", "openbsd":
+ doinit()
+ default:
+ // Many platforms don't seem to allow reading these registers.
+ setMinimalFeatures()
+ }
+}
+
+// setMinimalFeatures fakes the minimal ARM64 features expected by
+// TestARM64minimalFeatures.
+func setMinimalFeatures() {
+ ARM64.HasASIMD = true
+ ARM64.HasFP = true
+}
+
+func readARM64Registers() {
+ Initialized = true
+
+ parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0())
+}
+
+func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) {
+ // ID_AA64ISAR0_EL1
+ switch extractBits(isar0, 4, 7) {
+ case 1:
+ ARM64.HasAES = true
+ case 2:
+ ARM64.HasAES = true
+ ARM64.HasPMULL = true
+ }
+
+ switch extractBits(isar0, 8, 11) {
+ case 1:
+ ARM64.HasSHA1 = true
+ }
+
+ switch extractBits(isar0, 12, 15) {
+ case 1:
+ ARM64.HasSHA2 = true
+ case 2:
+ ARM64.HasSHA2 = true
+ ARM64.HasSHA512 = true
+ }
+
+ switch extractBits(isar0, 16, 19) {
+ case 1:
+ ARM64.HasCRC32 = true
+ }
+
+ switch extractBits(isar0, 20, 23) {
+ case 2:
+ ARM64.HasATOMICS = true
+ }
+
+ switch extractBits(isar0, 28, 31) {
+ case 1:
+ ARM64.HasASIMDRDM = true
+ }
+
+ switch extractBits(isar0, 32, 35) {
+ case 1:
+ ARM64.HasSHA3 = true
+ }
+
+ switch extractBits(isar0, 36, 39) {
+ case 1:
+ ARM64.HasSM3 = true
+ }
+
+ switch extractBits(isar0, 40, 43) {
+ case 1:
+ ARM64.HasSM4 = true
+ }
+
+ switch extractBits(isar0, 44, 47) {
+ case 1:
+ ARM64.HasASIMDDP = true
+ }
+
+ // ID_AA64ISAR1_EL1
+ switch extractBits(isar1, 0, 3) {
+ case 1:
+ ARM64.HasDCPOP = true
+ }
+
+ switch extractBits(isar1, 12, 15) {
+ case 1:
+ ARM64.HasJSCVT = true
+ }
+
+ switch extractBits(isar1, 16, 19) {
+ case 1:
+ ARM64.HasFCMA = true
+ }
+
+ switch extractBits(isar1, 20, 23) {
+ case 1:
+ ARM64.HasLRCPC = true
+ }
+
+ switch extractBits(isar1, 52, 55) {
+ case 1:
+ ARM64.HasI8MM = true
+ }
+
+ // ID_AA64PFR0_EL1
+ switch extractBits(pfr0, 16, 19) {
+ case 0:
+ ARM64.HasFP = true
+ case 1:
+ ARM64.HasFP = true
+ ARM64.HasFPHP = true
+ }
+
+ switch extractBits(pfr0, 20, 23) {
+ case 0:
+ ARM64.HasASIMD = true
+ case 1:
+ ARM64.HasASIMD = true
+ ARM64.HasASIMDHP = true
+ }
+
+ switch extractBits(pfr0, 32, 35) {
+ case 1:
+ ARM64.HasSVE = true
+
+ parseARM64SVERegister(getzfr0())
+ }
+
+ switch extractBits(pfr0, 48, 51) {
+ case 1:
+ ARM64.HasDIT = true
+ }
+}
+
+func parseARM64SVERegister(zfr0 uint64) {
+ switch extractBits(zfr0, 0, 3) {
+ case 1:
+ ARM64.HasSVE2 = true
+ }
+}
+
+func extractBits(data uint64, start, end uint) uint {
+ return (uint)(data>>start) & ((1 << (end - start + 1)) - 1)
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s
new file mode 100644
index 0000000000..22cc99844a
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s
@@ -0,0 +1,39 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+
+#include "textflag.h"
+
+// func getisar0() uint64
+TEXT ·getisar0(SB),NOSPLIT,$0-8
+ // get Instruction Set Attributes 0 into x0
+ // mrs x0, ID_AA64ISAR0_EL1 = d5380600
+ WORD $0xd5380600
+ MOVD R0, ret+0(FP)
+ RET
+
+// func getisar1() uint64
+TEXT ·getisar1(SB),NOSPLIT,$0-8
+ // get Instruction Set Attributes 1 into x0
+ // mrs x0, ID_AA64ISAR1_EL1 = d5380620
+ WORD $0xd5380620
+ MOVD R0, ret+0(FP)
+ RET
+
+// func getpfr0() uint64
+TEXT ·getpfr0(SB),NOSPLIT,$0-8
+ // get Processor Feature Register 0 into x0
+ // mrs x0, ID_AA64PFR0_EL1 = d5380400
+ WORD $0xd5380400
+ MOVD R0, ret+0(FP)
+ RET
+
+// func getzfr0() uint64
+TEXT ·getzfr0(SB),NOSPLIT,$0-8
+ // get SVE Feature Register 0 into x0
+ // mrs x0, ID_AA64ZFR0_EL1 = d5380480
+ WORD $0xd5380480
+ MOVD R0, ret+0(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go b/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go
new file mode 100644
index 0000000000..b838cb9e95
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go
@@ -0,0 +1,61 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin && amd64 && gc
+
+package cpu
+
+// darwinSupportsAVX512 checks Darwin kernel for AVX512 support via sysctl
+// call (see issue 43089). It also restricts AVX512 support for Darwin to
+// kernel version 21.3.0 (MacOS 12.2.0) or later (see issue 49233).
+//
+// Background:
+// Darwin implements a special mechanism to economize on thread state when
+// AVX512 specific registers are not in use. This scheme minimizes state when
+// preempting threads that haven't yet used any AVX512 instructions, but adds
+// special requirements to check for AVX512 hardware support at runtime (e.g.
+// via sysctl call or commpage inspection). See issue 43089 and link below for
+// full background:
+// https://github.com/apple-oss-distributions/xnu/blob/xnu-11215.1.10/osfmk/i386/fpu.c#L214-L240
+//
+// Additionally, all versions of the Darwin kernel from 19.6.0 through 21.2.0
+// (corresponding to MacOS 10.15.6 - 12.1) have a bug that can cause corruption
+// of the AVX512 mask registers (K0-K7) upon signal return. For this reason
+// AVX512 is considered unsafe to use on Darwin for kernel versions prior to
+// 21.3.0, where a fix has been confirmed. See issue 49233 for full background.
+func darwinSupportsAVX512() bool {
+ return darwinSysctlEnabled([]byte("hw.optional.avx512f\x00")) && darwinKernelVersionCheck(21, 3, 0)
+}
+
+// Ensure Darwin kernel version is at least major.minor.patch, avoiding dependencies
+func darwinKernelVersionCheck(major, minor, patch int) bool {
+ var release [256]byte
+ err := darwinOSRelease(&release)
+ if err != nil {
+ return false
+ }
+
+ var mmp [3]int
+ c := 0
+Loop:
+ for _, b := range release[:] {
+ switch {
+ case b >= '0' && b <= '9':
+ mmp[c] = 10*mmp[c] + int(b-'0')
+ case b == '.':
+ c++
+ if c > 2 {
+ return false
+ }
+ case b == 0:
+ break Loop
+ default:
+ return false
+ }
+ }
+ if c != 2 {
+ return false
+ }
+ return mmp[0] > major || mmp[0] == major && (mmp[1] > minor || mmp[1] == minor && mmp[2] >= patch)
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go
new file mode 100644
index 0000000000..6ac6e1efb2
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go
@@ -0,0 +1,12 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+
+package cpu
+
+func getisar0() uint64
+func getisar1() uint64
+func getpfr0() uint64
+func getzfr0() uint64
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go
new file mode 100644
index 0000000000..c8ae6ddc15
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go
@@ -0,0 +1,21 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+
+package cpu
+
+// haveAsmFunctions reports whether the other functions in this file can
+// be safely called.
+func haveAsmFunctions() bool { return true }
+
+// The following feature detection functions are defined in cpu_s390x.s.
+// They are likely to be expensive to call so the results should be cached.
+func stfle() facilityList
+func kmQuery() queryResult
+func kmcQuery() queryResult
+func kmctrQuery() queryResult
+func kmaQuery() queryResult
+func kimdQuery() queryResult
+func klmdQuery() queryResult
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
new file mode 100644
index 0000000000..32a44514e2
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
@@ -0,0 +1,15 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (386 || amd64 || amd64p32) && gc
+
+package cpu
+
+// cpuid is implemented in cpu_gc_x86.s for gc compiler
+// and in cpu_gccgo.c for gccgo.
+func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
+
+// xgetbv with ecx = 0 is implemented in cpu_gc_x86.s for gc compiler
+// and in cpu_gccgo.c for gccgo.
+func xgetbv() (eax, edx uint32)
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s
new file mode 100644
index 0000000000..ce208ce6d6
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s
@@ -0,0 +1,26 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (386 || amd64 || amd64p32) && gc
+
+#include "textflag.h"
+
+// func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·cpuid(SB), NOSPLIT, $0-24
+ MOVL eaxArg+0(FP), AX
+ MOVL ecxArg+4(FP), CX
+ CPUID
+ MOVL AX, eax+8(FP)
+ MOVL BX, ebx+12(FP)
+ MOVL CX, ecx+16(FP)
+ MOVL DX, edx+20(FP)
+ RET
+
+// func xgetbv() (eax, edx uint32)
+TEXT ·xgetbv(SB), NOSPLIT, $0-8
+ MOVL $0, CX
+ XGETBV
+ MOVL AX, eax+0(FP)
+ MOVL DX, edx+4(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
new file mode 100644
index 0000000000..7f1946780b
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
@@ -0,0 +1,11 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gccgo
+
+package cpu
+
+func getisar0() uint64 { return 0 }
+func getisar1() uint64 { return 0 }
+func getpfr0() uint64 { return 0 }
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
new file mode 100644
index 0000000000..9526d2ce3a
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
@@ -0,0 +1,22 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gccgo
+
+package cpu
+
+// haveAsmFunctions reports whether the other functions in this file can
+// be safely called.
+func haveAsmFunctions() bool { return false }
+
+// TODO(mundaym): the following feature detection functions are currently
+// stubs. See https://golang.org/cl/162887 for how to fix this.
+// They are likely to be expensive to call so the results should be cached.
+func stfle() facilityList { panic("not implemented for gccgo") }
+func kmQuery() queryResult { panic("not implemented for gccgo") }
+func kmcQuery() queryResult { panic("not implemented for gccgo") }
+func kmctrQuery() queryResult { panic("not implemented for gccgo") }
+func kmaQuery() queryResult { panic("not implemented for gccgo") }
+func kimdQuery() queryResult { panic("not implemented for gccgo") }
+func klmdQuery() queryResult { panic("not implemented for gccgo") }
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
new file mode 100644
index 0000000000..3f73a05dcf
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
@@ -0,0 +1,37 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (386 || amd64 || amd64p32) && gccgo
+
+#include
+#include
+#include
+
+// Need to wrap __get_cpuid_count because it's declared as static.
+int
+gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
+{
+ return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx);
+}
+
+#pragma GCC diagnostic ignored "-Wunknown-pragmas"
+#pragma GCC push_options
+#pragma GCC target("xsave")
+#pragma clang attribute push (__attribute__((target("xsave"))), apply_to=function)
+
+// xgetbv reads the contents of an XCR (Extended Control Register)
+// specified in the ECX register into registers EDX:EAX.
+// Currently, the only supported value for XCR is 0.
+void
+gccgoXgetbv(uint32_t *eax, uint32_t *edx)
+{
+ uint64_t v = _xgetbv(0);
+ *eax = v & 0xffffffff;
+ *edx = v >> 32;
+}
+
+#pragma clang attribute pop
+#pragma GCC pop_options
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go
new file mode 100644
index 0000000000..170d21ddfd
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go
@@ -0,0 +1,25 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (386 || amd64 || amd64p32) && gccgo
+
+package cpu
+
+//extern gccgoGetCpuidCount
+func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32)
+
+func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) {
+ var a, b, c, d uint32
+ gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d)
+ return a, b, c, d
+}
+
+//extern gccgoXgetbv
+func gccgoXgetbv(eax, edx *uint32)
+
+func xgetbv() (eax, edx uint32) {
+ var a, d uint32
+ gccgoXgetbv(&a, &d)
+ return a, d
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go
new file mode 100644
index 0000000000..743eb54354
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go
@@ -0,0 +1,15 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !386 && !amd64 && !amd64p32 && !arm64
+
+package cpu
+
+func archInit() {
+ if err := readHWCAP(); err != nil {
+ return
+ }
+ doinit()
+ Initialized = true
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go
new file mode 100644
index 0000000000..2057006dce
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go
@@ -0,0 +1,39 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+func doinit() {
+ ARM.HasSWP = isSet(hwCap, hwcap_SWP)
+ ARM.HasHALF = isSet(hwCap, hwcap_HALF)
+ ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB)
+ ARM.Has26BIT = isSet(hwCap, hwcap_26BIT)
+ ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT)
+ ARM.HasFPA = isSet(hwCap, hwcap_FPA)
+ ARM.HasVFP = isSet(hwCap, hwcap_VFP)
+ ARM.HasEDSP = isSet(hwCap, hwcap_EDSP)
+ ARM.HasJAVA = isSet(hwCap, hwcap_JAVA)
+ ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT)
+ ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH)
+ ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE)
+ ARM.HasNEON = isSet(hwCap, hwcap_NEON)
+ ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3)
+ ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16)
+ ARM.HasTLS = isSet(hwCap, hwcap_TLS)
+ ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4)
+ ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA)
+ ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT)
+ ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32)
+ ARM.HasLPAE = isSet(hwCap, hwcap_LPAE)
+ ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM)
+ ARM.HasAES = isSet(hwCap2, hwcap2_AES)
+ ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL)
+ ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1)
+ ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2)
+ ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32)
+}
+
+func isSet(hwc uint, value uint) bool {
+ return hwc&value != 0
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
new file mode 100644
index 0000000000..f1caf0f78e
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
@@ -0,0 +1,120 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import (
+ "strings"
+ "syscall"
+)
+
+// HWCAP/HWCAP2 bits. These are exposed by Linux.
+const (
+ hwcap_FP = 1 << 0
+ hwcap_ASIMD = 1 << 1
+ hwcap_EVTSTRM = 1 << 2
+ hwcap_AES = 1 << 3
+ hwcap_PMULL = 1 << 4
+ hwcap_SHA1 = 1 << 5
+ hwcap_SHA2 = 1 << 6
+ hwcap_CRC32 = 1 << 7
+ hwcap_ATOMICS = 1 << 8
+ hwcap_FPHP = 1 << 9
+ hwcap_ASIMDHP = 1 << 10
+ hwcap_CPUID = 1 << 11
+ hwcap_ASIMDRDM = 1 << 12
+ hwcap_JSCVT = 1 << 13
+ hwcap_FCMA = 1 << 14
+ hwcap_LRCPC = 1 << 15
+ hwcap_DCPOP = 1 << 16
+ hwcap_SHA3 = 1 << 17
+ hwcap_SM3 = 1 << 18
+ hwcap_SM4 = 1 << 19
+ hwcap_ASIMDDP = 1 << 20
+ hwcap_SHA512 = 1 << 21
+ hwcap_SVE = 1 << 22
+ hwcap_ASIMDFHM = 1 << 23
+ hwcap_DIT = 1 << 24
+
+ hwcap2_SVE2 = 1 << 1
+ hwcap2_I8MM = 1 << 13
+)
+
+// linuxKernelCanEmulateCPUID reports whether we're running
+// on Linux 4.11+. Ideally we'd like to ask the question about
+// whether the current kernel contains
+// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=77c97b4ee21290f5f083173d957843b615abbff2
+// but the version number will have to do.
+func linuxKernelCanEmulateCPUID() bool {
+ var un syscall.Utsname
+ syscall.Uname(&un)
+ var sb strings.Builder
+ for _, b := range un.Release[:] {
+ if b == 0 {
+ break
+ }
+ sb.WriteByte(byte(b))
+ }
+ major, minor, _, ok := parseRelease(sb.String())
+ return ok && (major > 4 || major == 4 && minor >= 11)
+}
+
+func doinit() {
+ if err := readHWCAP(); err != nil {
+ // We failed to read /proc/self/auxv. This can happen if the binary has
+ // been given extra capabilities(7) with /bin/setcap.
+ //
+ // When this happens, we have two options. If the Linux kernel is new
+ // enough (4.11+), we can read the arm64 registers directly which'll
+ // trap into the kernel and then return back to userspace.
+ //
+ // But on older kernels, such as Linux 4.4.180 as used on many Synology
+ // devices, calling readARM64Registers (specifically getisar0) will
+ // cause a SIGILL and we'll die. So for older kernels, parse /proc/cpuinfo
+ // instead.
+ //
+ // See golang/go#57336.
+ if linuxKernelCanEmulateCPUID() {
+ readARM64Registers()
+ } else {
+ readLinuxProcCPUInfo()
+ }
+ return
+ }
+
+ // HWCAP feature bits
+ ARM64.HasFP = isSet(hwCap, hwcap_FP)
+ ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD)
+ ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM)
+ ARM64.HasAES = isSet(hwCap, hwcap_AES)
+ ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL)
+ ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1)
+ ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2)
+ ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32)
+ ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS)
+ ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP)
+ ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP)
+ ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID)
+ ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM)
+ ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT)
+ ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA)
+ ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC)
+ ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP)
+ ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3)
+ ARM64.HasSM3 = isSet(hwCap, hwcap_SM3)
+ ARM64.HasSM4 = isSet(hwCap, hwcap_SM4)
+ ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP)
+ ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512)
+ ARM64.HasSVE = isSet(hwCap, hwcap_SVE)
+ ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM)
+ ARM64.HasDIT = isSet(hwCap, hwcap_DIT)
+
+ // HWCAP2 feature bits
+ ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2)
+ ARM64.HasI8MM = isSet(hwCap2, hwcap2_I8MM)
+}
+
+func isSet(hwc uint, value uint) bool {
+ return hwc&value != 0
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go
new file mode 100644
index 0000000000..4f34114329
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go
@@ -0,0 +1,22 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+// HWCAP bits. These are exposed by the Linux kernel.
+const (
+ hwcap_LOONGARCH_LSX = 1 << 4
+ hwcap_LOONGARCH_LASX = 1 << 5
+)
+
+func doinit() {
+ // TODO: Features that require kernel support like LSX and LASX can
+ // be detected here once needed in std library or by the compiler.
+ Loong64.HasLSX = hwcIsSet(hwCap, hwcap_LOONGARCH_LSX)
+ Loong64.HasLASX = hwcIsSet(hwCap, hwcap_LOONGARCH_LASX)
+}
+
+func hwcIsSet(hwc uint, val uint) bool {
+ return hwc&val != 0
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go
new file mode 100644
index 0000000000..4686c1d541
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go
@@ -0,0 +1,22 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (mips64 || mips64le)
+
+package cpu
+
+// HWCAP bits. These are exposed by the Linux kernel 5.4.
+const (
+ // CPU features
+ hwcap_MIPS_MSA = 1 << 1
+)
+
+func doinit() {
+ // HWCAP feature bits
+ MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA)
+}
+
+func isSet(hwc uint, value uint) bool {
+ return hwc&value != 0
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
new file mode 100644
index 0000000000..a428dec9cd
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
@@ -0,0 +1,9 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && !arm && !arm64 && !loong64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64
+
+package cpu
+
+func doinit() {}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go
new file mode 100644
index 0000000000..197188e67f
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go
@@ -0,0 +1,30 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (ppc64 || ppc64le)
+
+package cpu
+
+// HWCAP/HWCAP2 bits. These are exposed by the kernel.
+const (
+ // ISA Level
+ _PPC_FEATURE2_ARCH_2_07 = 0x80000000
+ _PPC_FEATURE2_ARCH_3_00 = 0x00800000
+
+ // CPU features
+ _PPC_FEATURE2_DARN = 0x00200000
+ _PPC_FEATURE2_SCV = 0x00100000
+)
+
+func doinit() {
+ // HWCAP2 feature bits
+ PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07)
+ PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00)
+ PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN)
+ PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV)
+}
+
+func isSet(hwc uint, value uint) bool {
+ return hwc&value != 0
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go
new file mode 100644
index 0000000000..ad741536f3
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go
@@ -0,0 +1,160 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// RISC-V extension discovery code for Linux. The approach here is to first try the riscv_hwprobe
+// syscall falling back to HWCAP to check for the C extension if riscv_hwprobe is not available.
+//
+// A note on detection of the Vector extension using HWCAP.
+//
+// Support for the Vector extension version 1.0 was added to the Linux kernel in release 6.5.
+// Support for the riscv_hwprobe syscall was added in 6.4. It follows that if the riscv_hwprobe
+// syscall is not available then neither is the Vector extension (which needs kernel support).
+// The riscv_hwprobe syscall should then be all we need to detect the Vector extension.
+// However, some RISC-V board manufacturers ship boards with an older kernel on top of which
+// they have back-ported various versions of the Vector extension patches but not the riscv_hwprobe
+// patches. These kernels advertise support for the Vector extension using HWCAP. Falling
+// back to HWCAP to detect the Vector extension, if riscv_hwprobe is not available, or simply not
+// bothering with riscv_hwprobe at all and just using HWCAP may then seem like an attractive option.
+//
+// Unfortunately, simply checking the 'V' bit in AT_HWCAP will not work as this bit is used by
+// RISC-V board and cloud instance providers to mean different things. The Lichee Pi 4A board
+// and the Scaleway RV1 cloud instances use the 'V' bit to advertise their support for the unratified
+// 0.7.1 version of the Vector Specification. The Banana Pi BPI-F3 and the CanMV-K230 board use
+// it to advertise support for 1.0 of the Vector extension. Versions 0.7.1 and 1.0 of the Vector
+// extension are binary incompatible. HWCAP can then not be used in isolation to populate the
+// HasV field as this field indicates that the underlying CPU is compatible with RVV 1.0.
+//
+// There is a way at runtime to distinguish between versions 0.7.1 and 1.0 of the Vector
+// specification by issuing a RVV 1.0 vsetvli instruction and checking the vill bit of the vtype
+// register. This check would allow us to safely detect version 1.0 of the Vector extension
+// with HWCAP, if riscv_hwprobe were not available. However, the check cannot
+// be added until the assembler supports the Vector instructions.
+//
+// Note the riscv_hwprobe syscall does not suffer from these ambiguities by design as all of the
+// extensions it advertises support for are explicitly versioned. It's also worth noting that
+// the riscv_hwprobe syscall is the only way to detect multi-letter RISC-V extensions, e.g., Zba.
+// These cannot be detected using HWCAP and so riscv_hwprobe must be used to detect the majority
+// of RISC-V extensions.
+//
+// Please see https://docs.kernel.org/arch/riscv/hwprobe.html for more information.
+
+// golang.org/x/sys/cpu is not allowed to depend on golang.org/x/sys/unix so we must
+// reproduce the constants, types and functions needed to make the riscv_hwprobe syscall
+// here.
+
+const (
+ // Copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go.
+ riscv_HWPROBE_KEY_IMA_EXT_0 = 0x4
+ riscv_HWPROBE_IMA_C = 0x2
+ riscv_HWPROBE_IMA_V = 0x4
+ riscv_HWPROBE_EXT_ZBA = 0x8
+ riscv_HWPROBE_EXT_ZBB = 0x10
+ riscv_HWPROBE_EXT_ZBS = 0x20
+ riscv_HWPROBE_EXT_ZVBB = 0x20000
+ riscv_HWPROBE_EXT_ZVBC = 0x40000
+ riscv_HWPROBE_EXT_ZVKB = 0x80000
+ riscv_HWPROBE_EXT_ZVKG = 0x100000
+ riscv_HWPROBE_EXT_ZVKNED = 0x200000
+ riscv_HWPROBE_EXT_ZVKNHB = 0x800000
+ riscv_HWPROBE_EXT_ZVKSED = 0x1000000
+ riscv_HWPROBE_EXT_ZVKSH = 0x2000000
+ riscv_HWPROBE_EXT_ZVKT = 0x4000000
+ riscv_HWPROBE_KEY_CPUPERF_0 = 0x5
+ riscv_HWPROBE_MISALIGNED_FAST = 0x3
+ riscv_HWPROBE_MISALIGNED_MASK = 0x7
+)
+
+const (
+ // sys_RISCV_HWPROBE is copied from golang.org/x/sys/unix/zsysnum_linux_riscv64.go.
+ sys_RISCV_HWPROBE = 258
+)
+
+// riscvHWProbePairs is copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go.
+type riscvHWProbePairs struct {
+ key int64
+ value uint64
+}
+
+const (
+ // CPU features
+ hwcap_RISCV_ISA_C = 1 << ('C' - 'A')
+)
+
+func doinit() {
+ // A slice of key/value pair structures is passed to the RISCVHWProbe syscall. The key
+ // field should be initialised with one of the key constants defined above, e.g.,
+ // RISCV_HWPROBE_KEY_IMA_EXT_0. The syscall will set the value field to the appropriate value.
+ // If the kernel does not recognise a key it will set the key field to -1 and the value field to 0.
+
+ pairs := []riscvHWProbePairs{
+ {riscv_HWPROBE_KEY_IMA_EXT_0, 0},
+ {riscv_HWPROBE_KEY_CPUPERF_0, 0},
+ }
+
+ // This call only indicates that extensions are supported if they are implemented on all cores.
+ if riscvHWProbe(pairs, 0) {
+ if pairs[0].key != -1 {
+ v := uint(pairs[0].value)
+ RISCV64.HasC = isSet(v, riscv_HWPROBE_IMA_C)
+ RISCV64.HasV = isSet(v, riscv_HWPROBE_IMA_V)
+ RISCV64.HasZba = isSet(v, riscv_HWPROBE_EXT_ZBA)
+ RISCV64.HasZbb = isSet(v, riscv_HWPROBE_EXT_ZBB)
+ RISCV64.HasZbs = isSet(v, riscv_HWPROBE_EXT_ZBS)
+ RISCV64.HasZvbb = isSet(v, riscv_HWPROBE_EXT_ZVBB)
+ RISCV64.HasZvbc = isSet(v, riscv_HWPROBE_EXT_ZVBC)
+ RISCV64.HasZvkb = isSet(v, riscv_HWPROBE_EXT_ZVKB)
+ RISCV64.HasZvkg = isSet(v, riscv_HWPROBE_EXT_ZVKG)
+ RISCV64.HasZvkt = isSet(v, riscv_HWPROBE_EXT_ZVKT)
+ // Cryptography shorthand extensions
+ RISCV64.HasZvkn = isSet(v, riscv_HWPROBE_EXT_ZVKNED) &&
+ isSet(v, riscv_HWPROBE_EXT_ZVKNHB) && RISCV64.HasZvkb && RISCV64.HasZvkt
+ RISCV64.HasZvknc = RISCV64.HasZvkn && RISCV64.HasZvbc
+ RISCV64.HasZvkng = RISCV64.HasZvkn && RISCV64.HasZvkg
+ RISCV64.HasZvks = isSet(v, riscv_HWPROBE_EXT_ZVKSED) &&
+ isSet(v, riscv_HWPROBE_EXT_ZVKSH) && RISCV64.HasZvkb && RISCV64.HasZvkt
+ RISCV64.HasZvksc = RISCV64.HasZvks && RISCV64.HasZvbc
+ RISCV64.HasZvksg = RISCV64.HasZvks && RISCV64.HasZvkg
+ }
+ if pairs[1].key != -1 {
+ v := pairs[1].value & riscv_HWPROBE_MISALIGNED_MASK
+ RISCV64.HasFastMisaligned = v == riscv_HWPROBE_MISALIGNED_FAST
+ }
+ }
+
+ // Let's double check with HWCAP if the C extension does not appear to be supported.
+ // This may happen if we're running on a kernel older than 6.4.
+
+ if !RISCV64.HasC {
+ RISCV64.HasC = isSet(hwCap, hwcap_RISCV_ISA_C)
+ }
+}
+
+func isSet(hwc uint, value uint) bool {
+ return hwc&value != 0
+}
+
+// riscvHWProbe is a simplified version of the generated wrapper function found in
+// golang.org/x/sys/unix/zsyscall_linux_riscv64.go. We simplify it by removing the
+// cpuCount and cpus parameters which we do not need. We always want to pass 0 for
+// these parameters here so the kernel only reports the extensions that are present
+// on all cores.
+func riscvHWProbe(pairs []riscvHWProbePairs, flags uint) bool {
+ var _zero uintptr
+ var p0 unsafe.Pointer
+ if len(pairs) > 0 {
+ p0 = unsafe.Pointer(&pairs[0])
+ } else {
+ p0 = unsafe.Pointer(&_zero)
+ }
+
+ _, _, e1 := syscall.Syscall6(sys_RISCV_HWPROBE, uintptr(p0), uintptr(len(pairs)), uintptr(0), uintptr(0), uintptr(flags), 0)
+ return e1 == 0
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go
new file mode 100644
index 0000000000..1517ac61d3
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go
@@ -0,0 +1,40 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+const (
+ // bit mask values from /usr/include/bits/hwcap.h
+ hwcap_ZARCH = 2
+ hwcap_STFLE = 4
+ hwcap_MSA = 8
+ hwcap_LDISP = 16
+ hwcap_EIMM = 32
+ hwcap_DFP = 64
+ hwcap_ETF3EH = 256
+ hwcap_VX = 2048
+ hwcap_VXE = 8192
+)
+
+func initS390Xbase() {
+ // test HWCAP bit vector
+ has := func(featureMask uint) bool {
+ return hwCap&featureMask == featureMask
+ }
+
+ // mandatory
+ S390X.HasZARCH = has(hwcap_ZARCH)
+
+ // optional
+ S390X.HasSTFLE = has(hwcap_STFLE)
+ S390X.HasLDISP = has(hwcap_LDISP)
+ S390X.HasEIMM = has(hwcap_EIMM)
+ S390X.HasETF3EH = has(hwcap_ETF3EH)
+ S390X.HasDFP = has(hwcap_DFP)
+ S390X.HasMSA = has(hwcap_MSA)
+ S390X.HasVX = has(hwcap_VX)
+ if S390X.HasVX {
+ S390X.HasVXE = has(hwcap_VXE)
+ }
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_loong64.go
new file mode 100644
index 0000000000..45ecb29ae7
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.go
@@ -0,0 +1,50 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build loong64
+
+package cpu
+
+const cacheLineSize = 64
+
+// Bit fields for CPUCFG registers, Related reference documents:
+// https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#_cpucfg
+const (
+ // CPUCFG1 bits
+ cpucfg1_CRC32 = 1 << 25
+
+ // CPUCFG2 bits
+ cpucfg2_LAM_BH = 1 << 27
+ cpucfg2_LAMCAS = 1 << 28
+)
+
+func initOptions() {
+ options = []option{
+ {Name: "lsx", Feature: &Loong64.HasLSX},
+ {Name: "lasx", Feature: &Loong64.HasLASX},
+ {Name: "crc32", Feature: &Loong64.HasCRC32},
+ {Name: "lam_bh", Feature: &Loong64.HasLAM_BH},
+ {Name: "lamcas", Feature: &Loong64.HasLAMCAS},
+ }
+
+ // The CPUCFG data on Loong64 only reflects the hardware capabilities,
+ // not the kernel support status, so features such as LSX and LASX that
+ // require kernel support cannot be obtained from the CPUCFG data.
+ //
+ // These features only require hardware capability support and do not
+ // require kernel specific support, so they can be obtained directly
+ // through CPUCFG
+ cfg1 := get_cpucfg(1)
+ cfg2 := get_cpucfg(2)
+
+ Loong64.HasCRC32 = cfgIsSet(cfg1, cpucfg1_CRC32)
+ Loong64.HasLAMCAS = cfgIsSet(cfg2, cpucfg2_LAMCAS)
+ Loong64.HasLAM_BH = cfgIsSet(cfg2, cpucfg2_LAM_BH)
+}
+
+func get_cpucfg(reg uint32) uint32
+
+func cfgIsSet(cfg uint32, val uint32) bool {
+ return cfg&val != 0
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.s b/vendor/golang.org/x/sys/cpu/cpu_loong64.s
new file mode 100644
index 0000000000..71cbaf1ce2
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.s
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func get_cpucfg(reg uint32) uint32
+TEXT ·get_cpucfg(SB), NOSPLIT|NOFRAME, $0
+ MOVW reg+0(FP), R5
+ // CPUCFG R5, R4 = 0x00006ca4
+ WORD $0x00006ca4
+ MOVW R4, ret+8(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go
new file mode 100644
index 0000000000..fedb00cc4c
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go
@@ -0,0 +1,15 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build mips64 || mips64le
+
+package cpu
+
+const cacheLineSize = 32
+
+func initOptions() {
+ options = []option{
+ {Name: "msa", Feature: &MIPS64X.HasMSA},
+ }
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go
new file mode 100644
index 0000000000..ffb4ec7eb3
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go
@@ -0,0 +1,11 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build mips || mipsle
+
+package cpu
+
+const cacheLineSize = 32
+
+func initOptions() {}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go
new file mode 100644
index 0000000000..ebfb3fc8e7
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go
@@ -0,0 +1,173 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// Minimal copy of functionality from x/sys/unix so the cpu package can call
+// sysctl without depending on x/sys/unix.
+
+const (
+ _CTL_QUERY = -2
+
+ _SYSCTL_VERS_1 = 0x1000000
+)
+
+var _zero uintptr
+
+func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
+ var _p0 unsafe.Pointer
+ if len(mib) > 0 {
+ _p0 = unsafe.Pointer(&mib[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, errno := syscall.Syscall6(
+ syscall.SYS___SYSCTL,
+ uintptr(_p0),
+ uintptr(len(mib)),
+ uintptr(unsafe.Pointer(old)),
+ uintptr(unsafe.Pointer(oldlen)),
+ uintptr(unsafe.Pointer(new)),
+ uintptr(newlen))
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
+
+type sysctlNode struct {
+ Flags uint32
+ Num int32
+ Name [32]int8
+ Ver uint32
+ __rsvd uint32
+ Un [16]byte
+ _sysctl_size [8]byte
+ _sysctl_func [8]byte
+ _sysctl_parent [8]byte
+ _sysctl_desc [8]byte
+}
+
+func sysctlNodes(mib []int32) ([]sysctlNode, error) {
+ var olen uintptr
+
+ // Get a list of all sysctl nodes below the given MIB by performing
+ // a sysctl for the given MIB with CTL_QUERY appended.
+ mib = append(mib, _CTL_QUERY)
+ qnode := sysctlNode{Flags: _SYSCTL_VERS_1}
+ qp := (*byte)(unsafe.Pointer(&qnode))
+ sz := unsafe.Sizeof(qnode)
+ if err := sysctl(mib, nil, &olen, qp, sz); err != nil {
+ return nil, err
+ }
+
+ // Now that we know the size, get the actual nodes.
+ nodes := make([]sysctlNode, olen/sz)
+ np := (*byte)(unsafe.Pointer(&nodes[0]))
+ if err := sysctl(mib, np, &olen, qp, sz); err != nil {
+ return nil, err
+ }
+
+ return nodes, nil
+}
+
+func nametomib(name string) ([]int32, error) {
+ // Split name into components.
+ var parts []string
+ last := 0
+ for i := 0; i < len(name); i++ {
+ if name[i] == '.' {
+ parts = append(parts, name[last:i])
+ last = i + 1
+ }
+ }
+ parts = append(parts, name[last:])
+
+ mib := []int32{}
+ // Discover the nodes and construct the MIB OID.
+ for partno, part := range parts {
+ nodes, err := sysctlNodes(mib)
+ if err != nil {
+ return nil, err
+ }
+ for _, node := range nodes {
+ n := make([]byte, 0)
+ for i := range node.Name {
+ if node.Name[i] != 0 {
+ n = append(n, byte(node.Name[i]))
+ }
+ }
+ if string(n) == part {
+ mib = append(mib, int32(node.Num))
+ break
+ }
+ }
+ if len(mib) != partno+1 {
+ return nil, err
+ }
+ }
+
+ return mib, nil
+}
+
+// aarch64SysctlCPUID is struct aarch64_sysctl_cpu_id from NetBSD's
+type aarch64SysctlCPUID struct {
+ midr uint64 /* Main ID Register */
+ revidr uint64 /* Revision ID Register */
+ mpidr uint64 /* Multiprocessor Affinity Register */
+ aa64dfr0 uint64 /* A64 Debug Feature Register 0 */
+ aa64dfr1 uint64 /* A64 Debug Feature Register 1 */
+ aa64isar0 uint64 /* A64 Instruction Set Attribute Register 0 */
+ aa64isar1 uint64 /* A64 Instruction Set Attribute Register 1 */
+ aa64mmfr0 uint64 /* A64 Memory Model Feature Register 0 */
+ aa64mmfr1 uint64 /* A64 Memory Model Feature Register 1 */
+ aa64mmfr2 uint64 /* A64 Memory Model Feature Register 2 */
+ aa64pfr0 uint64 /* A64 Processor Feature Register 0 */
+ aa64pfr1 uint64 /* A64 Processor Feature Register 1 */
+ aa64zfr0 uint64 /* A64 SVE Feature ID Register 0 */
+ mvfr0 uint32 /* Media and VFP Feature Register 0 */
+ mvfr1 uint32 /* Media and VFP Feature Register 1 */
+ mvfr2 uint32 /* Media and VFP Feature Register 2 */
+ pad uint32
+ clidr uint64 /* Cache Level ID Register */
+ ctr uint64 /* Cache Type Register */
+}
+
+func sysctlCPUID(name string) (*aarch64SysctlCPUID, error) {
+ mib, err := nametomib(name)
+ if err != nil {
+ return nil, err
+ }
+
+ out := aarch64SysctlCPUID{}
+ n := unsafe.Sizeof(out)
+ _, _, errno := syscall.Syscall6(
+ syscall.SYS___SYSCTL,
+ uintptr(unsafe.Pointer(&mib[0])),
+ uintptr(len(mib)),
+ uintptr(unsafe.Pointer(&out)),
+ uintptr(unsafe.Pointer(&n)),
+ uintptr(0),
+ uintptr(0))
+ if errno != 0 {
+ return nil, errno
+ }
+ return &out, nil
+}
+
+func doinit() {
+ cpuid, err := sysctlCPUID("machdep.cpu0.cpu_id")
+ if err != nil {
+ setMinimalFeatures()
+ return
+ }
+ parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0)
+
+ Initialized = true
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go
new file mode 100644
index 0000000000..85b64d5ccb
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go
@@ -0,0 +1,65 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// Minimal copy of functionality from x/sys/unix so the cpu package can call
+// sysctl without depending on x/sys/unix.
+
+const (
+ // From OpenBSD's sys/sysctl.h.
+ _CTL_MACHDEP = 7
+
+ // From OpenBSD's machine/cpu.h.
+ _CPU_ID_AA64ISAR0 = 2
+ _CPU_ID_AA64ISAR1 = 3
+)
+
+// Implemented in the runtime package (runtime/sys_openbsd3.go)
+func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+//go:linkname syscall_syscall6 syscall.syscall6
+
+func sysctl(mib []uint32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
+ _, _, errno := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
+
+var libc_sysctl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_sysctl sysctl "libc.so"
+
+func sysctlUint64(mib []uint32) (uint64, bool) {
+ var out uint64
+ nout := unsafe.Sizeof(out)
+ if err := sysctl(mib, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); err != nil {
+ return 0, false
+ }
+ return out, true
+}
+
+func doinit() {
+ setMinimalFeatures()
+
+ // Get ID_AA64ISAR0 and ID_AA64ISAR1 from sysctl.
+ isar0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR0})
+ if !ok {
+ return
+ }
+ isar1, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR1})
+ if !ok {
+ return
+ }
+ parseARM64SystemRegisters(isar0, isar1, 0)
+
+ Initialized = true
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s
new file mode 100644
index 0000000000..054ba05d60
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s
@@ -0,0 +1,11 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_sysctl(SB)
+
+GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
+DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go
new file mode 100644
index 0000000000..e9ecf2a456
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go
@@ -0,0 +1,9 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !linux && arm
+
+package cpu
+
+func archInit() {}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
new file mode 100644
index 0000000000..5341e7f88d
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
@@ -0,0 +1,9 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !linux && !netbsd && !openbsd && arm64
+
+package cpu
+
+func doinit() {}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go
new file mode 100644
index 0000000000..5f8f2419ab
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go
@@ -0,0 +1,11 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !linux && (mips64 || mips64le)
+
+package cpu
+
+func archInit() {
+ Initialized = true
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go
new file mode 100644
index 0000000000..89608fba27
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go
@@ -0,0 +1,12 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !aix && !linux && (ppc64 || ppc64le)
+
+package cpu
+
+func archInit() {
+ PPC64.IsPOWER8 = true
+ Initialized = true
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go
new file mode 100644
index 0000000000..5ab87808f7
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go
@@ -0,0 +1,11 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !linux && riscv64
+
+package cpu
+
+func archInit() {
+ Initialized = true
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_x86.go b/vendor/golang.org/x/sys/cpu/cpu_other_x86.go
new file mode 100644
index 0000000000..a0fd7e2f75
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_other_x86.go
@@ -0,0 +1,11 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build 386 || amd64p32 || (amd64 && (!darwin || !gc))
+
+package cpu
+
+func darwinSupportsAVX512() bool {
+ panic("only implemented for gc && amd64 && darwin")
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go
new file mode 100644
index 0000000000..c14f12b149
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go
@@ -0,0 +1,16 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64 || ppc64le
+
+package cpu
+
+const cacheLineSize = 128
+
+func initOptions() {
+ options = []option{
+ {Name: "darn", Feature: &PPC64.HasDARN},
+ {Name: "scv", Feature: &PPC64.HasSCV},
+ }
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go
new file mode 100644
index 0000000000..0f617aef54
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go
@@ -0,0 +1,32 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build riscv64
+
+package cpu
+
+const cacheLineSize = 64
+
+func initOptions() {
+ options = []option{
+ {Name: "fastmisaligned", Feature: &RISCV64.HasFastMisaligned},
+ {Name: "c", Feature: &RISCV64.HasC},
+ {Name: "v", Feature: &RISCV64.HasV},
+ {Name: "zba", Feature: &RISCV64.HasZba},
+ {Name: "zbb", Feature: &RISCV64.HasZbb},
+ {Name: "zbs", Feature: &RISCV64.HasZbs},
+ // RISC-V Cryptography Extensions
+ {Name: "zvbb", Feature: &RISCV64.HasZvbb},
+ {Name: "zvbc", Feature: &RISCV64.HasZvbc},
+ {Name: "zvkb", Feature: &RISCV64.HasZvkb},
+ {Name: "zvkg", Feature: &RISCV64.HasZvkg},
+ {Name: "zvkt", Feature: &RISCV64.HasZvkt},
+ {Name: "zvkn", Feature: &RISCV64.HasZvkn},
+ {Name: "zvknc", Feature: &RISCV64.HasZvknc},
+ {Name: "zvkng", Feature: &RISCV64.HasZvkng},
+ {Name: "zvks", Feature: &RISCV64.HasZvks},
+ {Name: "zvksc", Feature: &RISCV64.HasZvksc},
+ {Name: "zvksg", Feature: &RISCV64.HasZvksg},
+ }
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_s390x.go
new file mode 100644
index 0000000000..5881b8833f
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.go
@@ -0,0 +1,172 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+const cacheLineSize = 256
+
+func initOptions() {
+ options = []option{
+ {Name: "zarch", Feature: &S390X.HasZARCH, Required: true},
+ {Name: "stfle", Feature: &S390X.HasSTFLE, Required: true},
+ {Name: "ldisp", Feature: &S390X.HasLDISP, Required: true},
+ {Name: "eimm", Feature: &S390X.HasEIMM, Required: true},
+ {Name: "dfp", Feature: &S390X.HasDFP},
+ {Name: "etf3eh", Feature: &S390X.HasETF3EH},
+ {Name: "msa", Feature: &S390X.HasMSA},
+ {Name: "aes", Feature: &S390X.HasAES},
+ {Name: "aescbc", Feature: &S390X.HasAESCBC},
+ {Name: "aesctr", Feature: &S390X.HasAESCTR},
+ {Name: "aesgcm", Feature: &S390X.HasAESGCM},
+ {Name: "ghash", Feature: &S390X.HasGHASH},
+ {Name: "sha1", Feature: &S390X.HasSHA1},
+ {Name: "sha256", Feature: &S390X.HasSHA256},
+ {Name: "sha3", Feature: &S390X.HasSHA3},
+ {Name: "sha512", Feature: &S390X.HasSHA512},
+ {Name: "vx", Feature: &S390X.HasVX},
+ {Name: "vxe", Feature: &S390X.HasVXE},
+ }
+}
+
+// bitIsSet reports whether the bit at index is set. The bit index
+// is in big endian order, so bit index 0 is the leftmost bit.
+func bitIsSet(bits []uint64, index uint) bool {
+ return bits[index/64]&((1<<63)>>(index%64)) != 0
+}
+
+// facility is a bit index for the named facility.
+type facility uint8
+
+const (
+ // mandatory facilities
+ zarch facility = 1 // z architecture mode is active
+ stflef facility = 7 // store-facility-list-extended
+ ldisp facility = 18 // long-displacement
+ eimm facility = 21 // extended-immediate
+
+ // miscellaneous facilities
+ dfp facility = 42 // decimal-floating-point
+ etf3eh facility = 30 // extended-translation 3 enhancement
+
+ // cryptography facilities
+ msa facility = 17 // message-security-assist
+ msa3 facility = 76 // message-security-assist extension 3
+ msa4 facility = 77 // message-security-assist extension 4
+ msa5 facility = 57 // message-security-assist extension 5
+ msa8 facility = 146 // message-security-assist extension 8
+ msa9 facility = 155 // message-security-assist extension 9
+
+ // vector facilities
+ vx facility = 129 // vector facility
+ vxe facility = 135 // vector-enhancements 1
+ vxe2 facility = 148 // vector-enhancements 2
+)
+
+// facilityList contains the result of an STFLE call.
+// Bits are numbered in big endian order so the
+// leftmost bit (the MSB) is at index 0.
+type facilityList struct {
+ bits [4]uint64
+}
+
+// Has reports whether the given facilities are present.
+func (s *facilityList) Has(fs ...facility) bool {
+ if len(fs) == 0 {
+ panic("no facility bits provided")
+ }
+ for _, f := range fs {
+ if !bitIsSet(s.bits[:], uint(f)) {
+ return false
+ }
+ }
+ return true
+}
+
+// function is the code for the named cryptographic function.
+type function uint8
+
+const (
+ // KM{,A,C,CTR} function codes
+ aes128 function = 18 // AES-128
+ aes192 function = 19 // AES-192
+ aes256 function = 20 // AES-256
+
+ // K{I,L}MD function codes
+ sha1 function = 1 // SHA-1
+ sha256 function = 2 // SHA-256
+ sha512 function = 3 // SHA-512
+ sha3_224 function = 32 // SHA3-224
+ sha3_256 function = 33 // SHA3-256
+ sha3_384 function = 34 // SHA3-384
+ sha3_512 function = 35 // SHA3-512
+ shake128 function = 36 // SHAKE-128
+ shake256 function = 37 // SHAKE-256
+
+ // KLMD function codes
+ ghash function = 65 // GHASH
+)
+
+// queryResult contains the result of a Query function
+// call. Bits are numbered in big endian order so the
+// leftmost bit (the MSB) is at index 0.
+type queryResult struct {
+ bits [2]uint64
+}
+
+// Has reports whether the given functions are present.
+func (q *queryResult) Has(fns ...function) bool {
+ if len(fns) == 0 {
+ panic("no function codes provided")
+ }
+ for _, f := range fns {
+ if !bitIsSet(q.bits[:], uint(f)) {
+ return false
+ }
+ }
+ return true
+}
+
+func doinit() {
+ initS390Xbase()
+
+ // We need implementations of stfle, km and so on
+ // to detect cryptographic features.
+ if !haveAsmFunctions() {
+ return
+ }
+
+ // optional cryptographic functions
+ if S390X.HasMSA {
+ aes := []function{aes128, aes192, aes256}
+
+ // cipher message
+ km, kmc := kmQuery(), kmcQuery()
+ S390X.HasAES = km.Has(aes...)
+ S390X.HasAESCBC = kmc.Has(aes...)
+ if S390X.HasSTFLE {
+ facilities := stfle()
+ if facilities.Has(msa4) {
+ kmctr := kmctrQuery()
+ S390X.HasAESCTR = kmctr.Has(aes...)
+ }
+ if facilities.Has(msa8) {
+ kma := kmaQuery()
+ S390X.HasAESGCM = kma.Has(aes...)
+ }
+ }
+
+ // compute message digest
+ kimd := kimdQuery() // intermediate (no padding)
+ klmd := klmdQuery() // last (padding)
+ S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1)
+ S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256)
+ S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512)
+ S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist
+ sha3 := []function{
+ sha3_224, sha3_256, sha3_384, sha3_512,
+ shake128, shake256,
+ }
+ S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...)
+ }
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s
new file mode 100644
index 0000000000..1fb4b70133
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.s
@@ -0,0 +1,57 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+
+#include "textflag.h"
+
+// func stfle() facilityList
+TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32
+ MOVD $ret+0(FP), R1
+ MOVD $3, R0 // last doubleword index to store
+ XC $32, (R1), (R1) // clear 4 doublewords (32 bytes)
+ WORD $0xb2b01000 // store facility list extended (STFLE)
+ RET
+
+// func kmQuery() queryResult
+TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16
+ MOVD $0, R0 // set function code to 0 (KM-Query)
+ MOVD $ret+0(FP), R1 // address of 16-byte return value
+ WORD $0xB92E0024 // cipher message (KM)
+ RET
+
+// func kmcQuery() queryResult
+TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16
+ MOVD $0, R0 // set function code to 0 (KMC-Query)
+ MOVD $ret+0(FP), R1 // address of 16-byte return value
+ WORD $0xB92F0024 // cipher message with chaining (KMC)
+ RET
+
+// func kmctrQuery() queryResult
+TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16
+ MOVD $0, R0 // set function code to 0 (KMCTR-Query)
+ MOVD $ret+0(FP), R1 // address of 16-byte return value
+ WORD $0xB92D4024 // cipher message with counter (KMCTR)
+ RET
+
+// func kmaQuery() queryResult
+TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16
+ MOVD $0, R0 // set function code to 0 (KMA-Query)
+ MOVD $ret+0(FP), R1 // address of 16-byte return value
+ WORD $0xb9296024 // cipher message with authentication (KMA)
+ RET
+
+// func kimdQuery() queryResult
+TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16
+ MOVD $0, R0 // set function code to 0 (KIMD-Query)
+ MOVD $ret+0(FP), R1 // address of 16-byte return value
+ WORD $0xB93E0024 // compute intermediate message digest (KIMD)
+ RET
+
+// func klmdQuery() queryResult
+TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16
+ MOVD $0, R0 // set function code to 0 (KLMD-Query)
+ MOVD $ret+0(FP), R1 // address of 16-byte return value
+ WORD $0xB93F0024 // compute last message digest (KLMD)
+ RET
diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go
new file mode 100644
index 0000000000..384787ea30
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go
@@ -0,0 +1,17 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build wasm
+
+package cpu
+
+// We're compiling the cpu package for an unknown (software-abstracted) CPU.
+// Make CacheLinePad an empty struct and hope that the usual struct alignment
+// rules are good enough.
+
+const cacheLineSize = 0
+
+func initOptions() {}
+
+func archInit() {}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go
new file mode 100644
index 0000000000..1e642f3304
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go
@@ -0,0 +1,162 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build 386 || amd64 || amd64p32
+
+package cpu
+
+import "runtime"
+
+const cacheLineSize = 64
+
+func initOptions() {
+ options = []option{
+ {Name: "adx", Feature: &X86.HasADX},
+ {Name: "aes", Feature: &X86.HasAES},
+ {Name: "avx", Feature: &X86.HasAVX},
+ {Name: "avx2", Feature: &X86.HasAVX2},
+ {Name: "avx512", Feature: &X86.HasAVX512},
+ {Name: "avx512f", Feature: &X86.HasAVX512F},
+ {Name: "avx512cd", Feature: &X86.HasAVX512CD},
+ {Name: "avx512er", Feature: &X86.HasAVX512ER},
+ {Name: "avx512pf", Feature: &X86.HasAVX512PF},
+ {Name: "avx512vl", Feature: &X86.HasAVX512VL},
+ {Name: "avx512bw", Feature: &X86.HasAVX512BW},
+ {Name: "avx512dq", Feature: &X86.HasAVX512DQ},
+ {Name: "avx512ifma", Feature: &X86.HasAVX512IFMA},
+ {Name: "avx512vbmi", Feature: &X86.HasAVX512VBMI},
+ {Name: "avx512vnniw", Feature: &X86.HasAVX5124VNNIW},
+ {Name: "avx5124fmaps", Feature: &X86.HasAVX5124FMAPS},
+ {Name: "avx512vpopcntdq", Feature: &X86.HasAVX512VPOPCNTDQ},
+ {Name: "avx512vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ},
+ {Name: "avx512vnni", Feature: &X86.HasAVX512VNNI},
+ {Name: "avx512gfni", Feature: &X86.HasAVX512GFNI},
+ {Name: "avx512vaes", Feature: &X86.HasAVX512VAES},
+ {Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2},
+ {Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG},
+ {Name: "avx512bf16", Feature: &X86.HasAVX512BF16},
+ {Name: "amxtile", Feature: &X86.HasAMXTile},
+ {Name: "amxint8", Feature: &X86.HasAMXInt8},
+ {Name: "amxbf16", Feature: &X86.HasAMXBF16},
+ {Name: "bmi1", Feature: &X86.HasBMI1},
+ {Name: "bmi2", Feature: &X86.HasBMI2},
+ {Name: "cx16", Feature: &X86.HasCX16},
+ {Name: "erms", Feature: &X86.HasERMS},
+ {Name: "fma", Feature: &X86.HasFMA},
+ {Name: "osxsave", Feature: &X86.HasOSXSAVE},
+ {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ},
+ {Name: "popcnt", Feature: &X86.HasPOPCNT},
+ {Name: "rdrand", Feature: &X86.HasRDRAND},
+ {Name: "rdseed", Feature: &X86.HasRDSEED},
+ {Name: "sse3", Feature: &X86.HasSSE3},
+ {Name: "sse41", Feature: &X86.HasSSE41},
+ {Name: "sse42", Feature: &X86.HasSSE42},
+ {Name: "ssse3", Feature: &X86.HasSSSE3},
+ {Name: "avxifma", Feature: &X86.HasAVXIFMA},
+ {Name: "avxvnni", Feature: &X86.HasAVXVNNI},
+ {Name: "avxvnniint8", Feature: &X86.HasAVXVNNIInt8},
+
+ // These capabilities should always be enabled on amd64:
+ {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"},
+ }
+}
+
+func archInit() {
+
+ Initialized = true
+
+ maxID, _, _, _ := cpuid(0, 0)
+
+ if maxID < 1 {
+ return
+ }
+
+ _, _, ecx1, edx1 := cpuid(1, 0)
+ X86.HasSSE2 = isSet(26, edx1)
+
+ X86.HasSSE3 = isSet(0, ecx1)
+ X86.HasPCLMULQDQ = isSet(1, ecx1)
+ X86.HasSSSE3 = isSet(9, ecx1)
+ X86.HasFMA = isSet(12, ecx1)
+ X86.HasCX16 = isSet(13, ecx1)
+ X86.HasSSE41 = isSet(19, ecx1)
+ X86.HasSSE42 = isSet(20, ecx1)
+ X86.HasPOPCNT = isSet(23, ecx1)
+ X86.HasAES = isSet(25, ecx1)
+ X86.HasOSXSAVE = isSet(27, ecx1)
+ X86.HasRDRAND = isSet(30, ecx1)
+
+ var osSupportsAVX, osSupportsAVX512 bool
+ // For XGETBV, OSXSAVE bit is required and sufficient.
+ if X86.HasOSXSAVE {
+ eax, _ := xgetbv()
+ // Check if XMM and YMM registers have OS support.
+ osSupportsAVX = isSet(1, eax) && isSet(2, eax)
+
+ if runtime.GOOS == "darwin" {
+ // Darwin requires special AVX512 checks, see cpu_darwin_x86.go
+ osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512()
+ } else {
+ // Check if OPMASK and ZMM registers have OS support.
+ osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax)
+ }
+ }
+
+ X86.HasAVX = isSet(28, ecx1) && osSupportsAVX
+
+ if maxID < 7 {
+ return
+ }
+
+ eax7, ebx7, ecx7, edx7 := cpuid(7, 0)
+ X86.HasBMI1 = isSet(3, ebx7)
+ X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
+ X86.HasBMI2 = isSet(8, ebx7)
+ X86.HasERMS = isSet(9, ebx7)
+ X86.HasRDSEED = isSet(18, ebx7)
+ X86.HasADX = isSet(19, ebx7)
+
+ X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension
+ if X86.HasAVX512 {
+ X86.HasAVX512F = true
+ X86.HasAVX512CD = isSet(28, ebx7)
+ X86.HasAVX512ER = isSet(27, ebx7)
+ X86.HasAVX512PF = isSet(26, ebx7)
+ X86.HasAVX512VL = isSet(31, ebx7)
+ X86.HasAVX512BW = isSet(30, ebx7)
+ X86.HasAVX512DQ = isSet(17, ebx7)
+ X86.HasAVX512IFMA = isSet(21, ebx7)
+ X86.HasAVX512VBMI = isSet(1, ecx7)
+ X86.HasAVX5124VNNIW = isSet(2, edx7)
+ X86.HasAVX5124FMAPS = isSet(3, edx7)
+ X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7)
+ X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7)
+ X86.HasAVX512VNNI = isSet(11, ecx7)
+ X86.HasAVX512GFNI = isSet(8, ecx7)
+ X86.HasAVX512VAES = isSet(9, ecx7)
+ X86.HasAVX512VBMI2 = isSet(6, ecx7)
+ X86.HasAVX512BITALG = isSet(12, ecx7)
+ }
+
+ X86.HasAMXTile = isSet(24, edx7)
+ X86.HasAMXInt8 = isSet(25, edx7)
+ X86.HasAMXBF16 = isSet(22, edx7)
+
+ // These features depend on the second level of extended features.
+ if eax7 >= 1 {
+ eax71, _, _, edx71 := cpuid(7, 1)
+ if X86.HasAVX512 {
+ X86.HasAVX512BF16 = isSet(5, eax71)
+ }
+ if X86.HasAVX {
+ X86.HasAVXIFMA = isSet(23, eax71)
+ X86.HasAVXVNNI = isSet(4, eax71)
+ X86.HasAVXVNNIInt8 = isSet(4, edx71)
+ }
+ }
+}
+
+func isSet(bitpos uint, value uint32) bool {
+ return value&(1<> 63))
+)
+
+// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2
+// These are initialized in cpu_$GOARCH.go
+// and should not be changed after they are initialized.
+var hwCap uint
+var hwCap2 uint
+
+func readHWCAP() error {
+ // For Go 1.21+, get auxv from the Go runtime.
+ if a := getAuxv(); len(a) > 0 {
+ for len(a) >= 2 {
+ tag, val := a[0], uint(a[1])
+ a = a[2:]
+ switch tag {
+ case _AT_HWCAP:
+ hwCap = val
+ case _AT_HWCAP2:
+ hwCap2 = val
+ }
+ }
+ return nil
+ }
+
+ buf, err := os.ReadFile(procAuxv)
+ if err != nil {
+ // e.g. on android /proc/self/auxv is not accessible, so silently
+ // ignore the error and leave Initialized = false. On some
+ // architectures (e.g. arm64) doinit() implements a fallback
+ // readout and will set Initialized = true again.
+ return err
+ }
+ bo := hostByteOrder()
+ for len(buf) >= 2*(uintSize/8) {
+ var tag, val uint
+ switch uintSize {
+ case 32:
+ tag = uint(bo.Uint32(buf[0:]))
+ val = uint(bo.Uint32(buf[4:]))
+ buf = buf[8:]
+ case 64:
+ tag = uint(bo.Uint64(buf[0:]))
+ val = uint(bo.Uint64(buf[8:]))
+ buf = buf[16:]
+ }
+ switch tag {
+ case _AT_HWCAP:
+ hwCap = val
+ case _AT_HWCAP2:
+ hwCap2 = val
+ }
+ }
+ return nil
+}
diff --git a/vendor/golang.org/x/sys/cpu/parse.go b/vendor/golang.org/x/sys/cpu/parse.go
new file mode 100644
index 0000000000..56a7e1a176
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/parse.go
@@ -0,0 +1,43 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import "strconv"
+
+// parseRelease parses a dot-separated version number. It follows the semver
+// syntax, but allows the minor and patch versions to be elided.
+//
+// This is a copy of the Go runtime's parseRelease from
+// https://golang.org/cl/209597.
+func parseRelease(rel string) (major, minor, patch int, ok bool) {
+ // Strip anything after a dash or plus.
+ for i := range len(rel) {
+ if rel[i] == '-' || rel[i] == '+' {
+ rel = rel[:i]
+ break
+ }
+ }
+
+ next := func() (int, bool) {
+ for i := range len(rel) {
+ if rel[i] == '.' {
+ ver, err := strconv.Atoi(rel[:i])
+ rel = rel[i+1:]
+ return ver, err == nil
+ }
+ }
+ ver, err := strconv.Atoi(rel)
+ rel = ""
+ return ver, err == nil
+ }
+ if major, ok = next(); !ok || rel == "" {
+ return
+ }
+ if minor, ok = next(); !ok || rel == "" {
+ return
+ }
+ patch, ok = next()
+ return
+}
diff --git a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go
new file mode 100644
index 0000000000..4cd64c7042
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go
@@ -0,0 +1,53 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && arm64
+
+package cpu
+
+import (
+ "errors"
+ "io"
+ "os"
+ "strings"
+)
+
+func readLinuxProcCPUInfo() error {
+ f, err := os.Open("/proc/cpuinfo")
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ var buf [1 << 10]byte // enough for first CPU
+ n, err := io.ReadFull(f, buf[:])
+ if err != nil && err != io.ErrUnexpectedEOF {
+ return err
+ }
+ in := string(buf[:n])
+ const features = "\nFeatures : "
+ i := strings.Index(in, features)
+ if i == -1 {
+ return errors.New("no CPU features found")
+ }
+ in = in[i+len(features):]
+ if i := strings.Index(in, "\n"); i != -1 {
+ in = in[:i]
+ }
+ m := map[string]*bool{}
+
+ initOptions() // need it early here; it's harmless to call twice
+ for _, o := range options {
+ m[o.Name] = o.Feature
+ }
+ // The EVTSTRM field has alias "evstrm" in Go, but Linux calls it "evtstrm".
+ m["evtstrm"] = &ARM64.HasEVTSTRM
+
+ for _, f := range strings.Fields(in) {
+ if p, ok := m[f]; ok {
+ *p = true
+ }
+ }
+ return nil
+}
diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv.go b/vendor/golang.org/x/sys/cpu/runtime_auxv.go
new file mode 100644
index 0000000000..5f92ac9a2e
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/runtime_auxv.go
@@ -0,0 +1,16 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+// getAuxvFn is non-nil on Go 1.21+ (via runtime_auxv_go121.go init)
+// on platforms that use auxv.
+var getAuxvFn func() []uintptr
+
+func getAuxv() []uintptr {
+ if getAuxvFn == nil {
+ return nil
+ }
+ return getAuxvFn()
+}
diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go
new file mode 100644
index 0000000000..4c9788ea8e
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go
@@ -0,0 +1,18 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package cpu
+
+import (
+ _ "unsafe" // for linkname
+)
+
+//go:linkname runtime_getAuxv runtime.getAuxv
+func runtime_getAuxv() []uintptr
+
+func init() {
+ getAuxvFn = runtime_getAuxv
+}
diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
new file mode 100644
index 0000000000..1b9ccb091a
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
@@ -0,0 +1,26 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Recreate a getsystemcfg syscall handler instead of
+// using the one provided by x/sys/unix to avoid having
+// the dependency between them. (See golang.org/issue/32102)
+// Moreover, this file will be used during the building of
+// gccgo's libgo and thus must not used a CGo method.
+
+//go:build aix && gccgo
+
+package cpu
+
+import (
+ "syscall"
+)
+
+//extern getsystemcfg
+func gccgoGetsystemcfg(label uint32) (r uint64)
+
+func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) {
+ r1 = uintptr(gccgoGetsystemcfg(uint32(label)))
+ e1 = syscall.GetErrno()
+ return
+}
diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go
new file mode 100644
index 0000000000..e8b6cdbe9a
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go
@@ -0,0 +1,35 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Minimal copy of x/sys/unix so the cpu package can make a
+// system call on AIX without depending on x/sys/unix.
+// (See golang.org/issue/32102)
+
+//go:build aix && ppc64 && gc
+
+package cpu
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o"
+
+//go:linkname libc_getsystemcfg libc_getsystemcfg
+
+type syscallFunc uintptr
+
+var libc_getsystemcfg syscallFunc
+
+type errno = syscall.Errno
+
+// Implemented in runtime/syscall_aix.go.
+func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno)
+func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno)
+
+func callgetsystemcfg(label int) (r1 uintptr, e1 errno) {
+ r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0)
+ return
+}
diff --git a/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go b/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go
new file mode 100644
index 0000000000..4d0888b0c0
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go
@@ -0,0 +1,98 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Minimal copy of x/sys/unix so the cpu package can make a
+// system call on Darwin without depending on x/sys/unix.
+
+//go:build darwin && amd64 && gc
+
+package cpu
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+type _C_int int32
+
+// adapted from unix.Uname() at x/sys/unix/syscall_darwin.go L419
+func darwinOSRelease(release *[256]byte) error {
+ // from x/sys/unix/zerrors_openbsd_amd64.go
+ const (
+ CTL_KERN = 0x1
+ KERN_OSRELEASE = 0x2
+ )
+
+ mib := []_C_int{CTL_KERN, KERN_OSRELEASE}
+ n := unsafe.Sizeof(*release)
+
+ return sysctl(mib, &release[0], &n, nil, 0)
+}
+
+type Errno = syscall.Errno
+
+var _zero uintptr // Single-word zero for use when we need a valid pointer to 0 bytes.
+
+// from x/sys/unix/zsyscall_darwin_amd64.go L791-807
+func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error {
+ var _p0 unsafe.Pointer
+ if len(mib) > 0 {
+ _p0 = unsafe.Pointer(&mib[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ if _, _, err := syscall_syscall6(
+ libc_sysctl_trampoline_addr,
+ uintptr(_p0),
+ uintptr(len(mib)),
+ uintptr(unsafe.Pointer(old)),
+ uintptr(unsafe.Pointer(oldlen)),
+ uintptr(unsafe.Pointer(new)),
+ uintptr(newlen),
+ ); err != 0 {
+ return err
+ }
+
+ return nil
+}
+
+var libc_sysctl_trampoline_addr uintptr
+
+// adapted from internal/cpu/cpu_arm64_darwin.go
+func darwinSysctlEnabled(name []byte) bool {
+ out := int32(0)
+ nout := unsafe.Sizeof(out)
+ if ret := sysctlbyname(&name[0], (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); ret != nil {
+ return false
+ }
+ return out > 0
+}
+
+//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib"
+
+var libc_sysctlbyname_trampoline_addr uintptr
+
+// adapted from runtime/sys_darwin.go in the pattern of sysctl() above, as defined in x/sys/unix
+func sysctlbyname(name *byte, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error {
+ if _, _, err := syscall_syscall6(
+ libc_sysctlbyname_trampoline_addr,
+ uintptr(unsafe.Pointer(name)),
+ uintptr(unsafe.Pointer(old)),
+ uintptr(unsafe.Pointer(oldlen)),
+ uintptr(unsafe.Pointer(new)),
+ uintptr(newlen),
+ 0,
+ ); err != 0 {
+ return err
+ }
+
+ return nil
+}
+
+//go:cgo_import_dynamic libc_sysctlbyname sysctlbyname "/usr/lib/libSystem.B.dylib"
+
+// Implemented in the runtime package (runtime/sys_darwin.go)
+func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
+
+//go:linkname syscall_syscall6 syscall.syscall6
diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go
index 6e5c81acd0..3c7a6d6e2f 100644
--- a/vendor/golang.org/x/sys/unix/affinity_linux.go
+++ b/vendor/golang.org/x/sys/unix/affinity_linux.go
@@ -38,9 +38,7 @@ func SchedSetaffinity(pid int, set *CPUSet) error {
// Zero clears the set s, so that it contains no CPUs.
func (s *CPUSet) Zero() {
- for i := range s {
- s[i] = 0
- }
+ clear(s[:])
}
func cpuBitsIndex(cpu int) int {
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go
index abc3955477..18a3d9bdab 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -629,7 +629,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sys Kill(pid int, signum syscall.Signal) (err error)
//sys Lchown(path string, uid int, gid int) (err error)
//sys Link(path string, link string) (err error)
-//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_llisten
+//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_listen
//sys Lstat(path string, stat *Stat_t) (err error)
//sys Madvise(b []byte, advice int) (err error)
//sys Mkdir(path string, mode uint32) (err error)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
index c6545413c4..b4609c20c2 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
@@ -72,7 +72,7 @@ import (
//go:cgo_import_dynamic libc_kill kill "libc.so"
//go:cgo_import_dynamic libc_lchown lchown "libc.so"
//go:cgo_import_dynamic libc_link link "libc.so"
-//go:cgo_import_dynamic libc___xnet_llisten __xnet_llisten "libsocket.so"
+//go:cgo_import_dynamic libc___xnet_listen __xnet_listen "libsocket.so"
//go:cgo_import_dynamic libc_lstat lstat "libc.so"
//go:cgo_import_dynamic libc_madvise madvise "libc.so"
//go:cgo_import_dynamic libc_mkdir mkdir "libc.so"
@@ -221,7 +221,7 @@ import (
//go:linkname procKill libc_kill
//go:linkname procLchown libc_lchown
//go:linkname procLink libc_link
-//go:linkname proc__xnet_llisten libc___xnet_llisten
+//go:linkname proc__xnet_listen libc___xnet_listen
//go:linkname procLstat libc_lstat
//go:linkname procMadvise libc_madvise
//go:linkname procMkdir libc_mkdir
@@ -371,7 +371,7 @@ var (
procKill,
procLchown,
procLink,
- proc__xnet_llisten,
+ proc__xnet_listen,
procLstat,
procMadvise,
procMkdir,
@@ -1178,7 +1178,7 @@ func Link(path string, link string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Listen(s int, backlog int) (err error) {
- _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0)
+ _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_listen)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index cd236443f6..944e75a11c 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -632,6 +632,8 @@ const (
IFA_FLAGS = 0x8
IFA_RT_PRIORITY = 0x9
IFA_TARGET_NETNSID = 0xa
+ IFAL_LABEL = 0x2
+ IFAL_ADDRESS = 0x1
RT_SCOPE_UNIVERSE = 0x0
RT_SCOPE_SITE = 0xc8
RT_SCOPE_LINK = 0xfd
@@ -689,6 +691,7 @@ const (
SizeofRtAttr = 0x4
SizeofIfInfomsg = 0x10
SizeofIfAddrmsg = 0x8
+ SizeofIfAddrlblmsg = 0xc
SizeofIfaCacheinfo = 0x10
SizeofRtMsg = 0xc
SizeofRtNexthop = 0x8
@@ -740,6 +743,15 @@ type IfAddrmsg struct {
Index uint32
}
+type IfAddrlblmsg struct {
+ Family uint8
+ _ uint8
+ Prefixlen uint8
+ Flags uint8
+ Index uint32
+ Seq uint32
+}
+
type IfaCacheinfo struct {
Prefered uint32
Valid uint32
@@ -3052,6 +3064,23 @@ const (
)
const (
+ TCA_UNSPEC = 0x0
+ TCA_KIND = 0x1
+ TCA_OPTIONS = 0x2
+ TCA_STATS = 0x3
+ TCA_XSTATS = 0x4
+ TCA_RATE = 0x5
+ TCA_FCNT = 0x6
+ TCA_STATS2 = 0x7
+ TCA_STAB = 0x8
+ TCA_PAD = 0x9
+ TCA_DUMP_INVISIBLE = 0xa
+ TCA_CHAIN = 0xb
+ TCA_HW_OFFLOAD = 0xc
+ TCA_INGRESS_BLOCK = 0xd
+ TCA_EGRESS_BLOCK = 0xe
+ TCA_DUMP_FLAGS = 0xf
+ TCA_EXT_WARN_MSG = 0x10
RTNLGRP_NONE = 0x0
RTNLGRP_LINK = 0x1
RTNLGRP_NOTIFY = 0x2
@@ -3086,6 +3115,18 @@ const (
RTNLGRP_IPV6_MROUTE_R = 0x1f
RTNLGRP_NEXTHOP = 0x20
RTNLGRP_BRVLAN = 0x21
+ RTNLGRP_MCTP_IFADDR = 0x22
+ RTNLGRP_TUNNEL = 0x23
+ RTNLGRP_STATS = 0x24
+ RTNLGRP_IPV4_MCADDR = 0x25
+ RTNLGRP_IPV6_MCADDR = 0x26
+ RTNLGRP_IPV6_ACADDR = 0x27
+ TCA_ROOT_UNSPEC = 0x0
+ TCA_ROOT_TAB = 0x1
+ TCA_ROOT_FLAGS = 0x2
+ TCA_ROOT_COUNT = 0x3
+ TCA_ROOT_TIME_DELTA = 0x4
+ TCA_ROOT_EXT_WARN_MSG = 0x5
)
type CapUserHeader struct {
diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
index fc1835d8a2..bc1ce4360b 100644
--- a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
@@ -52,7 +52,7 @@ var (
)
func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) {
- r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result)))
+ r0, _, _ := syscall.SyscallN(procRegConnectRegistryW.Addr(), uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -60,7 +60,7 @@ func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall
}
func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) {
- r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)))
+ r0, _, _ := syscall.SyscallN(procRegCreateKeyExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -68,7 +68,7 @@ func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *
}
func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) {
- r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0)
+ r0, _, _ := syscall.SyscallN(procRegDeleteKeyW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -76,7 +76,7 @@ func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) {
}
func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) {
- r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0)
+ r0, _, _ := syscall.SyscallN(procRegDeleteValueW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -84,7 +84,7 @@ func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) {
}
func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) {
- r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0)
+ r0, _, _ := syscall.SyscallN(procRegEnumValueW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -92,7 +92,7 @@ func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint3
}
func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) {
- r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRegLoadMUIStringW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -100,7 +100,7 @@ func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint
}
func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) {
- r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize))
+ r0, _, _ := syscall.SyscallN(procRegSetValueExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -108,7 +108,7 @@ func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype
}
func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
+ r0, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index 958bcf47a3..993a2297db 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -1976,6 +1976,12 @@ const (
SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1
)
+// FILE_ZERO_DATA_INFORMATION from winioctl.h
+type FileZeroDataInformation struct {
+ FileOffset int64
+ BeyondFinalZero int64
+}
+
const (
ComputerNameNetBIOS = 0
ComputerNameDnsHostname = 1
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index a58bc48b8e..641a5f4b77 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -546,25 +546,25 @@ var (
)
func cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) {
- r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_Status.Addr(), 4, uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCM_Get_DevNode_Status.Addr(), uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags))
ret = CONFIGRET(r0)
return
}
func cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) {
- r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_ListW.Addr(), 5, uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags), 0)
+ r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_ListW.Addr(), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags))
ret = CONFIGRET(r0)
return
}
func cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) {
- r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_List_SizeW.Addr(), 4, uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_List_SizeW.Addr(), uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags))
ret = CONFIGRET(r0)
return
}
func cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) {
- r0, _, _ := syscall.Syscall(procCM_MapCrToWin32Err.Addr(), 2, uintptr(configRet), uintptr(defaultWin32Error), 0)
+ r0, _, _ := syscall.SyscallN(procCM_MapCrToWin32Err.Addr(), uintptr(configRet), uintptr(defaultWin32Error))
ret = Errno(r0)
return
}
@@ -574,7 +574,7 @@ func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups,
if resetToDefault {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
+ r1, _, e1 := syscall.SyscallN(procAdjustTokenGroups.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -586,7 +586,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok
if disableAllPrivileges {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
+ r1, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -594,7 +594,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok
}
func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) {
- r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0)
+ r1, _, e1 := syscall.SyscallN(procAllocateAndInitializeSid.Addr(), uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -602,7 +602,7 @@ func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, s
}
func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) {
- r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor)))
+ r0, _, _ := syscall.SyscallN(procBuildSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -610,7 +610,7 @@ func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries
}
func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) {
- r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info)))
+ r1, _, e1 := syscall.SyscallN(procChangeServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -618,7 +618,7 @@ func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err err
}
func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) {
- r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0)
+ r1, _, e1 := syscall.SyscallN(procChangeServiceConfigW.Addr(), uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -626,7 +626,7 @@ func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, e
}
func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) {
- r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember)))
+ r1, _, e1 := syscall.SyscallN(procCheckTokenMembership.Addr(), uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -634,7 +634,7 @@ func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (
}
func CloseServiceHandle(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCloseServiceHandle.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -642,7 +642,7 @@ func CloseServiceHandle(handle Handle) (err error) {
}
func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) {
- r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status)))
+ r1, _, e1 := syscall.SyscallN(procControlService.Addr(), uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -650,7 +650,7 @@ func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err
}
func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0)
+ r1, _, e1 := syscall.SyscallN(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -658,7 +658,7 @@ func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR
}
func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0)
+ r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -675,7 +675,7 @@ func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision ui
}
func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -683,7 +683,7 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision
}
func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) {
- r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0)
+ r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -691,7 +691,7 @@ func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) {
}
func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) {
- r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid)))
+ r1, _, e1 := syscall.SyscallN(procCopySid.Addr(), uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -703,7 +703,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc
if inheritHandles {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall12(procCreateProcessAsUserW.Addr(), 11, uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0)
+ r1, _, e1 := syscall.SyscallN(procCreateProcessAsUserW.Addr(), uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -711,7 +711,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc
}
func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -720,7 +720,7 @@ func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access
}
func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCreateWellKnownSid.Addr(), uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -728,7 +728,7 @@ func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, s
}
func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procCryptAcquireContextW.Addr(), uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -736,7 +736,7 @@ func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16
}
func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) {
- r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf)))
+ r1, _, e1 := syscall.SyscallN(procCryptGenRandom.Addr(), uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -744,7 +744,7 @@ func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) {
}
func CryptReleaseContext(provhandle Handle, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procCryptReleaseContext.Addr(), uintptr(provhandle), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -752,7 +752,7 @@ func CryptReleaseContext(provhandle Handle, flags uint32) (err error) {
}
func DeleteService(service Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDeleteService.Addr(), uintptr(service))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -760,7 +760,7 @@ func DeleteService(service Handle) (err error) {
}
func DeregisterEventSource(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDeregisterEventSource.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -768,7 +768,7 @@ func DeregisterEventSource(handle Handle) (err error) {
}
func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) {
- r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken)))
+ r1, _, e1 := syscall.SyscallN(procDuplicateTokenEx.Addr(), uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -776,7 +776,7 @@ func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes
}
func EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procEnumDependentServicesW.Addr(), 6, uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)))
+ r1, _, e1 := syscall.SyscallN(procEnumDependentServicesW.Addr(), uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -784,7 +784,7 @@ func EnumDependentServices(service Handle, activityState uint32, services *ENUM_
}
func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) {
- r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procEnumServicesStatusExW.Addr(), uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -792,13 +792,13 @@ func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serv
}
func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) {
- r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0)
+ r0, _, _ := syscall.SyscallN(procEqualSid.Addr(), uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)))
isEqual = r0 != 0
return
}
func FreeSid(sid *SID) (err error) {
- r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFreeSid.Addr(), uintptr(unsafe.Pointer(sid)))
if r1 != 0 {
err = errnoErr(e1)
}
@@ -806,7 +806,7 @@ func FreeSid(sid *SID) (err error) {
}
func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) {
- r1, _, e1 := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce)))
+ r1, _, e1 := syscall.SyscallN(procGetAce.Addr(), uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -814,7 +814,7 @@ func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) {
}
func GetLengthSid(sid *SID) (len uint32) {
- r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetLengthSid.Addr(), uintptr(unsafe.Pointer(sid)))
len = uint32(r0)
return
}
@@ -829,7 +829,7 @@ func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security
}
func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) {
- r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0)
+ r0, _, _ := syscall.SyscallN(procGetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -837,7 +837,7 @@ func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi
}
func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision)))
+ r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -853,7 +853,7 @@ func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl
if *daclDefaulted {
_p1 = 1
}
- r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)))
*daclPresent = _p0 != 0
*daclDefaulted = _p1 != 0
if r1 == 0 {
@@ -867,7 +867,7 @@ func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau
if *groupDefaulted {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0)))
+ r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0)))
*groupDefaulted = _p0 != 0
if r1 == 0 {
err = errnoErr(e1)
@@ -876,7 +876,7 @@ func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau
}
func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) {
- r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorLength.Addr(), uintptr(unsafe.Pointer(sd)))
len = uint32(r0)
return
}
@@ -886,7 +886,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefau
if *ownerDefaulted {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0)))
+ r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0)))
*ownerDefaulted = _p0 != 0
if r1 == 0 {
err = errnoErr(e1)
@@ -895,7 +895,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefau
}
func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) {
- r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0)
+ r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -911,7 +911,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl
if *saclDefaulted {
_p1 = 1
}
- r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)))
*saclPresent = _p0 != 0
*saclDefaulted = _p1 != 0
if r1 == 0 {
@@ -921,7 +921,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl
}
func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) {
- r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0)
+ r0, _, _ := syscall.SyscallN(procGetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -929,25 +929,25 @@ func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati
}
func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) {
- r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetSidIdentifierAuthority.Addr(), uintptr(unsafe.Pointer(sid)))
authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0))
return
}
func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) {
- r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0)
+ r0, _, _ := syscall.SyscallN(procGetSidSubAuthority.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(index))
subAuthority = (*uint32)(unsafe.Pointer(r0))
return
}
func getSidSubAuthorityCount(sid *SID) (count *uint8) {
- r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetSidSubAuthorityCount.Addr(), uintptr(unsafe.Pointer(sid)))
count = (*uint8)(unsafe.Pointer(r0))
return
}
func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -955,7 +955,7 @@ func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint
}
func ImpersonateSelf(impersonationlevel uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(impersonationlevel))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -963,7 +963,7 @@ func ImpersonateSelf(impersonationlevel uint32) (err error) {
}
func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0)
+ r1, _, e1 := syscall.SyscallN(procInitializeSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -979,7 +979,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint
if rebootAfterShutdown {
_p1 = 1
}
- r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason))
+ r1, _, e1 := syscall.SyscallN(procInitiateSystemShutdownExW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -987,7 +987,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint
}
func isTokenRestricted(tokenHandle Token) (ret bool, err error) {
- r0, _, e1 := syscall.Syscall(procIsTokenRestricted.Addr(), 1, uintptr(tokenHandle), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procIsTokenRestricted.Addr(), uintptr(tokenHandle))
ret = r0 != 0
if !ret {
err = errnoErr(e1)
@@ -996,25 +996,25 @@ func isTokenRestricted(tokenHandle Token) (ret bool, err error) {
}
func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) {
- r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procIsValidSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(sd)))
isValid = r0 != 0
return
}
func isValidSid(sid *SID) (isValid bool) {
- r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procIsValidSid.Addr(), uintptr(unsafe.Pointer(sid)))
isValid = r0 != 0
return
}
func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) {
- r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0)
+ r0, _, _ := syscall.SyscallN(procIsWellKnownSid.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(sidType))
isWellKnown = r0 != 0
return
}
func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1022,7 +1022,7 @@ func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen
}
func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1030,7 +1030,7 @@ func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint3
}
func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) {
- r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
+ r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1038,7 +1038,7 @@ func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err err
}
func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0)
+ r1, _, e1 := syscall.SyscallN(procMakeAbsoluteSD.Addr(), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1046,7 +1046,7 @@ func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DE
}
func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize)))
+ r1, _, e1 := syscall.SyscallN(procMakeSelfRelativeSD.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1054,7 +1054,7 @@ func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURIT
}
func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) {
- r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier)))
+ r0, _, _ := syscall.SyscallN(procNotifyServiceStatusChangeW.Addr(), uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1062,7 +1062,7 @@ func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERV
}
func OpenProcessToken(process Handle, access uint32, token *Token) (err error) {
- r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token)))
+ r1, _, e1 := syscall.SyscallN(procOpenProcessToken.Addr(), uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1070,7 +1070,7 @@ func OpenProcessToken(process Handle, access uint32, token *Token) (err error) {
}
func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access))
+ r0, _, e1 := syscall.SyscallN(procOpenSCManagerW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1079,7 +1079,7 @@ func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (ha
}
func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access))
+ r0, _, e1 := syscall.SyscallN(procOpenServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1092,7 +1092,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token
if openAsSelf {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1100,7 +1100,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token
}
func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0)
+ r1, _, e1 := syscall.SyscallN(procQueryServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1108,7 +1108,7 @@ func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize
}
func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procQueryServiceConfigW.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1120,7 +1120,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf
if err != nil {
return
}
- r1, _, e1 := syscall.Syscall(procQueryServiceDynamicInformation.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo))
+ r1, _, e1 := syscall.SyscallN(procQueryServiceDynamicInformation.Addr(), uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1128,7 +1128,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf
}
func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procQueryServiceLockStatusW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1136,7 +1136,7 @@ func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, b
}
func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) {
- r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0)
+ r1, _, e1 := syscall.SyscallN(procQueryServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(status)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1144,7 +1144,7 @@ func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) {
}
func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0)
+ r1, _, e1 := syscall.SyscallN(procQueryServiceStatusEx.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1152,7 +1152,7 @@ func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize
}
func RegCloseKey(key Handle) (regerrno error) {
- r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRegCloseKey.Addr(), uintptr(key))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1160,7 +1160,7 @@ func RegCloseKey(key Handle) (regerrno error) {
}
func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) {
- r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0)
+ r0, _, _ := syscall.SyscallN(procRegEnumKeyExW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1176,7 +1176,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32,
if asynchronous {
_p1 = 1
}
- r0, _, _ := syscall.Syscall6(procRegNotifyChangeKeyValue.Addr(), 5, uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1), 0)
+ r0, _, _ := syscall.SyscallN(procRegNotifyChangeKeyValue.Addr(), uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1184,7 +1184,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32,
}
func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) {
- r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0)
+ r0, _, _ := syscall.SyscallN(procRegOpenKeyExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1192,7 +1192,7 @@ func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint
}
func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) {
- r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime)))
+ r0, _, _ := syscall.SyscallN(procRegQueryInfoKeyW.Addr(), uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1200,7 +1200,7 @@ func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint
}
func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) {
- r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)))
+ r0, _, _ := syscall.SyscallN(procRegQueryValueExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1208,7 +1208,7 @@ func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32
}
func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0)
+ r0, _, e1 := syscall.SyscallN(procRegisterEventSourceW.Addr(), uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1217,7 +1217,7 @@ func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Hand
}
func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procRegisterServiceCtrlHandlerExW.Addr(), 3, uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context))
+ r0, _, e1 := syscall.SyscallN(procRegisterServiceCtrlHandlerExW.Addr(), uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1226,7 +1226,7 @@ func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, cont
}
func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) {
- r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData)))
+ r1, _, e1 := syscall.SyscallN(procReportEventW.Addr(), uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1234,7 +1234,7 @@ func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrS
}
func RevertToSelf() (err error) {
- r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
+ r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr())
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1242,7 +1242,7 @@ func RevertToSelf() (err error) {
}
func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) {
- r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procSetEntriesInAclW.Addr(), uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1250,7 +1250,7 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE
}
func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) {
- r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor)))
+ r1, _, e1 := syscall.SyscallN(procSetKernelObjectSecurity.Addr(), uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1267,7 +1267,7 @@ func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security
}
func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) {
- r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procSetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1275,7 +1275,7 @@ func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi
}
func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) {
- r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet))
+ r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1291,7 +1291,7 @@ func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *
if daclDefaulted {
_p1 = 1
}
- r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1303,7 +1303,7 @@ func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaul
if groupDefaulted {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0))
+ r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1315,7 +1315,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul
if ownerDefaulted {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0))
+ r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1323,7 +1323,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul
}
func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) {
- syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0)
+ syscall.SyscallN(procSetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)))
return
}
@@ -1336,7 +1336,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *
if saclDefaulted {
_p1 = 1
}
- r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1344,7 +1344,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *
}
func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) {
- r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procSetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1352,7 +1352,7 @@ func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati
}
func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) {
- r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceStatus)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1360,7 +1360,7 @@ func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error)
}
func SetThreadToken(thread *Handle, token Token) (err error) {
- r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0)
+ r1, _, e1 := syscall.SyscallN(procSetThreadToken.Addr(), uintptr(unsafe.Pointer(thread)), uintptr(token))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1368,7 +1368,7 @@ func SetThreadToken(thread *Handle, token Token) (err error) {
}
func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1376,7 +1376,7 @@ func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint
}
func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) {
- r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procStartServiceCtrlDispatcherW.Addr(), uintptr(unsafe.Pointer(serviceTable)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1384,7 +1384,7 @@ func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) {
}
func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors)))
+ r1, _, e1 := syscall.SyscallN(procStartServiceW.Addr(), uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1392,7 +1392,7 @@ func StartService(service Handle, numArgs uint32, argVectors **uint16) (err erro
}
func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) {
- r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCertAddCertificateContextToStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1400,7 +1400,7 @@ func CertAddCertificateContextToStore(store Handle, certContext *CertContext, ad
}
func CertCloseStore(store Handle, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procCertCloseStore.Addr(), uintptr(store), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1408,7 +1408,7 @@ func CertCloseStore(store Handle, flags uint32) (err error) {
}
func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) {
- r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen))
+ r0, _, e1 := syscall.SyscallN(procCertCreateCertificateContext.Addr(), uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen))
context = (*CertContext)(unsafe.Pointer(r0))
if context == nil {
err = errnoErr(e1)
@@ -1417,7 +1417,7 @@ func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, en
}
func CertDeleteCertificateFromStore(certContext *CertContext) (err error) {
- r1, _, e1 := syscall.Syscall(procCertDeleteCertificateFromStore.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCertDeleteCertificateFromStore.Addr(), uintptr(unsafe.Pointer(certContext)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1425,13 +1425,13 @@ func CertDeleteCertificateFromStore(certContext *CertContext) (err error) {
}
func CertDuplicateCertificateContext(certContext *CertContext) (dupContext *CertContext) {
- r0, _, _ := syscall.Syscall(procCertDuplicateCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCertDuplicateCertificateContext.Addr(), uintptr(unsafe.Pointer(certContext)))
dupContext = (*CertContext)(unsafe.Pointer(r0))
return
}
func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) {
- r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0)
+ r0, _, e1 := syscall.SyscallN(procCertEnumCertificatesInStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(prevContext)))
context = (*CertContext)(unsafe.Pointer(r0))
if context == nil {
err = errnoErr(e1)
@@ -1440,7 +1440,7 @@ func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (contex
}
func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) {
- r0, _, e1 := syscall.Syscall6(procCertFindCertificateInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext)))
+ r0, _, e1 := syscall.SyscallN(procCertFindCertificateInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext)))
cert = (*CertContext)(unsafe.Pointer(r0))
if cert == nil {
err = errnoErr(e1)
@@ -1449,7 +1449,7 @@ func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags
}
func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) {
- r0, _, e1 := syscall.Syscall6(procCertFindChainInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext)))
+ r0, _, e1 := syscall.SyscallN(procCertFindChainInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext)))
certchain = (*CertChainContext)(unsafe.Pointer(r0))
if certchain == nil {
err = errnoErr(e1)
@@ -1458,18 +1458,18 @@ func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint3
}
func CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) {
- r0, _, _ := syscall.Syscall(procCertFindExtension.Addr(), 3, uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions)))
+ r0, _, _ := syscall.SyscallN(procCertFindExtension.Addr(), uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions)))
ret = (*CertExtension)(unsafe.Pointer(r0))
return
}
func CertFreeCertificateChain(ctx *CertChainContext) {
- syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0)
+ syscall.SyscallN(procCertFreeCertificateChain.Addr(), uintptr(unsafe.Pointer(ctx)))
return
}
func CertFreeCertificateContext(ctx *CertContext) (err error) {
- r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCertFreeCertificateContext.Addr(), uintptr(unsafe.Pointer(ctx)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1477,7 +1477,7 @@ func CertFreeCertificateContext(ctx *CertContext) (err error) {
}
func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) {
- r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0)
+ r1, _, e1 := syscall.SyscallN(procCertGetCertificateChain.Addr(), uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1485,13 +1485,13 @@ func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, a
}
func CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) {
- r0, _, _ := syscall.Syscall6(procCertGetNameStringW.Addr(), 6, uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size))
+ r0, _, _ := syscall.SyscallN(procCertGetNameStringW.Addr(), uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size))
chars = uint32(r0)
return
}
func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0)
+ r0, _, e1 := syscall.SyscallN(procCertOpenStore.Addr(), uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1500,7 +1500,7 @@ func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptPr
}
func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) {
- r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0)
+ r0, _, e1 := syscall.SyscallN(procCertOpenSystemStoreW.Addr(), uintptr(hprov), uintptr(unsafe.Pointer(name)))
store = Handle(r0)
if store == 0 {
err = errnoErr(e1)
@@ -1509,7 +1509,7 @@ func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) {
}
func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) {
- r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCertVerifyCertificateChainPolicy.Addr(), uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1521,7 +1521,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete
if *callerFreeProvOrNCryptKey {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procCryptAcquireCertificatePrivateKey.Addr(), 6, uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0)))
+ r1, _, e1 := syscall.SyscallN(procCryptAcquireCertificatePrivateKey.Addr(), uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0)))
*callerFreeProvOrNCryptKey = _p0 != 0
if r1 == 0 {
err = errnoErr(e1)
@@ -1530,7 +1530,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete
}
func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procCryptDecodeObject.Addr(), 7, uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCryptDecodeObject.Addr(), uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1538,7 +1538,7 @@ func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte
}
func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) {
- r1, _, e1 := syscall.Syscall9(procCryptProtectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCryptProtectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1546,7 +1546,7 @@ func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob,
}
func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) {
- r1, _, e1 := syscall.Syscall12(procCryptQueryObject.Addr(), 11, uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)), 0)
+ r1, _, e1 := syscall.SyscallN(procCryptQueryObject.Addr(), uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1554,7 +1554,7 @@ func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentT
}
func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) {
- r1, _, e1 := syscall.Syscall9(procCryptUnprotectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCryptUnprotectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1562,7 +1562,7 @@ func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBl
}
func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) {
- r0, _, e1 := syscall.Syscall(procPFXImportCertStore.Addr(), 3, uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags))
+ r0, _, e1 := syscall.SyscallN(procPFXImportCertStore.Addr(), uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags))
store = Handle(r0)
if store == 0 {
err = errnoErr(e1)
@@ -1571,7 +1571,7 @@ func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (sto
}
func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) {
- r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0)
+ r0, _, _ := syscall.SyscallN(procDnsNameCompare_W.Addr(), uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)))
same = r0 != 0
return
}
@@ -1586,7 +1586,7 @@ func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSR
}
func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) {
- r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr)))
+ r0, _, _ := syscall.SyscallN(procDnsQuery_W.Addr(), uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr)))
if r0 != 0 {
status = syscall.Errno(r0)
}
@@ -1594,12 +1594,12 @@ func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DN
}
func DnsRecordListFree(rl *DNSRecord, freetype uint32) {
- syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0)
+ syscall.SyscallN(procDnsRecordListFree.Addr(), uintptr(unsafe.Pointer(rl)), uintptr(freetype))
return
}
func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) {
- r0, _, _ := syscall.Syscall6(procDwmGetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0)
+ r0, _, _ := syscall.SyscallN(procDwmGetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1607,7 +1607,7 @@ func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si
}
func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) {
- r0, _, _ := syscall.Syscall6(procDwmSetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0)
+ r0, _, _ := syscall.SyscallN(procDwmSetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1615,7 +1615,7 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si
}
func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) {
- r0, _, _ := syscall.Syscall(procCancelMibChangeNotify2.Addr(), 1, uintptr(notificationHandle), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1623,7 +1623,7 @@ func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) {
}
func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) {
- r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0)
+ r0, _, _ := syscall.SyscallN(procGetAdaptersAddresses.Addr(), uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1631,7 +1631,7 @@ func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapter
}
func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) {
- r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0)
+ r0, _, _ := syscall.SyscallN(procGetAdaptersInfo.Addr(), uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1639,7 +1639,7 @@ func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) {
}
func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) {
- r0, _, _ := syscall.Syscall(procGetBestInterfaceEx.Addr(), 2, uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)), 0)
+ r0, _, _ := syscall.SyscallN(procGetBestInterfaceEx.Addr(), uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1647,7 +1647,7 @@ func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcod
}
func GetIfEntry(pIfRow *MibIfRow) (errcode error) {
- r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetIfEntry.Addr(), uintptr(unsafe.Pointer(pIfRow)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1655,7 +1655,7 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) {
}
func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) {
- r0, _, _ := syscall.Syscall(procGetIfEntry2Ex.Addr(), 2, uintptr(level), uintptr(unsafe.Pointer(row)), 0)
+ r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1663,7 +1663,7 @@ func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) {
}
func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) {
- r0, _, _ := syscall.Syscall(procGetUnicastIpAddressEntry.Addr(), 1, uintptr(unsafe.Pointer(row)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1675,7 +1675,7 @@ func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa
if initialNotification {
_p0 = 1
}
- r0, _, _ := syscall.Syscall6(procNotifyIpInterfaceChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0)
+ r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1687,7 +1687,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext
if initialNotification {
_p0 = 1
}
- r0, _, _ := syscall.Syscall6(procNotifyUnicastIpAddressChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0)
+ r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1695,7 +1695,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext
}
func AddDllDirectory(path *uint16) (cookie uintptr, err error) {
- r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procAddDllDirectory.Addr(), uintptr(unsafe.Pointer(path)))
cookie = uintptr(r0)
if cookie == 0 {
err = errnoErr(e1)
@@ -1704,7 +1704,7 @@ func AddDllDirectory(path *uint16) (cookie uintptr, err error) {
}
func AssignProcessToJobObject(job Handle, process Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0)
+ r1, _, e1 := syscall.SyscallN(procAssignProcessToJobObject.Addr(), uintptr(job), uintptr(process))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1712,7 +1712,7 @@ func AssignProcessToJobObject(job Handle, process Handle) (err error) {
}
func CancelIo(s Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCancelIo.Addr(), uintptr(s))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1720,7 +1720,7 @@ func CancelIo(s Handle) (err error) {
}
func CancelIoEx(s Handle, o *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0)
+ r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(s), uintptr(unsafe.Pointer(o)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1728,7 +1728,7 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) {
}
func ClearCommBreak(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procClearCommBreak.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1736,7 +1736,7 @@ func ClearCommBreak(handle Handle) (err error) {
}
func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) {
- r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat)))
+ r1, _, e1 := syscall.SyscallN(procClearCommError.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1744,7 +1744,7 @@ func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error
}
func CloseHandle(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCloseHandle.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1752,12 +1752,12 @@ func CloseHandle(handle Handle) (err error) {
}
func ClosePseudoConsole(console Handle) {
- syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(console), 0, 0)
+ syscall.SyscallN(procClosePseudoConsole.Addr(), uintptr(console))
return
}
func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1765,7 +1765,7 @@ func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) {
}
func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) {
- r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0)
+ r1, _, e1 := syscall.SyscallN(procCreateDirectoryW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1773,7 +1773,7 @@ func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) {
}
func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateEventExW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess))
handle = Handle(r0)
if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
err = errnoErr(e1)
@@ -1782,7 +1782,7 @@ func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, d
}
func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateEventW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
err = errnoErr(e1)
@@ -1791,7 +1791,7 @@ func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialStat
}
func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name)))
+ r0, _, e1 := syscall.SyscallN(procCreateFileMappingW.Addr(), uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
err = errnoErr(e1)
@@ -1800,7 +1800,7 @@ func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxS
}
func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -1809,7 +1809,7 @@ func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes
}
func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved))
+ r1, _, e1 := syscall.SyscallN(procCreateHardLinkW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved))
if r1&0xff == 0 {
err = errnoErr(e1)
}
@@ -1817,7 +1817,7 @@ func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr
}
func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, threadcnt uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1826,7 +1826,7 @@ func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, thr
}
func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0)
+ r0, _, e1 := syscall.SyscallN(procCreateJobObjectW.Addr(), uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1835,7 +1835,7 @@ func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle,
}
func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateMutexExW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess))
handle = Handle(r0)
if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
err = errnoErr(e1)
@@ -1848,7 +1848,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16
if initialOwner {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name)))
+ r0, _, e1 := syscall.SyscallN(procCreateMutexW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
err = errnoErr(e1)
@@ -1857,7 +1857,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16
}
func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
+ r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -1866,7 +1866,7 @@ func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances u
}
func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCreatePipe.Addr(), uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1878,7 +1878,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA
if inheritHandles {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCreateProcessW.Addr(), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1886,7 +1886,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA
}
func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) {
- r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)), 0)
+ r0, _, _ := syscall.SyscallN(procCreatePseudoConsole.Addr(), uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)))
if r0 != 0 {
hr = syscall.Errno(r0)
}
@@ -1894,7 +1894,7 @@ func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pcons
}
func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags))
+ r1, _, e1 := syscall.SyscallN(procCreateSymbolicLinkW.Addr(), uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags))
if r1&0xff == 0 {
err = errnoErr(e1)
}
@@ -1902,7 +1902,7 @@ func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags u
}
func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0)
+ r0, _, e1 := syscall.SyscallN(procCreateToolhelp32Snapshot.Addr(), uintptr(flags), uintptr(processId))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -1911,7 +1911,7 @@ func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, er
}
func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)))
+ r1, _, e1 := syscall.SyscallN(procDefineDosDeviceW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1919,7 +1919,7 @@ func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err
}
func DeleteFile(path *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDeleteFileW.Addr(), uintptr(unsafe.Pointer(path)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1927,12 +1927,12 @@ func DeleteFile(path *uint16) (err error) {
}
func deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) {
- syscall.Syscall(procDeleteProcThreadAttributeList.Addr(), 1, uintptr(unsafe.Pointer(attrlist)), 0, 0)
+ syscall.SyscallN(procDeleteProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist)))
return
}
func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDeleteVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1940,7 +1940,7 @@ func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) {
}
func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procDeviceIoControl.Addr(), uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1948,7 +1948,7 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff
}
func DisconnectNamedPipe(pipe Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1960,7 +1960,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP
if bInheritHandle {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDuplicateHandle.Addr(), uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1968,7 +1968,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP
}
func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0)
+ r1, _, e1 := syscall.SyscallN(procEscapeCommFunction.Addr(), uintptr(handle), uintptr(dwFunc))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1976,12 +1976,12 @@ func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) {
}
func ExitProcess(exitcode uint32) {
- syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0)
+ syscall.SyscallN(procExitProcess.Addr(), uintptr(exitcode))
return
}
func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
+ r0, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -1990,7 +1990,7 @@ func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32,
}
func FindClose(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFindClose.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1998,7 +1998,7 @@ func FindClose(handle Handle) (err error) {
}
func FindCloseChangeNotification(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFindCloseChangeNotification.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFindCloseChangeNotification.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2019,7 +2019,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter
if watchSubtree {
_p1 = 1
}
- r0, _, e1 := syscall.Syscall(procFindFirstChangeNotificationW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter))
+ r0, _, e1 := syscall.SyscallN(procFindFirstChangeNotificationW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -2028,7 +2028,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter
}
func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0)
+ r0, _, e1 := syscall.SyscallN(procFindFirstFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -2037,7 +2037,7 @@ func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err erro
}
func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
+ r0, _, e1 := syscall.SyscallN(procFindFirstVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -2046,7 +2046,7 @@ func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, b
}
func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0)
+ r0, _, e1 := syscall.SyscallN(procFindFirstVolumeW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -2055,7 +2055,7 @@ func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, er
}
func FindNextChangeNotification(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFindNextChangeNotification.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFindNextChangeNotification.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2063,7 +2063,7 @@ func FindNextChangeNotification(handle Handle) (err error) {
}
func findNextFile1(handle Handle, data *win32finddata1) (err error) {
- r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0)
+ r1, _, e1 := syscall.SyscallN(procFindNextFileW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2071,7 +2071,7 @@ func findNextFile1(handle Handle, data *win32finddata1) (err error) {
}
func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
+ r1, _, e1 := syscall.SyscallN(procFindNextVolumeMountPointW.Addr(), uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2079,7 +2079,7 @@ func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uin
}
func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength))
+ r1, _, e1 := syscall.SyscallN(procFindNextVolumeW.Addr(), uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2087,7 +2087,7 @@ func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32)
}
func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) {
- r0, _, e1 := syscall.Syscall(procFindResourceW.Addr(), 3, uintptr(module), uintptr(name), uintptr(resType))
+ r0, _, e1 := syscall.SyscallN(procFindResourceW.Addr(), uintptr(module), uintptr(name), uintptr(resType))
resInfo = Handle(r0)
if resInfo == 0 {
err = errnoErr(e1)
@@ -2096,7 +2096,7 @@ func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle,
}
func FindVolumeClose(findVolume Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFindVolumeClose.Addr(), uintptr(findVolume))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2104,7 +2104,7 @@ func FindVolumeClose(findVolume Handle) (err error) {
}
func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFindVolumeMountPointClose.Addr(), uintptr(findVolumeMountPoint))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2112,7 +2112,7 @@ func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) {
}
func FlushFileBuffers(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFlushFileBuffers.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2120,7 +2120,7 @@ func FlushFileBuffers(handle Handle) (err error) {
}
func FlushViewOfFile(addr uintptr, length uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0)
+ r1, _, e1 := syscall.SyscallN(procFlushViewOfFile.Addr(), uintptr(addr), uintptr(length))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2132,7 +2132,7 @@ func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu
if len(buf) > 0 {
_p0 = &buf[0]
}
- r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procFormatMessageW.Addr(), uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2141,7 +2141,7 @@ func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu
}
func FreeEnvironmentStrings(envs *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFreeEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(envs)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2149,7 +2149,7 @@ func FreeEnvironmentStrings(envs *uint16) (err error) {
}
func FreeLibrary(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFreeLibrary.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2157,7 +2157,7 @@ func FreeLibrary(handle Handle) (err error) {
}
func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0)
+ r1, _, e1 := syscall.SyscallN(procGenerateConsoleCtrlEvent.Addr(), uintptr(ctrlEvent), uintptr(processGroupID))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2165,19 +2165,19 @@ func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err erro
}
func GetACP() (acp uint32) {
- r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetACP.Addr())
acp = uint32(r0)
return
}
func GetActiveProcessorCount(groupNumber uint16) (ret uint32) {
- r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetActiveProcessorCount.Addr(), uintptr(groupNumber))
ret = uint32(r0)
return
}
func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetCommModemStatus.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2185,7 +2185,7 @@ func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) {
}
func GetCommState(handle Handle, lpDCB *DCB) (err error) {
- r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2193,7 +2193,7 @@ func GetCommState(handle Handle, lpDCB *DCB) (err error) {
}
func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) {
- r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2201,13 +2201,13 @@ func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) {
}
func GetCommandLine() (cmd *uint16) {
- r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetCommandLineW.Addr())
cmd = (*uint16)(unsafe.Pointer(r0))
return
}
func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)))
+ r1, _, e1 := syscall.SyscallN(procGetComputerNameExW.Addr(), uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2215,7 +2215,7 @@ func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) {
}
func GetComputerName(buf *uint16, n *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetComputerNameW.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2223,7 +2223,7 @@ func GetComputerName(buf *uint16, n *uint32) (err error) {
}
func GetConsoleCP() (cp uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetConsoleCP.Addr())
cp = uint32(r0)
if cp == 0 {
err = errnoErr(e1)
@@ -2232,7 +2232,7 @@ func GetConsoleCP() (cp uint32, err error) {
}
func GetConsoleMode(console Handle, mode *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetConsoleMode.Addr(), uintptr(console), uintptr(unsafe.Pointer(mode)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2240,7 +2240,7 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) {
}
func GetConsoleOutputCP() (cp uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetConsoleOutputCP.Addr())
cp = uint32(r0)
if cp == 0 {
err = errnoErr(e1)
@@ -2249,7 +2249,7 @@ func GetConsoleOutputCP() (cp uint32, err error) {
}
func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) {
- r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetConsoleScreenBufferInfo.Addr(), uintptr(console), uintptr(unsafe.Pointer(info)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2257,7 +2257,7 @@ func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (
}
func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetCurrentDirectoryW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf)))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2266,19 +2266,19 @@ func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) {
}
func GetCurrentProcessId() (pid uint32) {
- r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetCurrentProcessId.Addr())
pid = uint32(r0)
return
}
func GetCurrentThreadId() (id uint32) {
- r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetCurrentThreadId.Addr())
id = uint32(r0)
return
}
func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetDiskFreeSpaceExW.Addr(), uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2286,13 +2286,13 @@ func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint6
}
func GetDriveType(rootPathName *uint16) (driveType uint32) {
- r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetDriveTypeW.Addr(), uintptr(unsafe.Pointer(rootPathName)))
driveType = uint32(r0)
return
}
func GetEnvironmentStrings() (envs *uint16, err error) {
- r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetEnvironmentStringsW.Addr())
envs = (*uint16)(unsafe.Pointer(r0))
if envs == nil {
err = errnoErr(e1)
@@ -2301,7 +2301,7 @@ func GetEnvironmentStrings() (envs *uint16, err error) {
}
func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size))
+ r0, _, e1 := syscall.SyscallN(procGetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2310,7 +2310,7 @@ func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32
}
func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetExitCodeProcess.Addr(), uintptr(handle), uintptr(unsafe.Pointer(exitcode)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2318,7 +2318,7 @@ func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) {
}
func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) {
- r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info)))
+ r1, _, e1 := syscall.SyscallN(procGetFileAttributesExW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2326,7 +2326,7 @@ func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) {
}
func GetFileAttributes(name *uint16) (attrs uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name)))
attrs = uint32(r0)
if attrs == INVALID_FILE_ATTRIBUTES {
err = errnoErr(e1)
@@ -2335,7 +2335,7 @@ func GetFileAttributes(name *uint16) (attrs uint32, err error) {
}
func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) {
- r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandle.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2343,7 +2343,7 @@ func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (e
}
func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandleEx.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2351,7 +2351,7 @@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte,
}
func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2359,7 +2359,7 @@ func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim
}
func GetFileType(filehandle Handle) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetFileType.Addr(), uintptr(filehandle))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2368,7 +2368,7 @@ func GetFileType(filehandle Handle) (n uint32, err error) {
}
func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetFinalPathNameByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2377,7 +2377,7 @@ func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32
}
func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetFullPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2386,13 +2386,13 @@ func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (
}
func GetLargePageMinimum() (size uintptr) {
- r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetLargePageMinimum.Addr())
size = uintptr(r0)
return
}
func GetLastError() (lasterr error) {
- r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetLastError.Addr())
if r0 != 0 {
lasterr = syscall.Errno(r0)
}
@@ -2400,7 +2400,7 @@ func GetLastError() (lasterr error) {
}
func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetLogicalDriveStringsW.Addr(), uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2409,7 +2409,7 @@ func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err
}
func GetLogicalDrives() (drivesBitMask uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetLogicalDrives.Addr())
drivesBitMask = uint32(r0)
if drivesBitMask == 0 {
err = errnoErr(e1)
@@ -2418,7 +2418,7 @@ func GetLogicalDrives() (drivesBitMask uint32, err error) {
}
func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen))
+ r0, _, e1 := syscall.SyscallN(procGetLongPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2427,13 +2427,13 @@ func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err er
}
func GetMaximumProcessorCount(groupNumber uint16) (ret uint32) {
- r0, _, _ := syscall.Syscall(procGetMaximumProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetMaximumProcessorCount.Addr(), uintptr(groupNumber))
ret = uint32(r0)
return
}
func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size))
+ r0, _, e1 := syscall.SyscallN(procGetModuleFileNameW.Addr(), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2442,7 +2442,7 @@ func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32,
}
func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module)))
+ r1, _, e1 := syscall.SyscallN(procGetModuleHandleExW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2450,7 +2450,7 @@ func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err er
}
func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetNamedPipeClientProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetNamedPipeClientProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2458,7 +2458,7 @@ func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err erro
}
func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2466,7 +2466,7 @@ func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, m
}
func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2474,7 +2474,7 @@ func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint3
}
func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetNamedPipeServerProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetNamedPipeServerProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2486,7 +2486,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa
if wait {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetOverlappedResult.Addr(), uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2494,7 +2494,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa
}
func GetPriorityClass(process Handle) (ret uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetPriorityClass.Addr(), uintptr(process))
ret = uint32(r0)
if ret == 0 {
err = errnoErr(e1)
@@ -2512,7 +2512,7 @@ func GetProcAddress(module Handle, procname string) (proc uintptr, err error) {
}
func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) {
- r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetProcAddress.Addr(), uintptr(module), uintptr(unsafe.Pointer(procname)))
proc = uintptr(r0)
if proc == 0 {
err = errnoErr(e1)
@@ -2521,7 +2521,7 @@ func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) {
}
func GetProcessId(process Handle) (id uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetProcessId.Addr(), uintptr(process))
id = uint32(r0)
if id == 0 {
err = errnoErr(e1)
@@ -2530,7 +2530,7 @@ func GetProcessId(process Handle) (id uint32, err error) {
}
func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetProcessPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2538,7 +2538,7 @@ func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uin
}
func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetProcessShutdownParameters.Addr(), uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2546,7 +2546,7 @@ func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) {
}
func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetProcessTimes.Addr(), uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2554,12 +2554,12 @@ func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime,
}
func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) {
- syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0)
+ syscall.SyscallN(procGetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)))
return
}
func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0)
+ r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2567,7 +2567,7 @@ func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overl
}
func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen))
+ r0, _, e1 := syscall.SyscallN(procGetShortPathNameW.Addr(), uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2576,12 +2576,12 @@ func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uin
}
func getStartupInfo(startupInfo *StartupInfo) {
- syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0)
+ syscall.SyscallN(procGetStartupInfoW.Addr(), uintptr(unsafe.Pointer(startupInfo)))
return
}
func GetStdHandle(stdhandle uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetStdHandle.Addr(), uintptr(stdhandle))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -2590,7 +2590,7 @@ func GetStdHandle(stdhandle uint32) (handle Handle, err error) {
}
func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0)
+ r0, _, e1 := syscall.SyscallN(procGetSystemDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen))
len = uint32(r0)
if len == 0 {
err = errnoErr(e1)
@@ -2599,7 +2599,7 @@ func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
}
func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetSystemPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2607,17 +2607,17 @@ func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint
}
func GetSystemTimeAsFileTime(time *Filetime) {
- syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0)
+ syscall.SyscallN(procGetSystemTimeAsFileTime.Addr(), uintptr(unsafe.Pointer(time)))
return
}
func GetSystemTimePreciseAsFileTime(time *Filetime) {
- syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0)
+ syscall.SyscallN(procGetSystemTimePreciseAsFileTime.Addr(), uintptr(unsafe.Pointer(time)))
return
}
func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0)
+ r0, _, e1 := syscall.SyscallN(procGetSystemWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen))
len = uint32(r0)
if len == 0 {
err = errnoErr(e1)
@@ -2626,7 +2626,7 @@ func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err erro
}
func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetTempPathW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf)))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2635,7 +2635,7 @@ func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) {
}
func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetThreadPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2643,13 +2643,13 @@ func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint
}
func getTickCount64() (ms uint64) {
- r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetTickCount64.Addr())
ms = uint64(r0)
return
}
func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetTimeZoneInformation.Addr(), uintptr(unsafe.Pointer(tzi)))
rc = uint32(r0)
if rc == 0xffffffff {
err = errnoErr(e1)
@@ -2658,7 +2658,7 @@ func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) {
}
func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetUserPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2666,7 +2666,7 @@ func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16
}
func GetVersion() (ver uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetVersion.Addr())
ver = uint32(r0)
if ver == 0 {
err = errnoErr(e1)
@@ -2675,7 +2675,7 @@ func GetVersion() (ver uint32, err error) {
}
func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0)
+ r1, _, e1 := syscall.SyscallN(procGetVolumeInformationByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2683,7 +2683,7 @@ func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeN
}
func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0)
+ r1, _, e1 := syscall.SyscallN(procGetVolumeInformationW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2691,7 +2691,7 @@ func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volume
}
func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength))
+ r1, _, e1 := syscall.SyscallN(procGetVolumeNameForVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2699,7 +2699,7 @@ func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint
}
func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength))
+ r1, _, e1 := syscall.SyscallN(procGetVolumePathNameW.Addr(), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2707,7 +2707,7 @@ func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength ui
}
func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetVolumePathNamesForVolumeNameW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2715,7 +2715,7 @@ func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16
}
func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0)
+ r0, _, e1 := syscall.SyscallN(procGetWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen))
len = uint32(r0)
if len == 0 {
err = errnoErr(e1)
@@ -2724,7 +2724,7 @@ func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
}
func initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procInitializeProcThreadAttributeList.Addr(), 4, uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procInitializeProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2736,7 +2736,7 @@ func IsWow64Process(handle Handle, isWow64 *bool) (err error) {
if *isWow64 {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0)
+ r1, _, e1 := syscall.SyscallN(procIsWow64Process.Addr(), uintptr(handle), uintptr(unsafe.Pointer(&_p0)))
*isWow64 = _p0 != 0
if r1 == 0 {
err = errnoErr(e1)
@@ -2749,7 +2749,7 @@ func IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint1
if err != nil {
return
}
- r1, _, e1 := syscall.Syscall(procIsWow64Process2.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine)))
+ r1, _, e1 := syscall.SyscallN(procIsWow64Process2.Addr(), uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2766,7 +2766,7 @@ func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, e
}
func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags))
+ r0, _, e1 := syscall.SyscallN(procLoadLibraryExW.Addr(), uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2784,7 +2784,7 @@ func LoadLibrary(libname string) (handle Handle, err error) {
}
func _LoadLibrary(libname *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procLoadLibraryW.Addr(), uintptr(unsafe.Pointer(libname)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2793,7 +2793,7 @@ func _LoadLibrary(libname *uint16) (handle Handle, err error) {
}
func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) {
- r0, _, e1 := syscall.Syscall(procLoadResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0)
+ r0, _, e1 := syscall.SyscallN(procLoadResource.Addr(), uintptr(module), uintptr(resInfo))
resData = Handle(r0)
if resData == 0 {
err = errnoErr(e1)
@@ -2802,7 +2802,7 @@ func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) {
}
func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) {
- r0, _, e1 := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(length), 0)
+ r0, _, e1 := syscall.SyscallN(procLocalAlloc.Addr(), uintptr(flags), uintptr(length))
ptr = uintptr(r0)
if ptr == 0 {
err = errnoErr(e1)
@@ -2811,7 +2811,7 @@ func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) {
}
func LocalFree(hmem Handle) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procLocalFree.Addr(), uintptr(hmem))
handle = Handle(r0)
if handle != 0 {
err = errnoErr(e1)
@@ -2820,7 +2820,7 @@ func LocalFree(hmem Handle) (handle Handle, err error) {
}
func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)))
+ r1, _, e1 := syscall.SyscallN(procLockFileEx.Addr(), uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2828,7 +2828,7 @@ func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, byt
}
func LockResource(resData Handle) (addr uintptr, err error) {
- r0, _, e1 := syscall.Syscall(procLockResource.Addr(), 1, uintptr(resData), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procLockResource.Addr(), uintptr(resData))
addr = uintptr(r0)
if addr == 0 {
err = errnoErr(e1)
@@ -2837,7 +2837,7 @@ func LockResource(resData Handle) (addr uintptr, err error) {
}
func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) {
- r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0)
+ r0, _, e1 := syscall.SyscallN(procMapViewOfFile.Addr(), uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length))
addr = uintptr(r0)
if addr == 0 {
err = errnoErr(e1)
@@ -2846,7 +2846,7 @@ func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow ui
}
func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procModule32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procModule32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2854,7 +2854,7 @@ func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) {
}
func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procModule32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procModule32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2862,7 +2862,7 @@ func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) {
}
func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags))
+ r1, _, e1 := syscall.SyscallN(procMoveFileExW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2870,7 +2870,7 @@ func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) {
}
func MoveFile(from *uint16, to *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0)
+ r1, _, e1 := syscall.SyscallN(procMoveFileW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2878,7 +2878,7 @@ func MoveFile(from *uint16, to *uint16) (err error) {
}
func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) {
- r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar))
+ r0, _, e1 := syscall.SyscallN(procMultiByteToWideChar.Addr(), uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar))
nwrite = int32(r0)
if nwrite == 0 {
err = errnoErr(e1)
@@ -2891,7 +2891,7 @@ func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H
if inheritHandle {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
+ r0, _, e1 := syscall.SyscallN(procOpenEventW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2904,7 +2904,7 @@ func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H
if inheritHandle {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
+ r0, _, e1 := syscall.SyscallN(procOpenMutexW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2917,7 +2917,7 @@ func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (ha
if inheritHandle {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId))
+ r0, _, e1 := syscall.SyscallN(procOpenProcess.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(processId))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2930,7 +2930,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand
if inheritHandle {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId))
+ r0, _, e1 := syscall.SyscallN(procOpenThread.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(threadId))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2939,7 +2939,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand
}
func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procPostQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2947,7 +2947,7 @@ func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overla
}
func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procProcess32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2955,7 +2955,7 @@ func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) {
}
func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procProcess32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2963,7 +2963,7 @@ func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) {
}
func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procProcessIdToSessionId.Addr(), 2, uintptr(pid), uintptr(unsafe.Pointer(sessionid)), 0)
+ r1, _, e1 := syscall.SyscallN(procProcessIdToSessionId.Addr(), uintptr(pid), uintptr(unsafe.Pointer(sessionid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2971,7 +2971,7 @@ func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) {
}
func PulseEvent(event Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procPulseEvent.Addr(), uintptr(event))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2979,7 +2979,7 @@ func PulseEvent(event Handle) (err error) {
}
func PurgeComm(handle Handle, dwFlags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0)
+ r1, _, e1 := syscall.SyscallN(procPurgeComm.Addr(), uintptr(handle), uintptr(dwFlags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2987,7 +2987,7 @@ func PurgeComm(handle Handle, dwFlags uint32) (err error) {
}
func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max))
+ r0, _, e1 := syscall.SyscallN(procQueryDosDeviceW.Addr(), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2996,7 +2996,7 @@ func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint3
}
func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryFullProcessImageNameW.Addr(), 4, uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procQueryFullProcessImageNameW.Addr(), uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3004,7 +3004,7 @@ func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size
}
func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)), 0)
+ r1, _, e1 := syscall.SyscallN(procQueryInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3012,7 +3012,7 @@ func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobO
}
func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) {
- r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0)
+ r1, _, e1 := syscall.SyscallN(procReadConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3024,7 +3024,7 @@ func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree
if watchSubTree {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0)
+ r1, _, e1 := syscall.SyscallN(procReadDirectoryChangesW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3036,7 +3036,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (
if len(buf) > 0 {
_p0 = &buf[0]
}
- r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procReadFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3044,7 +3044,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (
}
func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesRead *uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procReadProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead)), 0)
+ r1, _, e1 := syscall.SyscallN(procReadProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3052,7 +3052,7 @@ func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size u
}
func ReleaseMutex(mutex Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procReleaseMutex.Addr(), uintptr(mutex))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3060,7 +3060,7 @@ func ReleaseMutex(mutex Handle) (err error) {
}
func RemoveDirectory(path *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procRemoveDirectoryW.Addr(), uintptr(unsafe.Pointer(path)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3068,7 +3068,7 @@ func RemoveDirectory(path *uint16) (err error) {
}
func RemoveDllDirectory(cookie uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procRemoveDllDirectory.Addr(), uintptr(cookie))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3076,7 +3076,7 @@ func RemoveDllDirectory(cookie uintptr) (err error) {
}
func ResetEvent(event Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procResetEvent.Addr(), uintptr(event))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3084,7 +3084,7 @@ func ResetEvent(event Handle) (err error) {
}
func resizePseudoConsole(pconsole Handle, size uint32) (hr error) {
- r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(pconsole), uintptr(size), 0)
+ r0, _, _ := syscall.SyscallN(procResizePseudoConsole.Addr(), uintptr(pconsole), uintptr(size))
if r0 != 0 {
hr = syscall.Errno(r0)
}
@@ -3092,7 +3092,7 @@ func resizePseudoConsole(pconsole Handle, size uint32) (hr error) {
}
func ResumeThread(thread Handle) (ret uint32, err error) {
- r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procResumeThread.Addr(), uintptr(thread))
ret = uint32(r0)
if ret == 0xffffffff {
err = errnoErr(e1)
@@ -3101,7 +3101,7 @@ func ResumeThread(thread Handle) (ret uint32, err error) {
}
func SetCommBreak(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetCommBreak.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3109,7 +3109,7 @@ func SetCommBreak(handle Handle) (err error) {
}
func SetCommMask(handle Handle, dwEvtMask uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0)
+ r1, _, e1 := syscall.SyscallN(procSetCommMask.Addr(), uintptr(handle), uintptr(dwEvtMask))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3117,7 +3117,7 @@ func SetCommMask(handle Handle, dwEvtMask uint32) (err error) {
}
func SetCommState(handle Handle, lpDCB *DCB) (err error) {
- r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3125,7 +3125,7 @@ func SetCommState(handle Handle, lpDCB *DCB) (err error) {
}
func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) {
- r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3133,7 +3133,7 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) {
}
func SetConsoleCP(cp uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetConsoleCP.Addr(), uintptr(cp))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3141,7 +3141,7 @@ func SetConsoleCP(cp uint32) (err error) {
}
func setConsoleCursorPosition(console Handle, position uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0)
+ r1, _, e1 := syscall.SyscallN(procSetConsoleCursorPosition.Addr(), uintptr(console), uintptr(position))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3149,7 +3149,7 @@ func setConsoleCursorPosition(console Handle, position uint32) (err error) {
}
func SetConsoleMode(console Handle, mode uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0)
+ r1, _, e1 := syscall.SyscallN(procSetConsoleMode.Addr(), uintptr(console), uintptr(mode))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3157,7 +3157,7 @@ func SetConsoleMode(console Handle, mode uint32) (err error) {
}
func SetConsoleOutputCP(cp uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetConsoleOutputCP.Addr(), uintptr(cp))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3165,7 +3165,7 @@ func SetConsoleOutputCP(cp uint32) (err error) {
}
func SetCurrentDirectory(path *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetCurrentDirectoryW.Addr(), uintptr(unsafe.Pointer(path)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3173,7 +3173,7 @@ func SetCurrentDirectory(path *uint16) (err error) {
}
func SetDefaultDllDirectories(directoryFlags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetDefaultDllDirectories.Addr(), 1, uintptr(directoryFlags), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetDefaultDllDirectories.Addr(), uintptr(directoryFlags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3190,7 +3190,7 @@ func SetDllDirectory(path string) (err error) {
}
func _SetDllDirectory(path *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procSetDllDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetDllDirectoryW.Addr(), uintptr(unsafe.Pointer(path)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3198,7 +3198,7 @@ func _SetDllDirectory(path *uint16) (err error) {
}
func SetEndOfFile(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetEndOfFile.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3206,7 +3206,7 @@ func SetEndOfFile(handle Handle) (err error) {
}
func SetEnvironmentVariable(name *uint16, value *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3214,13 +3214,13 @@ func SetEnvironmentVariable(name *uint16, value *uint16) (err error) {
}
func SetErrorMode(mode uint32) (ret uint32) {
- r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0)
+ r0, _, _ := syscall.SyscallN(procSetErrorMode.Addr(), uintptr(mode))
ret = uint32(r0)
return
}
func SetEvent(event Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetEvent.Addr(), uintptr(event))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3228,7 +3228,7 @@ func SetEvent(event Handle) (err error) {
}
func SetFileAttributes(name *uint16, attrs uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0)
+ r1, _, e1 := syscall.SyscallN(procSetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(attrs))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3236,7 +3236,7 @@ func SetFileAttributes(name *uint16, attrs uint32) (err error) {
}
func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) {
- r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(handle), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3244,7 +3244,7 @@ func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error)
}
func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetFileInformationByHandle.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3252,7 +3252,7 @@ func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inB
}
func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) {
- r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procSetFilePointer.Addr(), uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence))
newlowoffset = uint32(r0)
if newlowoffset == 0xffffffff {
err = errnoErr(e1)
@@ -3261,7 +3261,7 @@ func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence
}
func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3269,7 +3269,7 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim
}
func SetFileValidData(handle Handle, validDataLength int64) (err error) {
- r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0)
+ r1, _, e1 := syscall.SyscallN(procSetFileValidData.Addr(), uintptr(handle), uintptr(validDataLength))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3277,7 +3277,7 @@ func SetFileValidData(handle Handle, validDataLength int64) (err error) {
}
func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags))
+ r1, _, e1 := syscall.SyscallN(procSetHandleInformation.Addr(), uintptr(handle), uintptr(mask), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3285,7 +3285,7 @@ func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error)
}
func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) {
- r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procSetInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength))
ret = int(r0)
if ret == 0 {
err = errnoErr(e1)
@@ -3294,7 +3294,7 @@ func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobOb
}
func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetNamedPipeHandleState.Addr(), 4, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetNamedPipeHandleState.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3302,7 +3302,7 @@ func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uin
}
func SetPriorityClass(process Handle, priorityClass uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0)
+ r1, _, e1 := syscall.SyscallN(procSetPriorityClass.Addr(), uintptr(process), uintptr(priorityClass))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3314,7 +3314,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) {
if disable {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0)
+ r1, _, e1 := syscall.SyscallN(procSetProcessPriorityBoost.Addr(), uintptr(process), uintptr(_p0))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3322,7 +3322,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) {
}
func SetProcessShutdownParameters(level uint32, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procSetProcessShutdownParameters.Addr(), uintptr(level), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3330,7 +3330,7 @@ func SetProcessShutdownParameters(level uint32, flags uint32) (err error) {
}
func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3338,7 +3338,7 @@ func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr
}
func SetStdHandle(stdhandle uint32, handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0)
+ r1, _, e1 := syscall.SyscallN(procSetStdHandle.Addr(), uintptr(stdhandle), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3346,7 +3346,7 @@ func SetStdHandle(stdhandle uint32, handle Handle) (err error) {
}
func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetVolumeLabelW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3354,7 +3354,7 @@ func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) {
}
func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3362,7 +3362,7 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro
}
func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue))
+ r1, _, e1 := syscall.SyscallN(procSetupComm.Addr(), uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3370,7 +3370,7 @@ func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) {
}
func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) {
- r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0)
+ r0, _, e1 := syscall.SyscallN(procSizeofResource.Addr(), uintptr(module), uintptr(resInfo))
size = uint32(r0)
if size == 0 {
err = errnoErr(e1)
@@ -3383,13 +3383,13 @@ func SleepEx(milliseconds uint32, alertable bool) (ret uint32) {
if alertable {
_p0 = 1
}
- r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0)
+ r0, _, _ := syscall.SyscallN(procSleepEx.Addr(), uintptr(milliseconds), uintptr(_p0))
ret = uint32(r0)
return
}
func TerminateJobObject(job Handle, exitCode uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0)
+ r1, _, e1 := syscall.SyscallN(procTerminateJobObject.Addr(), uintptr(job), uintptr(exitCode))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3397,7 +3397,7 @@ func TerminateJobObject(job Handle, exitCode uint32) (err error) {
}
func TerminateProcess(handle Handle, exitcode uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0)
+ r1, _, e1 := syscall.SyscallN(procTerminateProcess.Addr(), uintptr(handle), uintptr(exitcode))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3405,7 +3405,7 @@ func TerminateProcess(handle Handle, exitcode uint32) (err error) {
}
func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procThread32First.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3413,7 +3413,7 @@ func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) {
}
func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procThread32Next.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3421,7 +3421,7 @@ func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) {
}
func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procUnlockFileEx.Addr(), uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3429,7 +3429,7 @@ func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint3
}
func UnmapViewOfFile(addr uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procUnmapViewOfFile.Addr(), uintptr(addr))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3437,7 +3437,7 @@ func UnmapViewOfFile(addr uintptr) (err error) {
}
func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) {
- r1, _, e1 := syscall.Syscall9(procUpdateProcThreadAttribute.Addr(), 7, uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procUpdateProcThreadAttribute.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3445,7 +3445,7 @@ func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32,
}
func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) {
- r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procVirtualAlloc.Addr(), uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect))
value = uintptr(r0)
if value == 0 {
err = errnoErr(e1)
@@ -3454,7 +3454,7 @@ func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint3
}
func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype))
+ r1, _, e1 := syscall.SyscallN(procVirtualFree.Addr(), uintptr(address), uintptr(size), uintptr(freetype))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3462,7 +3462,7 @@ func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) {
}
func VirtualLock(addr uintptr, length uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0)
+ r1, _, e1 := syscall.SyscallN(procVirtualLock.Addr(), uintptr(addr), uintptr(length))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3470,7 +3470,7 @@ func VirtualLock(addr uintptr, length uintptr) (err error) {
}
func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procVirtualProtect.Addr(), uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3478,7 +3478,7 @@ func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect
}
func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect uint32, oldProtect *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procVirtualProtectEx.Addr(), 5, uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect)), 0)
+ r1, _, e1 := syscall.SyscallN(procVirtualProtectEx.Addr(), uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3486,7 +3486,7 @@ func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect
}
func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procVirtualQuery.Addr(), 3, uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length))
+ r1, _, e1 := syscall.SyscallN(procVirtualQuery.Addr(), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3494,7 +3494,7 @@ func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintpt
}
func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procVirtualQueryEx.Addr(), 4, uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procVirtualQueryEx.Addr(), uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3502,7 +3502,7 @@ func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformat
}
func VirtualUnlock(addr uintptr, length uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0)
+ r1, _, e1 := syscall.SyscallN(procVirtualUnlock.Addr(), uintptr(addr), uintptr(length))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3510,13 +3510,13 @@ func VirtualUnlock(addr uintptr, length uintptr) (err error) {
}
func WTSGetActiveConsoleSessionId() (sessionID uint32) {
- r0, _, _ := syscall.Syscall(procWTSGetActiveConsoleSessionId.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procWTSGetActiveConsoleSessionId.Addr())
sessionID = uint32(r0)
return
}
func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped)))
+ r1, _, e1 := syscall.SyscallN(procWaitCommEvent.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3528,7 +3528,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil
if waitAll {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procWaitForMultipleObjects.Addr(), uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds))
event = uint32(r0)
if event == 0xffffffff {
err = errnoErr(e1)
@@ -3537,7 +3537,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil
}
func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) {
- r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0)
+ r0, _, e1 := syscall.SyscallN(procWaitForSingleObject.Addr(), uintptr(handle), uintptr(waitMilliseconds))
event = uint32(r0)
if event == 0xffffffff {
err = errnoErr(e1)
@@ -3546,7 +3546,7 @@ func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32,
}
func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) {
- r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0)
+ r1, _, e1 := syscall.SyscallN(procWriteConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3558,7 +3558,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped)
if len(buf) > 0 {
_p0 = &buf[0]
}
- r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procWriteFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3566,7 +3566,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped)
}
func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesWritten *uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procWriteProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)), 0)
+ r1, _, e1 := syscall.SyscallN(procWriteProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3574,7 +3574,7 @@ func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size
}
func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procAcceptEx.Addr(), uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3582,12 +3582,12 @@ func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32
}
func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) {
- syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0)
+ syscall.SyscallN(procGetAcceptExSockaddrs.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)))
return
}
func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procTransmitFile.Addr(), uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3595,7 +3595,7 @@ func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint
}
func NetApiBufferFree(buf *byte) (neterr error) {
- r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procNetApiBufferFree.Addr(), uintptr(unsafe.Pointer(buf)))
if r0 != 0 {
neterr = syscall.Errno(r0)
}
@@ -3603,7 +3603,7 @@ func NetApiBufferFree(buf *byte) (neterr error) {
}
func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) {
- r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType)))
+ r0, _, _ := syscall.SyscallN(procNetGetJoinInformation.Addr(), uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType)))
if r0 != 0 {
neterr = syscall.Errno(r0)
}
@@ -3611,7 +3611,7 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete
}
func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) {
- r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0)
+ r0, _, _ := syscall.SyscallN(procNetUserEnum.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)))
if r0 != 0 {
neterr = syscall.Errno(r0)
}
@@ -3619,7 +3619,7 @@ func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, pr
}
func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) {
- r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procNetUserGetInfo.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)))
if r0 != 0 {
neterr = syscall.Errno(r0)
}
@@ -3627,7 +3627,7 @@ func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **by
}
func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength), 0)
+ r0, _, _ := syscall.SyscallN(procNtCreateFile.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3635,7 +3635,7 @@ func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO
}
func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) {
- r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
+ r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3643,7 +3643,7 @@ func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, i
}
func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)), 0)
+ r0, _, _ := syscall.SyscallN(procNtQueryInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3651,7 +3651,7 @@ func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe
}
func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32, retLen *uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procNtQuerySystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3659,7 +3659,7 @@ func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInf
}
func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class), 0)
+ r0, _, _ := syscall.SyscallN(procNtSetInformationFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3667,7 +3667,7 @@ func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte,
}
func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procNtSetInformationProcess.Addr(), 4, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), 0, 0)
+ r0, _, _ := syscall.SyscallN(procNtSetInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3675,7 +3675,7 @@ func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.P
}
func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall(procNtSetSystemInformation.Addr(), 3, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen))
+ r0, _, _ := syscall.SyscallN(procNtSetSystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3683,13 +3683,13 @@ func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoL
}
func RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) {
- r0, _, _ := syscall.Syscall(procRtlAddFunctionTable.Addr(), 3, uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress))
+ r0, _, _ := syscall.SyscallN(procRtlAddFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress))
ret = r0 != 0
return
}
func RtlDefaultNpAcl(acl **ACL) (ntstatus error) {
- r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(acl)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(acl)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3697,13 +3697,13 @@ func RtlDefaultNpAcl(acl **ACL) (ntstatus error) {
}
func RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) {
- r0, _, _ := syscall.Syscall(procRtlDeleteFunctionTable.Addr(), 1, uintptr(unsafe.Pointer(functionTable)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlDeleteFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable)))
ret = r0 != 0
return
}
func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3711,7 +3711,7 @@ func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFile
}
func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3719,18 +3719,18 @@ func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString
}
func RtlGetCurrentPeb() (peb *PEB) {
- r0, _, _ := syscall.Syscall(procRtlGetCurrentPeb.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlGetCurrentPeb.Addr())
peb = (*PEB)(unsafe.Pointer(r0))
return
}
func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) {
- syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber)))
+ syscall.SyscallN(procRtlGetNtVersionNumbers.Addr(), uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber)))
return
}
func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) {
- r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlGetVersion.Addr(), uintptr(unsafe.Pointer(info)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3738,23 +3738,23 @@ func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) {
}
func RtlInitString(destinationString *NTString, sourceString *byte) {
- syscall.Syscall(procRtlInitString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0)
+ syscall.SyscallN(procRtlInitString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)))
return
}
func RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) {
- syscall.Syscall(procRtlInitUnicodeString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0)
+ syscall.SyscallN(procRtlInitUnicodeString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)))
return
}
func rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) {
- r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(ntstatus), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(ntstatus))
ret = syscall.Errno(r0)
return
}
func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) {
- r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0)
+ r0, _, _ := syscall.SyscallN(procCLSIDFromString.Addr(), uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -3762,7 +3762,7 @@ func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) {
}
func coCreateGuid(pguid *GUID) (ret error) {
- r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCoCreateGuid.Addr(), uintptr(unsafe.Pointer(pguid)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -3770,7 +3770,7 @@ func coCreateGuid(pguid *GUID) (ret error) {
}
func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) {
- r0, _, _ := syscall.Syscall6(procCoGetObject.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCoGetObject.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -3778,7 +3778,7 @@ func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable *
}
func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) {
- r0, _, _ := syscall.Syscall(procCoInitializeEx.Addr(), 2, uintptr(reserved), uintptr(coInit), 0)
+ r0, _, _ := syscall.SyscallN(procCoInitializeEx.Addr(), uintptr(reserved), uintptr(coInit))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -3786,23 +3786,23 @@ func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) {
}
func CoTaskMemFree(address unsafe.Pointer) {
- syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0)
+ syscall.SyscallN(procCoTaskMemFree.Addr(), uintptr(address))
return
}
func CoUninitialize() {
- syscall.Syscall(procCoUninitialize.Addr(), 0, 0, 0, 0)
+ syscall.SyscallN(procCoUninitialize.Addr())
return
}
func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) {
- r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax))
+ r0, _, _ := syscall.SyscallN(procStringFromGUID2.Addr(), uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax))
chars = int32(r0)
return
}
func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procEnumProcessModules.Addr(), 4, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procEnumProcessModules.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3810,7 +3810,7 @@ func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uin
}
func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procEnumProcessModulesEx.Addr(), 5, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag), 0)
+ r1, _, e1 := syscall.SyscallN(procEnumProcessModulesEx.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3818,7 +3818,7 @@ func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *u
}
func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned)))
+ r1, _, e1 := syscall.SyscallN(procEnumProcesses.Addr(), uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3826,7 +3826,7 @@ func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err
}
func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetModuleBaseNameW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetModuleBaseNameW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3834,7 +3834,7 @@ func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uin
}
func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetModuleFileNameExW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetModuleFileNameExW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3842,7 +3842,7 @@ func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size u
}
func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetModuleInformation.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetModuleInformation.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3850,7 +3850,7 @@ func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb
}
func QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procQueryWorkingSetEx.Addr(), 3, uintptr(process), uintptr(pv), uintptr(cb))
+ r1, _, e1 := syscall.SyscallN(procQueryWorkingSetEx.Addr(), uintptr(process), uintptr(pv), uintptr(cb))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3862,7 +3862,7 @@ func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callb
if ret != nil {
return
}
- r0, _, _ := syscall.Syscall6(procSubscribeServiceChangeNotifications.Addr(), 5, uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)), 0)
+ r0, _, _ := syscall.SyscallN(procSubscribeServiceChangeNotifications.Addr(), uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -3874,12 +3874,12 @@ func UnsubscribeServiceChangeNotifications(subscription uintptr) (err error) {
if err != nil {
return
}
- syscall.Syscall(procUnsubscribeServiceChangeNotifications.Addr(), 1, uintptr(subscription), 0, 0)
+ syscall.SyscallN(procUnsubscribeServiceChangeNotifications.Addr(), uintptr(subscription))
return
}
func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize)))
+ r1, _, e1 := syscall.SyscallN(procGetUserNameExW.Addr(), uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize)))
if r1&0xff == 0 {
err = errnoErr(e1)
}
@@ -3887,7 +3887,7 @@ func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err er
}
func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0)
+ r1, _, e1 := syscall.SyscallN(procTranslateNameW.Addr(), uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)))
if r1&0xff == 0 {
err = errnoErr(e1)
}
@@ -3895,7 +3895,7 @@ func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint
}
func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiBuildDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
+ r1, _, e1 := syscall.SyscallN(procSetupDiBuildDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3903,7 +3903,7 @@ func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa
}
func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiCallClassInstaller.Addr(), 3, uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiCallClassInstaller.Addr(), uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3911,7 +3911,7 @@ func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInf
}
func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiCancelDriverInfoSearch.Addr(), 1, uintptr(deviceInfoSet), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiCancelDriverInfoSearch.Addr(), uintptr(deviceInfoSet))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3919,7 +3919,7 @@ func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) {
}
func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiClassGuidsFromNameExW.Addr(), 6, uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
+ r1, _, e1 := syscall.SyscallN(procSetupDiClassGuidsFromNameExW.Addr(), uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3927,7 +3927,7 @@ func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGu
}
func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiClassNameFromGuidExW.Addr(), 6, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
+ r1, _, e1 := syscall.SyscallN(procSetupDiClassNameFromGuidExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3935,7 +3935,7 @@ func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSiz
}
func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) {
- r0, _, e1 := syscall.Syscall6(procSetupDiCreateDeviceInfoListExW.Addr(), 4, uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoListExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
handle = DevInfo(r0)
if handle == DevInfo(InvalidHandle) {
err = errnoErr(e1)
@@ -3944,7 +3944,7 @@ func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineN
}
func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) {
- r1, _, e1 := syscall.Syscall9(procSetupDiCreateDeviceInfoW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3952,7 +3952,7 @@ func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUI
}
func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiDestroyDeviceInfoList.Addr(), 1, uintptr(deviceInfoSet), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDeviceInfoList.Addr(), uintptr(deviceInfoSet))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3960,7 +3960,7 @@ func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) {
}
func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiDestroyDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
+ r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3968,7 +3968,7 @@ func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfo
}
func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiEnumDeviceInfo.Addr(), 3, uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiEnumDeviceInfo.Addr(), uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3976,7 +3976,7 @@ func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfo
}
func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiEnumDriverInfoW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiEnumDriverInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3984,7 +3984,7 @@ func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, d
}
func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) {
- r0, _, e1 := syscall.Syscall9(procSetupDiGetClassDevsExW.Addr(), 7, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procSetupDiGetClassDevsExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
handle = DevInfo(r0)
if handle == DevInfo(InvalidHandle) {
err = errnoErr(e1)
@@ -3993,7 +3993,7 @@ func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintp
}
func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiGetClassInstallParamsW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4001,7 +4001,7 @@ func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo
}
func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInfoListDetailW.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInfoListDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4009,7 +4009,7 @@ func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailDa
}
func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4017,7 +4017,7 @@ func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf
}
func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiGetDeviceInstanceIdW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstanceIdW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4025,7 +4025,7 @@ func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa
}
func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procSetupDiGetDevicePropertyW.Addr(), 8, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDevicePropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4033,7 +4033,7 @@ func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
}
func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procSetupDiGetDeviceRegistryPropertyW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4041,7 +4041,7 @@ func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev
}
func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiGetDriverInfoDetailW.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDriverInfoDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4049,7 +4049,7 @@ func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa
}
func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4057,7 +4057,7 @@ func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
}
func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4065,7 +4065,7 @@ func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
}
func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procSetupDiOpenDevRegKey.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired))
+ r0, _, e1 := syscall.SyscallN(procSetupDiOpenDevRegKey.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired))
key = Handle(r0)
if key == InvalidHandle {
err = errnoErr(e1)
@@ -4074,7 +4074,7 @@ func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Sc
}
func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiSetClassInstallParamsW.Addr(), 4, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiSetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4082,7 +4082,7 @@ func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo
}
func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiSetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4090,7 +4090,7 @@ func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf
}
func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiSetDeviceRegistryPropertyW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4098,7 +4098,7 @@ func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev
}
func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4106,7 +4106,7 @@ func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
}
func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4114,7 +4114,7 @@ func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
}
func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupUninstallOEMInfW.Addr(), 3, uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved))
+ r1, _, e1 := syscall.SyscallN(procSetupUninstallOEMInfW.Addr(), uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4122,7 +4122,7 @@ func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (er
}
func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) {
- r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0)
+ r0, _, e1 := syscall.SyscallN(procCommandLineToArgvW.Addr(), uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)))
argv = (**uint16)(unsafe.Pointer(r0))
if argv == nil {
err = errnoErr(e1)
@@ -4131,7 +4131,7 @@ func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) {
}
func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) {
- r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procSHGetKnownFolderPath.Addr(), uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -4139,7 +4139,7 @@ func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **u
}
func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) {
- r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd))
+ r1, _, e1 := syscall.SyscallN(procShellExecuteW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd))
if r1 <= 32 {
err = errnoErr(e1)
}
@@ -4147,12 +4147,12 @@ func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *ui
}
func EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) {
- syscall.Syscall(procEnumChildWindows.Addr(), 3, uintptr(hwnd), uintptr(enumFunc), uintptr(param))
+ syscall.SyscallN(procEnumChildWindows.Addr(), uintptr(hwnd), uintptr(enumFunc), uintptr(param))
return
}
func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) {
- r1, _, e1 := syscall.Syscall(procEnumWindows.Addr(), 2, uintptr(enumFunc), uintptr(param), 0)
+ r1, _, e1 := syscall.SyscallN(procEnumWindows.Addr(), uintptr(enumFunc), uintptr(param))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4160,7 +4160,7 @@ func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) {
}
func ExitWindowsEx(flags uint32, reason uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0)
+ r1, _, e1 := syscall.SyscallN(procExitWindowsEx.Addr(), uintptr(flags), uintptr(reason))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4168,7 +4168,7 @@ func ExitWindowsEx(flags uint32, reason uint32) (err error) {
}
func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) {
- r0, _, e1 := syscall.Syscall(procGetClassNameW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount))
+ r0, _, e1 := syscall.SyscallN(procGetClassNameW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount))
copied = int32(r0)
if copied == 0 {
err = errnoErr(e1)
@@ -4177,19 +4177,19 @@ func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, e
}
func GetDesktopWindow() (hwnd HWND) {
- r0, _, _ := syscall.Syscall(procGetDesktopWindow.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetDesktopWindow.Addr())
hwnd = HWND(r0)
return
}
func GetForegroundWindow() (hwnd HWND) {
- r0, _, _ := syscall.Syscall(procGetForegroundWindow.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetForegroundWindow.Addr())
hwnd = HWND(r0)
return
}
func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) {
- r1, _, e1 := syscall.Syscall(procGetGUIThreadInfo.Addr(), 2, uintptr(thread), uintptr(unsafe.Pointer(info)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetGUIThreadInfo.Addr(), uintptr(thread), uintptr(unsafe.Pointer(info)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4197,19 +4197,19 @@ func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) {
}
func GetKeyboardLayout(tid uint32) (hkl Handle) {
- r0, _, _ := syscall.Syscall(procGetKeyboardLayout.Addr(), 1, uintptr(tid), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetKeyboardLayout.Addr(), uintptr(tid))
hkl = Handle(r0)
return
}
func GetShellWindow() (shellWindow HWND) {
- r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetShellWindow.Addr())
shellWindow = HWND(r0)
return
}
func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetWindowThreadProcessId.Addr(), 2, uintptr(hwnd), uintptr(unsafe.Pointer(pid)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetWindowThreadProcessId.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(pid)))
tid = uint32(r0)
if tid == 0 {
err = errnoErr(e1)
@@ -4218,25 +4218,25 @@ func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) {
}
func IsWindow(hwnd HWND) (isWindow bool) {
- r0, _, _ := syscall.Syscall(procIsWindow.Addr(), 1, uintptr(hwnd), 0, 0)
+ r0, _, _ := syscall.SyscallN(procIsWindow.Addr(), uintptr(hwnd))
isWindow = r0 != 0
return
}
func IsWindowUnicode(hwnd HWND) (isUnicode bool) {
- r0, _, _ := syscall.Syscall(procIsWindowUnicode.Addr(), 1, uintptr(hwnd), 0, 0)
+ r0, _, _ := syscall.SyscallN(procIsWindowUnicode.Addr(), uintptr(hwnd))
isUnicode = r0 != 0
return
}
func IsWindowVisible(hwnd HWND) (isVisible bool) {
- r0, _, _ := syscall.Syscall(procIsWindowVisible.Addr(), 1, uintptr(hwnd), 0, 0)
+ r0, _, _ := syscall.SyscallN(procIsWindowVisible.Addr(), uintptr(hwnd))
isVisible = r0 != 0
return
}
func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) {
- r0, _, e1 := syscall.Syscall(procLoadKeyboardLayoutW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(flags), 0)
+ r0, _, e1 := syscall.SyscallN(procLoadKeyboardLayoutW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags))
hkl = Handle(r0)
if hkl == 0 {
err = errnoErr(e1)
@@ -4245,7 +4245,7 @@ func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) {
}
func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) {
- r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procMessageBoxW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype))
ret = int32(r0)
if ret == 0 {
err = errnoErr(e1)
@@ -4254,13 +4254,13 @@ func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret i
}
func ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) {
- r0, _, _ := syscall.Syscall9(procToUnicodeEx.Addr(), 7, uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl), 0, 0)
+ r0, _, _ := syscall.SyscallN(procToUnicodeEx.Addr(), uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl))
ret = int32(r0)
return
}
func UnloadKeyboardLayout(hkl Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procUnloadKeyboardLayout.Addr(), 1, uintptr(hkl), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procUnloadKeyboardLayout.Addr(), uintptr(hkl))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4272,7 +4272,7 @@ func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (
if inheritExisting {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0))
+ r1, _, e1 := syscall.SyscallN(procCreateEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4280,7 +4280,7 @@ func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (
}
func DestroyEnvironmentBlock(block *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDestroyEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4288,7 +4288,7 @@ func DestroyEnvironmentBlock(block *uint16) (err error) {
}
func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen)))
+ r1, _, e1 := syscall.SyscallN(procGetUserProfileDirectoryW.Addr(), uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4305,7 +4305,7 @@ func GetFileVersionInfoSize(filename string, zeroHandle *Handle) (bufSize uint32
}
func _GetFileVersionInfoSize(filename *uint16, zeroHandle *Handle) (bufSize uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetFileVersionInfoSizeW.Addr(), 2, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetFileVersionInfoSizeW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)))
bufSize = uint32(r0)
if bufSize == 0 {
err = errnoErr(e1)
@@ -4323,7 +4323,7 @@ func GetFileVersionInfo(filename string, handle uint32, bufSize uint32, buffer u
}
func _GetFileVersionInfo(filename *uint16, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetFileVersionInfoW.Addr(), 4, uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetFileVersionInfoW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4340,7 +4340,7 @@ func VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer
}
func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procVerQueryValueW.Addr(), 4, uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procVerQueryValueW.Addr(), uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4348,7 +4348,7 @@ func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPoint
}
func TimeBeginPeriod(period uint32) (err error) {
- r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0)
+ r1, _, e1 := syscall.SyscallN(proctimeBeginPeriod.Addr(), uintptr(period))
if r1 != 0 {
err = errnoErr(e1)
}
@@ -4356,7 +4356,7 @@ func TimeBeginPeriod(period uint32) (err error) {
}
func TimeEndPeriod(period uint32) (err error) {
- r1, _, e1 := syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0)
+ r1, _, e1 := syscall.SyscallN(proctimeEndPeriod.Addr(), uintptr(period))
if r1 != 0 {
err = errnoErr(e1)
}
@@ -4364,7 +4364,7 @@ func TimeEndPeriod(period uint32) (err error) {
}
func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) {
- r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data)))
+ r0, _, _ := syscall.SyscallN(procWinVerifyTrustEx.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -4372,12 +4372,12 @@ func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error)
}
func FreeAddrInfoW(addrinfo *AddrinfoW) {
- syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0)
+ syscall.SyscallN(procFreeAddrInfoW.Addr(), uintptr(unsafe.Pointer(addrinfo)))
return
}
func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) {
- r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetAddrInfoW.Addr(), uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)))
if r0 != 0 {
sockerr = syscall.Errno(r0)
}
@@ -4385,7 +4385,7 @@ func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, resul
}
func WSACleanup() (err error) {
- r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0)
+ r1, _, e1 := syscall.SyscallN(procWSACleanup.Addr())
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4393,7 +4393,7 @@ func WSACleanup() (err error) {
}
func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) {
- r1, _, e1 := syscall.Syscall(procWSADuplicateSocketW.Addr(), 3, uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info)))
+ r1, _, e1 := syscall.SyscallN(procWSADuplicateSocketW.Addr(), uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info)))
if r1 != 0 {
err = errnoErr(e1)
}
@@ -4401,7 +4401,7 @@ func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err
}
func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) {
- r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength)))
+ r0, _, e1 := syscall.SyscallN(procWSAEnumProtocolsW.Addr(), uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength)))
n = int32(r0)
if n == -1 {
err = errnoErr(e1)
@@ -4414,7 +4414,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f
if wait {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
+ r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4422,7 +4422,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f
}
func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) {
- r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine))
+ r1, _, e1 := syscall.SyscallN(procWSAIoctl.Addr(), uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4430,7 +4430,7 @@ func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbo
}
func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procWSALookupServiceBeginW.Addr(), 3, uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle)))
+ r1, _, e1 := syscall.SyscallN(procWSALookupServiceBeginW.Addr(), uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4438,7 +4438,7 @@ func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle)
}
func WSALookupServiceEnd(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procWSALookupServiceEnd.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procWSALookupServiceEnd.Addr(), uintptr(handle))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4446,7 +4446,7 @@ func WSALookupServiceEnd(handle Handle) (err error) {
}
func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) {
- r1, _, e1 := syscall.Syscall6(procWSALookupServiceNextW.Addr(), 4, uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procWSALookupServiceNextW.Addr(), uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4454,7 +4454,7 @@ func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WS
}
func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) {
- r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procWSARecv.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4462,7 +4462,7 @@ func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32
}
func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) {
- r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
+ r1, _, e1 := syscall.SyscallN(procWSARecvFrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4470,7 +4470,7 @@ func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *ui
}
func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) {
- r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procWSASend.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4478,7 +4478,7 @@ func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32,
}
func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) {
- r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
+ r1, _, e1 := syscall.SyscallN(procWSASendTo.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4486,7 +4486,7 @@ func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32
}
func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procWSASocketW.Addr(), 6, uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags))
+ r0, _, e1 := syscall.SyscallN(procWSASocketW.Addr(), uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -4495,7 +4495,7 @@ func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo,
}
func WSAStartup(verreq uint32, data *WSAData) (sockerr error) {
- r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0)
+ r0, _, _ := syscall.SyscallN(procWSAStartup.Addr(), uintptr(verreq), uintptr(unsafe.Pointer(data)))
if r0 != 0 {
sockerr = syscall.Errno(r0)
}
@@ -4503,7 +4503,7 @@ func WSAStartup(verreq uint32, data *WSAData) (sockerr error) {
}
func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) {
- r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
+ r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4511,7 +4511,7 @@ func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) {
}
func Closesocket(s Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procclosesocket.Addr(), uintptr(s))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4519,7 +4519,7 @@ func Closesocket(s Handle) (err error) {
}
func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) {
- r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
+ r1, _, e1 := syscall.SyscallN(procconnect.Addr(), uintptr(s), uintptr(name), uintptr(namelen))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4536,7 +4536,7 @@ func GetHostByName(name string) (h *Hostent, err error) {
}
func _GetHostByName(name *byte) (h *Hostent, err error) {
- r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procgethostbyname.Addr(), uintptr(unsafe.Pointer(name)))
h = (*Hostent)(unsafe.Pointer(r0))
if h == nil {
err = errnoErr(e1)
@@ -4545,7 +4545,7 @@ func _GetHostByName(name *byte) (h *Hostent, err error) {
}
func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) {
- r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4562,7 +4562,7 @@ func GetProtoByName(name string) (p *Protoent, err error) {
}
func _GetProtoByName(name *byte) (p *Protoent, err error) {
- r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procgetprotobyname.Addr(), uintptr(unsafe.Pointer(name)))
p = (*Protoent)(unsafe.Pointer(r0))
if p == nil {
err = errnoErr(e1)
@@ -4585,7 +4585,7 @@ func GetServByName(name string, proto string) (s *Servent, err error) {
}
func _GetServByName(name *byte, proto *byte) (s *Servent, err error) {
- r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0)
+ r0, _, e1 := syscall.SyscallN(procgetservbyname.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)))
s = (*Servent)(unsafe.Pointer(r0))
if s == nil {
err = errnoErr(e1)
@@ -4594,7 +4594,7 @@ func _GetServByName(name *byte, proto *byte) (s *Servent, err error) {
}
func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) {
- r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4602,7 +4602,7 @@ func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) {
}
func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) {
- r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0)
+ r1, _, e1 := syscall.SyscallN(procgetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4610,7 +4610,7 @@ func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int3
}
func listen(s Handle, backlog int32) (err error) {
- r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0)
+ r1, _, e1 := syscall.SyscallN(proclisten.Addr(), uintptr(s), uintptr(backlog))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4618,7 +4618,7 @@ func listen(s Handle, backlog int32) (err error) {
}
func Ntohs(netshort uint16) (u uint16) {
- r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0)
+ r0, _, _ := syscall.SyscallN(procntohs.Addr(), uintptr(netshort))
u = uint16(r0)
return
}
@@ -4628,7 +4628,7 @@ func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *
if len(buf) > 0 {
_p0 = &buf[0]
}
- r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+ r0, _, e1 := syscall.SyscallN(procrecvfrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
n = int32(r0)
if n == -1 {
err = errnoErr(e1)
@@ -4641,7 +4641,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (
if len(buf) > 0 {
_p0 = &buf[0]
}
- r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen))
+ r1, _, e1 := syscall.SyscallN(procsendto.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4649,7 +4649,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (
}
func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) {
- r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0)
+ r1, _, e1 := syscall.SyscallN(procsetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4657,7 +4657,7 @@ func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32
}
func shutdown(s Handle, how int32) (err error) {
- r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0)
+ r1, _, e1 := syscall.SyscallN(procshutdown.Addr(), uintptr(s), uintptr(how))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4665,7 +4665,7 @@ func shutdown(s Handle, how int32) (err error) {
}
func socket(af int32, typ int32, protocol int32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol))
+ r0, _, e1 := syscall.SyscallN(procsocket.Addr(), uintptr(af), uintptr(typ), uintptr(protocol))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -4674,7 +4674,7 @@ func socket(af int32, typ int32, protocol int32) (handle Handle, err error) {
}
func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0)
+ r1, _, e1 := syscall.SyscallN(procWTSEnumerateSessionsW.Addr(), uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4682,12 +4682,12 @@ func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessio
}
func WTSFreeMemory(ptr uintptr) {
- syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0)
+ syscall.SyscallN(procWTSFreeMemory.Addr(), uintptr(ptr))
return
}
func WTSQueryUserToken(session uint32, token *Token) (err error) {
- r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0)
+ r1, _, e1 := syscall.SyscallN(procWTSQueryUserToken.Addr(), uintptr(session), uintptr(unsafe.Pointer(token)))
if r1 == 0 {
err = errnoErr(e1)
}
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index 3dea235735..d954a64c38 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -277,11 +277,13 @@ func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status
if err == nil { // transport has not been closed
// Note: The trailer fields are compressed with hpack after this call returns.
// No WireLength field is set here.
+ s.hdrMu.Lock()
for _, sh := range ht.stats {
sh.HandleRPC(s.Context(), &stats.OutTrailer{
Trailer: s.trailer.Copy(),
})
}
+ s.hdrMu.Unlock()
}
ht.Close(errors.New("finished writing status"))
return err
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 9f725e15a8..83cee314c8 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -1353,10 +1353,10 @@ func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCo
// called to interrupt the potential blocking on other goroutines.
s.cancel()
- oldState := s.swapState(streamDone)
- if oldState == streamDone {
- return
- }
+ // We can't return early even if the stream's state is "done" as the state
+ // might have been set by the `finishStream` method. Deleting the stream via
+ // `finishStream` can get blocked on flow control.
+ s.swapState(streamDone)
t.deleteStream(s, eosReceived)
t.controlBuf.put(&cleanupStream{
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index bc1eb290f6..468f110658 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.75.0"
+const Version = "1.75.1"
diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
index bf1aba0e85..7b9f01afb0 100644
--- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
@@ -9,7 +9,7 @@ import "google.golang.org/protobuf/types/descriptorpb"
const (
Minimum = descriptorpb.Edition_EDITION_PROTO2
- Maximum = descriptorpb.Edition_EDITION_2023
+ Maximum = descriptorpb.Edition_EDITION_2024
// MaximumKnown is the maximum edition that is known to Go Protobuf, but not
// declared as supported. In other words: end users cannot use it, but
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
index a0aad2777f..66ba906806 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
@@ -13,8 +13,10 @@ import (
"google.golang.org/protobuf/reflect/protoreflect"
)
-var defaultsCache = make(map[Edition]EditionFeatures)
-var defaultsKeys = []Edition{}
+var (
+ defaultsCache = make(map[Edition]EditionFeatures)
+ defaultsKeys = []Edition{}
+)
func init() {
unmarshalEditionDefaults(editiondefaults.Defaults)
@@ -41,7 +43,7 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures {
b = b[m:]
parent.StripEnumPrefix = int(v)
default:
- panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num))
+ panic(fmt.Sprintf("unknown field number %d while unmarshalling GoFeatures", num))
}
}
return parent
@@ -76,7 +78,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures {
// DefaultSymbolVisibility is enforced in protoc, runtimes should not
// inspect this value.
default:
- panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num))
+ panic(fmt.Sprintf("unknown field number %d while unmarshalling FeatureSet", num))
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
@@ -150,7 +152,7 @@ func unmarshalEditionDefaults(b []byte) {
_, m := protowire.ConsumeVarint(b)
b = b[m:]
default:
- panic(fmt.Sprintf("unkown field number %d while unmarshalling EditionDefault", num))
+ panic(fmt.Sprintf("unknown field number %d while unmarshalling EditionDefault", num))
}
}
}
diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
index df8f918501..3ceb6fa7f5 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
@@ -27,6 +27,7 @@ const (
Api_SourceContext_field_name protoreflect.Name = "source_context"
Api_Mixins_field_name protoreflect.Name = "mixins"
Api_Syntax_field_name protoreflect.Name = "syntax"
+ Api_Edition_field_name protoreflect.Name = "edition"
Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name"
Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods"
@@ -35,6 +36,7 @@ const (
Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context"
Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins"
Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax"
+ Api_Edition_field_fullname protoreflect.FullName = "google.protobuf.Api.edition"
)
// Field numbers for google.protobuf.Api.
@@ -46,6 +48,7 @@ const (
Api_SourceContext_field_number protoreflect.FieldNumber = 5
Api_Mixins_field_number protoreflect.FieldNumber = 6
Api_Syntax_field_number protoreflect.FieldNumber = 7
+ Api_Edition_field_number protoreflect.FieldNumber = 8
)
// Names for google.protobuf.Method.
@@ -63,6 +66,7 @@ const (
Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming"
Method_Options_field_name protoreflect.Name = "options"
Method_Syntax_field_name protoreflect.Name = "syntax"
+ Method_Edition_field_name protoreflect.Name = "edition"
Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name"
Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url"
@@ -71,6 +75,7 @@ const (
Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming"
Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options"
Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax"
+ Method_Edition_field_fullname protoreflect.FullName = "google.protobuf.Method.edition"
)
// Field numbers for google.protobuf.Method.
@@ -82,6 +87,7 @@ const (
Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5
Method_Options_field_number protoreflect.FieldNumber = 6
Method_Syntax_field_number protoreflect.FieldNumber = 7
+ Method_Edition_field_number protoreflect.FieldNumber = 8
)
// Names for google.protobuf.Mixin.
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index a53364c599..31e79a6535 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -52,7 +52,7 @@ import (
const (
Major = 1
Minor = 36
- Patch = 7
+ Patch = 9
PreRelease = ""
)
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index 6843b0beaf..4eacb523c3 100644
--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -2873,7 +2873,10 @@ type FieldOptions struct {
// for accessors, or it will be completely ignored; in the very least, this
// is a formalization for deprecating fields.
Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // DEPRECATED. DO NOT USE!
// For Google-internal migration only. Do not use.
+ //
+ // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
// Indicate that the field value should not be printed out when using debug
// formats, e.g. when the field contains sensitive credentials.
@@ -2977,6 +2980,7 @@ func (x *FieldOptions) GetDeprecated() bool {
return Default_FieldOptions_Deprecated
}
+// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
func (x *FieldOptions) GetWeak() bool {
if x != nil && x.Weak != nil {
return *x.Weak
@@ -4843,7 +4847,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" +
"&deprecated_legacy_json_field_conflicts\x18\v \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" +
"\bfeatures\x18\f \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06J\x04\b\x06\x10\aJ\x04\b\b\x10\tJ\x04\b\t\x10\n" +
- "\"\x9d\r\n" +
+ "\"\xa1\r\n" +
"\fFieldOptions\x12A\n" +
"\x05ctype\x18\x01 \x01(\x0e2#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05ctype\x12\x16\n" +
"\x06packed\x18\x02 \x01(\bR\x06packed\x12G\n" +
@@ -4852,9 +4856,9 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" +
"\x0funverified_lazy\x18\x0f \x01(\b:\x05falseR\x0eunverifiedLazy\x12%\n" +
"\n" +
"deprecated\x18\x03 \x01(\b:\x05falseR\n" +
- "deprecated\x12\x19\n" +
+ "deprecated\x12\x1d\n" +
"\x04weak\x18\n" +
- " \x01(\b:\x05falseR\x04weak\x12(\n" +
+ " \x01(\b:\x05falseB\x02\x18\x01R\x04weak\x12(\n" +
"\fdebug_redact\x18\x10 \x01(\b:\x05falseR\vdebugRedact\x12K\n" +
"\tretention\x18\x11 \x01(\x0e2-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12H\n" +
"\atargets\x18\x13 \x03(\x0e2..google.protobuf.FieldOptions.OptionTargetTypeR\atargets\x12W\n" +
diff --git a/vendor/modules.txt b/vendor/modules.txt
index f57930032a..6a1f41569c 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -19,10 +19,7 @@ github.com/Microsoft/go-winio/pkg/guid
github.com/Microsoft/go-winio/pkg/process
github.com/Microsoft/go-winio/tools/mkwinsyscall
github.com/Microsoft/go-winio/vhd
-# github.com/OneOfOne/xxhash v1.2.8
-## explicit; go 1.11
-github.com/OneOfOne/xxhash
-# github.com/agnivade/levenshtein v1.2.0
+# github.com/agnivade/levenshtein v1.2.1
## explicit; go 1.21
github.com/agnivade/levenshtein
# github.com/akavel/rsrc v0.10.2
@@ -58,7 +55,7 @@ github.com/containerd/containerd/api/runtime/task/v3
github.com/containerd/containerd/api/services/ttrpc/events/v1
github.com/containerd/containerd/api/types
github.com/containerd/containerd/api/types/task
-# github.com/containerd/containerd/v2 v2.1.2
+# github.com/containerd/containerd/v2 v2.1.4
## explicit; go 1.23.0
github.com/containerd/containerd/v2/core/events
github.com/containerd/containerd/v2/core/mount
@@ -128,10 +125,10 @@ github.com/containerd/typeurl/v2
# github.com/coreos/go-systemd/v22 v22.5.0
## explicit; go 1.12
github.com/coreos/go-systemd/v22/dbus
-# github.com/cpuguy83/go-md2man/v2 v2.0.5
-## explicit; go 1.11
+# github.com/cpuguy83/go-md2man/v2 v2.0.7
+## explicit; go 1.12
github.com/cpuguy83/go-md2man/v2/md2man
-# github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0
+# github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0
## explicit; go 1.17
github.com/decred/dcrd/dcrec/secp256k1/v4
# github.com/docker/cli v24.0.0+incompatible
@@ -176,8 +173,8 @@ github.com/gobwas/glob/syntax/ast
github.com/gobwas/glob/syntax/lexer
github.com/gobwas/glob/util/runes
github.com/gobwas/glob/util/strings
-# github.com/goccy/go-json v0.10.2
-## explicit; go 1.12
+# github.com/goccy/go-json v0.10.5
+## explicit; go 1.19
github.com/goccy/go-json
github.com/goccy/go-json/internal/decoder
github.com/goccy/go-json/internal/encoder
@@ -235,9 +232,6 @@ github.com/google/go-containerregistry/pkg/v1/types
# github.com/google/uuid v1.6.0
## explicit
github.com/google/uuid
-# github.com/gorilla/mux v1.8.1
-## explicit; go 1.20
-github.com/gorilla/mux
# github.com/josephspurrier/goversioninfo v1.5.0
## explicit; go 1.18
github.com/josephspurrier/goversioninfo
@@ -255,12 +249,25 @@ github.com/klauspost/compress/zstd/internal/xxhash
# github.com/lestrrat-go/backoff/v2 v2.0.8
## explicit; go 1.16
github.com/lestrrat-go/backoff/v2
-# github.com/lestrrat-go/blackmagic v1.0.2
-## explicit; go 1.16
+# github.com/lestrrat-go/blackmagic v1.0.4
+## explicit; go 1.23
github.com/lestrrat-go/blackmagic
+# github.com/lestrrat-go/dsig v1.0.0
+## explicit; go 1.23.0
+github.com/lestrrat-go/dsig
+github.com/lestrrat-go/dsig/internal/ecutil
+# github.com/lestrrat-go/dsig-secp256k1 v1.0.0
+## explicit; go 1.23.0
+github.com/lestrrat-go/dsig-secp256k1
# github.com/lestrrat-go/httpcc v1.0.1
## explicit; go 1.16
github.com/lestrrat-go/httpcc
+# github.com/lestrrat-go/httprc/v3 v3.0.1
+## explicit; go 1.23.0
+github.com/lestrrat-go/httprc/v3
+github.com/lestrrat-go/httprc/v3/errsink
+github.com/lestrrat-go/httprc/v3/proxysink
+github.com/lestrrat-go/httprc/v3/tracesink
# github.com/lestrrat-go/iter v1.0.2
## explicit; go 1.13
github.com/lestrrat-go/iter/arrayiter
@@ -275,9 +282,42 @@ github.com/lestrrat-go/jwx/internal/pool
github.com/lestrrat-go/jwx/jwa
github.com/lestrrat-go/jwx/jwk
github.com/lestrrat-go/jwx/x25519
+# github.com/lestrrat-go/jwx/v3 v3.0.11
+## explicit; go 1.24.4
+github.com/lestrrat-go/jwx/v3
+github.com/lestrrat-go/jwx/v3/cert
+github.com/lestrrat-go/jwx/v3/internal/base64
+github.com/lestrrat-go/jwx/v3/internal/ecutil
+github.com/lestrrat-go/jwx/v3/internal/json
+github.com/lestrrat-go/jwx/v3/internal/jwxio
+github.com/lestrrat-go/jwx/v3/internal/keyconv
+github.com/lestrrat-go/jwx/v3/internal/pool
+github.com/lestrrat-go/jwx/v3/internal/tokens
+github.com/lestrrat-go/jwx/v3/jwa
+github.com/lestrrat-go/jwx/v3/jwe
+github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc
+github.com/lestrrat-go/jwx/v3/jwe/internal/cipher
+github.com/lestrrat-go/jwx/v3/jwe/internal/concatkdf
+github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt
+github.com/lestrrat-go/jwx/v3/jwe/internal/keygen
+github.com/lestrrat-go/jwx/v3/jwe/jwebb
+github.com/lestrrat-go/jwx/v3/jwk
+github.com/lestrrat-go/jwx/v3/jwk/ecdsa
+github.com/lestrrat-go/jwx/v3/jwk/jwkbb
+github.com/lestrrat-go/jwx/v3/jws
+github.com/lestrrat-go/jwx/v3/jws/internal/keytype
+github.com/lestrrat-go/jwx/v3/jws/jwsbb
+github.com/lestrrat-go/jwx/v3/jws/legacy
+github.com/lestrrat-go/jwx/v3/jwt
+github.com/lestrrat-go/jwx/v3/jwt/internal/errors
+github.com/lestrrat-go/jwx/v3/jwt/internal/types
+github.com/lestrrat-go/jwx/v3/transform
# github.com/lestrrat-go/option v1.0.1
## explicit; go 1.16
github.com/lestrrat-go/option
+# github.com/lestrrat-go/option/v2 v2.0.0
+## explicit; go 1.23
+github.com/lestrrat-go/option/v2
# github.com/linuxkit/virtsock v0.0.0-20241009230534-cb6a20cc0422
## explicit; go 1.17
github.com/linuxkit/virtsock/pkg/vsock
@@ -305,18 +345,12 @@ github.com/moby/sys/userns
# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
## explicit
github.com/munnerz/goautoneg
-# github.com/open-policy-agent/opa v0.70.0
-## explicit; go 1.21
+# github.com/open-policy-agent/opa v1.10.1
+## explicit; go 1.24.6
github.com/open-policy-agent/opa/ast
-github.com/open-policy-agent/opa/ast/internal/scanner
-github.com/open-policy-agent/opa/ast/internal/tokens
github.com/open-policy-agent/opa/ast/json
-github.com/open-policy-agent/opa/ast/location
github.com/open-policy-agent/opa/bundle
github.com/open-policy-agent/opa/capabilities
-github.com/open-policy-agent/opa/config
-github.com/open-policy-agent/opa/format
-github.com/open-policy-agent/opa/hooks
github.com/open-policy-agent/opa/internal/bundle
github.com/open-policy-agent/opa/internal/cidr/merge
github.com/open-policy-agent/opa/internal/compiler
@@ -331,19 +365,7 @@ github.com/open-policy-agent/opa/internal/file/archive
github.com/open-policy-agent/opa/internal/file/url
github.com/open-policy-agent/opa/internal/future
github.com/open-policy-agent/opa/internal/gojsonschema
-github.com/open-policy-agent/opa/internal/gqlparser/ast
-github.com/open-policy-agent/opa/internal/gqlparser/gqlerror
-github.com/open-policy-agent/opa/internal/gqlparser/lexer
-github.com/open-policy-agent/opa/internal/gqlparser/parser
-github.com/open-policy-agent/opa/internal/gqlparser/validator
-github.com/open-policy-agent/opa/internal/gqlparser/validator/rules
github.com/open-policy-agent/opa/internal/json/patch
-github.com/open-policy-agent/opa/internal/jwx/buffer
-github.com/open-policy-agent/opa/internal/jwx/jwa
-github.com/open-policy-agent/opa/internal/jwx/jwk
-github.com/open-policy-agent/opa/internal/jwx/jws
-github.com/open-policy-agent/opa/internal/jwx/jws/sign
-github.com/open-policy-agent/opa/internal/jwx/jws/verify
github.com/open-policy-agent/opa/internal/lcss
github.com/open-policy-agent/opa/internal/leb128
github.com/open-policy-agent/opa/internal/merge
@@ -368,33 +390,49 @@ github.com/open-policy-agent/opa/internal/wasm/opcode
github.com/open-policy-agent/opa/internal/wasm/sdk/opa/capabilities
github.com/open-policy-agent/opa/internal/wasm/types
github.com/open-policy-agent/opa/internal/wasm/util
-github.com/open-policy-agent/opa/ir
-github.com/open-policy-agent/opa/keys
github.com/open-policy-agent/opa/loader
-github.com/open-policy-agent/opa/loader/extension
-github.com/open-policy-agent/opa/loader/filter
-github.com/open-policy-agent/opa/logging
-github.com/open-policy-agent/opa/metrics
-github.com/open-policy-agent/opa/plugins
-github.com/open-policy-agent/opa/plugins/rest
github.com/open-policy-agent/opa/rego
-github.com/open-policy-agent/opa/resolver
-github.com/open-policy-agent/opa/resolver/wasm
-github.com/open-policy-agent/opa/schemas
github.com/open-policy-agent/opa/storage
github.com/open-policy-agent/opa/storage/inmem
-github.com/open-policy-agent/opa/storage/internal/errors
-github.com/open-policy-agent/opa/storage/internal/ptr
github.com/open-policy-agent/opa/topdown
-github.com/open-policy-agent/opa/topdown/builtins
-github.com/open-policy-agent/opa/topdown/cache
-github.com/open-policy-agent/opa/topdown/copypropagation
github.com/open-policy-agent/opa/topdown/print
-github.com/open-policy-agent/opa/tracing
-github.com/open-policy-agent/opa/types
-github.com/open-policy-agent/opa/util
-github.com/open-policy-agent/opa/util/decoding
-github.com/open-policy-agent/opa/version
+github.com/open-policy-agent/opa/v1/ast
+github.com/open-policy-agent/opa/v1/ast/internal/scanner
+github.com/open-policy-agent/opa/v1/ast/internal/tokens
+github.com/open-policy-agent/opa/v1/ast/json
+github.com/open-policy-agent/opa/v1/ast/location
+github.com/open-policy-agent/opa/v1/bundle
+github.com/open-policy-agent/opa/v1/capabilities
+github.com/open-policy-agent/opa/v1/config
+github.com/open-policy-agent/opa/v1/format
+github.com/open-policy-agent/opa/v1/hooks
+github.com/open-policy-agent/opa/v1/ir
+github.com/open-policy-agent/opa/v1/keys
+github.com/open-policy-agent/opa/v1/loader
+github.com/open-policy-agent/opa/v1/loader/extension
+github.com/open-policy-agent/opa/v1/loader/filter
+github.com/open-policy-agent/opa/v1/logging
+github.com/open-policy-agent/opa/v1/metrics
+github.com/open-policy-agent/opa/v1/plugins
+github.com/open-policy-agent/opa/v1/plugins/rest
+github.com/open-policy-agent/opa/v1/rego
+github.com/open-policy-agent/opa/v1/resolver
+github.com/open-policy-agent/opa/v1/resolver/wasm
+github.com/open-policy-agent/opa/v1/schemas
+github.com/open-policy-agent/opa/v1/storage
+github.com/open-policy-agent/opa/v1/storage/inmem
+github.com/open-policy-agent/opa/v1/storage/internal/errors
+github.com/open-policy-agent/opa/v1/storage/internal/ptr
+github.com/open-policy-agent/opa/v1/topdown
+github.com/open-policy-agent/opa/v1/topdown/builtins
+github.com/open-policy-agent/opa/v1/topdown/cache
+github.com/open-policy-agent/opa/v1/topdown/copypropagation
+github.com/open-policy-agent/opa/v1/topdown/print
+github.com/open-policy-agent/opa/v1/tracing
+github.com/open-policy-agent/opa/v1/types
+github.com/open-policy-agent/opa/v1/util
+github.com/open-policy-agent/opa/v1/util/decoding
+github.com/open-policy-agent/opa/v1/version
# github.com/opencontainers/cgroups v0.0.4
## explicit; go 1.23.0
github.com/opencontainers/cgroups/devices/config
@@ -418,32 +456,41 @@ github.com/pelletier/go-toml
# github.com/pkg/errors v0.9.1
## explicit
github.com/pkg/errors
-# github.com/prometheus/client_golang v1.22.0
-## explicit; go 1.22
+# github.com/prometheus/client_golang v1.23.2
+## explicit; go 1.23.0
github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/internal
-# github.com/prometheus/client_model v0.6.1
-## explicit; go 1.19
+# github.com/prometheus/client_model v0.6.2
+## explicit; go 1.22.0
github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.62.0
-## explicit; go 1.21
+# github.com/prometheus/common v0.66.1
+## explicit; go 1.23.0
github.com/prometheus/common/expfmt
github.com/prometheus/common/model
-# github.com/prometheus/procfs v0.15.1
-## explicit; go 1.20
+# github.com/prometheus/procfs v0.17.0
+## explicit; go 1.23.0
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
-# github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
+# github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9
## explicit
github.com/rcrowley/go-metrics
# github.com/russross/blackfriday/v2 v2.1.0
## explicit
github.com/russross/blackfriday/v2
-# github.com/sirupsen/logrus v1.9.3
+# github.com/segmentio/asm v1.2.0
+## explicit; go 1.18
+github.com/segmentio/asm/base64
+github.com/segmentio/asm/cpu
+github.com/segmentio/asm/cpu/arm
+github.com/segmentio/asm/cpu/arm64
+github.com/segmentio/asm/cpu/cpuid
+github.com/segmentio/asm/cpu/x86
+github.com/segmentio/asm/internal/unsafebytes
+# github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af
## explicit; go 1.13
github.com/sirupsen/logrus
-# github.com/tchap/go-patricia/v2 v2.3.2
+# github.com/tchap/go-patricia/v2 v2.3.3
## explicit; go 1.16
github.com/tchap/go-patricia/v2/patricia
# github.com/urfave/cli v1.22.16
@@ -452,9 +499,22 @@ github.com/urfave/cli
# github.com/urfave/cli/v2 v2.27.6
## explicit; go 1.18
github.com/urfave/cli/v2
+# github.com/valyala/fastjson v1.6.4
+## explicit; go 1.12
+github.com/valyala/fastjson
+github.com/valyala/fastjson/fastfloat
# github.com/vbatts/tar-split v0.11.5
## explicit; go 1.17
github.com/vbatts/tar-split/archive/tar
+# github.com/vektah/gqlparser/v2 v2.5.30
+## explicit; go 1.22
+github.com/vektah/gqlparser/v2/ast
+github.com/vektah/gqlparser/v2/gqlerror
+github.com/vektah/gqlparser/v2/lexer
+github.com/vektah/gqlparser/v2/parser
+github.com/vektah/gqlparser/v2/validator
+github.com/vektah/gqlparser/v2/validator/core
+github.com/vektah/gqlparser/v2/validator/rules
# github.com/veraison/go-cose v1.1.0
## explicit; go 1.18
github.com/veraison/go-cose
@@ -507,7 +567,7 @@ go.opencensus.io/trace/tracestate
## explicit; go 1.22.0
go.opentelemetry.io/auto/sdk
go.opentelemetry.io/auto/sdk/internal/telemetry
-# go.opentelemetry.io/otel v1.37.0
+# go.opentelemetry.io/otel v1.38.0
## explicit; go 1.23.0
go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute
@@ -518,12 +578,14 @@ go.opentelemetry.io/otel/internal/baggage
go.opentelemetry.io/otel/internal/global
go.opentelemetry.io/otel/propagation
go.opentelemetry.io/otel/semconv/v1.26.0
-go.opentelemetry.io/otel/semconv/v1.34.0
-# go.opentelemetry.io/otel/metric v1.37.0
+go.opentelemetry.io/otel/semconv/v1.37.0
+go.opentelemetry.io/otel/semconv/v1.37.0/otelconv
+# go.opentelemetry.io/otel/metric v1.38.0
## explicit; go 1.23.0
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/metric/embedded
-# go.opentelemetry.io/otel/sdk v1.37.0
+go.opentelemetry.io/otel/metric/noop
+# go.opentelemetry.io/otel/sdk v1.38.0
## explicit; go 1.23.0
go.opentelemetry.io/otel/sdk
go.opentelemetry.io/otel/sdk/instrumentation
@@ -531,7 +593,8 @@ go.opentelemetry.io/otel/sdk/internal/env
go.opentelemetry.io/otel/sdk/internal/x
go.opentelemetry.io/otel/sdk/resource
go.opentelemetry.io/otel/sdk/trace
-# go.opentelemetry.io/otel/trace v1.37.0
+go.opentelemetry.io/otel/sdk/trace/internal/x
+# go.opentelemetry.io/otel/trace v1.38.0
## explicit; go 1.23.0
go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
@@ -542,17 +605,21 @@ go.opentelemetry.io/otel/trace/noop
go.uber.org/mock/gomock
go.uber.org/mock/mockgen
go.uber.org/mock/mockgen/model
-# golang.org/x/crypto v0.41.0
-## explicit; go 1.23.0
+# go.yaml.in/yaml/v2 v2.4.2
+## explicit; go 1.15
+go.yaml.in/yaml/v2
+# golang.org/x/crypto v0.42.0
+## explicit; go 1.24.0
golang.org/x/crypto/curve25519
+golang.org/x/crypto/pbkdf2
# golang.org/x/mod v0.27.0
## explicit; go 1.23.0
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/modfile
golang.org/x/mod/module
golang.org/x/mod/semver
-# golang.org/x/net v0.43.0
-## explicit; go 1.23.0
+# golang.org/x/net v0.44.0
+## explicit; go 1.24.0
golang.org/x/net/bpf
golang.org/x/net/http/httpguts
golang.org/x/net/http2
@@ -561,11 +628,12 @@ golang.org/x/net/idna
golang.org/x/net/internal/httpcommon
golang.org/x/net/internal/timeseries
golang.org/x/net/trace
-# golang.org/x/sync v0.16.0
-## explicit; go 1.23.0
+# golang.org/x/sync v0.17.0
+## explicit; go 1.24.0
golang.org/x/sync/errgroup
-# golang.org/x/sys v0.35.0
-## explicit; go 1.23.0
+# golang.org/x/sys v0.36.0
+## explicit; go 1.24.0
+golang.org/x/sys/cpu
golang.org/x/sys/execabs
golang.org/x/sys/unix
golang.org/x/sys/windows
@@ -573,8 +641,8 @@ golang.org/x/sys/windows/registry
golang.org/x/sys/windows/svc
golang.org/x/sys/windows/svc/debug
golang.org/x/sys/windows/svc/mgr
-# golang.org/x/text v0.28.0
-## explicit; go 1.23.0
+# golang.org/x/text v0.29.0
+## explicit; go 1.24.0
golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
@@ -603,10 +671,10 @@ golang.org/x/tools/internal/stdlib
golang.org/x/tools/internal/typeparams
golang.org/x/tools/internal/typesinternal
golang.org/x/tools/internal/versions
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5
## explicit; go 1.23.0
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.75.0
+# google.golang.org/grpc v1.75.1
## explicit; go 1.23.0
google.golang.org/grpc
google.golang.org/grpc/attributes
@@ -671,8 +739,8 @@ google.golang.org/grpc/tap
# google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1
## explicit; go 1.21
google.golang.org/grpc/cmd/protoc-gen-go-grpc
-# google.golang.org/protobuf v1.36.7
-## explicit; go 1.22
+# google.golang.org/protobuf v1.36.9
+## explicit; go 1.23
google.golang.org/protobuf/cmd/protoc-gen-go
google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo
google.golang.org/protobuf/compiler/protogen
@@ -725,8 +793,7 @@ google.golang.org/protobuf/types/pluginpb
# gopkg.in/yaml.v3 v3.0.1
## explicit
gopkg.in/yaml.v3
-# sigs.k8s.io/yaml v1.4.0
-## explicit; go 1.12
+# sigs.k8s.io/yaml v1.6.0
+## explicit; go 1.22
sigs.k8s.io/yaml
-sigs.k8s.io/yaml/goyaml.v2
# google.golang.org/genproto => google.golang.org/genproto v0.0.0-20250428153025-10db94c68c34
diff --git a/vendor/sigs.k8s.io/yaml/.travis.yml b/vendor/sigs.k8s.io/yaml/.travis.yml
deleted file mode 100644
index 54ed8f9cb9..0000000000
--- a/vendor/sigs.k8s.io/yaml/.travis.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-language: go
-arch: arm64
-dist: focal
-go: 1.15.x
-script:
- - diff -u <(echo -n) <(gofmt -d *.go)
- - diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON)
- - GO111MODULE=on go vet .
- - GO111MODULE=on go test -v -race ./...
- - git diff --exit-code
-install:
- - GO111MODULE=off go get golang.org/x/lint/golint
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS b/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS
deleted file mode 100644
index 73be0a3a9b..0000000000
--- a/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS
+++ /dev/null
@@ -1,24 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-- dims
-- jpbetz
-- smarterclayton
-- deads2k
-- sttts
-- liggitt
-- natasha41575
-- knverey
-reviewers:
-- dims
-- thockin
-- jpbetz
-- smarterclayton
-- deads2k
-- derekwaynecarr
-- mikedanese
-- liggitt
-- sttts
-- tallclair
-labels:
-- sig/api-machinery
diff --git a/vendor/sigs.k8s.io/yaml/yaml.go b/vendor/sigs.k8s.io/yaml/yaml.go
index fc10246bdb..aa01acd45d 100644
--- a/vendor/sigs.k8s.io/yaml/yaml.go
+++ b/vendor/sigs.k8s.io/yaml/yaml.go
@@ -24,7 +24,7 @@ import (
"reflect"
"strconv"
- "sigs.k8s.io/yaml/goyaml.v2"
+ "go.yaml.in/yaml/v2"
)
// Marshal marshals obj into JSON using stdlib json.Marshal, and then converts JSON to YAML using JSONToYAML (see that method for more reference)
@@ -92,7 +92,7 @@ func jsonUnmarshal(reader io.Reader, obj interface{}, opts ...JSONOpt) error {
d = opt(d)
}
if err := d.Decode(&obj); err != nil {
- return fmt.Errorf("while decoding JSON: %v", err)
+ return fmt.Errorf("while decoding JSON: %w", err)
}
return nil
}
@@ -417,3 +417,10 @@ func jsonToYAMLValue(j interface{}) interface{} {
}
return j
}
+
+// DisallowUnknownFields configures the JSON decoder to error out if unknown
+// fields come along, instead of dropping them by default.
+func DisallowUnknownFields(d *json.Decoder) *json.Decoder {
+ d.DisallowUnknownFields()
+ return d
+}
diff --git a/vendor/sigs.k8s.io/yaml/yaml_go110.go b/vendor/sigs.k8s.io/yaml/yaml_go110.go
deleted file mode 100644
index 94abc1719d..0000000000
--- a/vendor/sigs.k8s.io/yaml/yaml_go110.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// This file contains changes that are only compatible with go 1.10 and onwards.
-
-//go:build go1.10
-// +build go1.10
-
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package yaml
-
-import "encoding/json"
-
-// DisallowUnknownFields configures the JSON decoder to error out if unknown
-// fields come along, instead of dropping them by default.
-func DisallowUnknownFields(d *json.Decoder) *json.Decoder {
- d.DisallowUnknownFields()
- return d
-}